1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy Ethernet header below, used when
 * programming MAC / EtherType / VLAN switch filter rules.
 */
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* Maximum valid 802.1Q VLAN ID (12-bit field) */
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* NOTE(review): the remaining initializer bytes (SA and EtherType/VLAN
 * placeholder) are not visible in this excerpt of the file.
 */
/* Sizes (in bytes) of admin-queue switch-rule buffers. Each macro starts from
 * sizeof(struct ice_aqc_sw_rules_elem), subtracts the generic 'pdata' union,
 * and adds the size of the specific rule payload. The trailing "- 1" accounts
 * for the one-byte flexible data member already counted inside the payload
 * struct.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule carrying n actions */
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule carrying n VSI entries */
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Maps a protocol header type to its byte offset inside a dummy packet;
 * arrays of these are terminated by an ICE_PROTOCOL_LAST entry.
 */
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Protocol offsets for the NVGRE-tunneled dummy packet below.
 * NOTE(review): some entries (e.g. the outer MAC at offset 0) are elided
 * from this excerpt.
 */
62 struct ice_dummy_pkt_offsets dummy_gre_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
68 { ICE_PROTOCOL_LAST, 0 },
/* Dummy NVGRE packet: outer MAC/IPv4, NVGRE header, inner MAC/IPv4.
 * Offsets in the trailing comments match dummy_gre_packet_offsets.
 */
72 u8 dummy_gre_packet[] = { 0, 0, 0, 0, /* ICE_MAC_OFOS 0 */
76 0x45, 0, 0, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x80, 0, 0x65, 0x58, /* ICE_NVGRE 34 */
83 0, 0, 0, 0, /* ICE_MAC_IL 42 */
87 0x45, 0, 0, 0x14, /* ICE_IPV4_IL 54 */
/* Protocol offsets for the VXLAN-tunneled dummy packet with an inner TCP
 * segment (dummy_udp_tun_tcp_packet below).
 */
95 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
97 { ICE_IPV4_OFOS, 14 },
103 { ICE_PROTOCOL_LAST, 0 },
/* Dummy VXLAN packet: outer MAC/IPv4/UDP(dst 4789 = 0x12b5), VXLAN header,
 * inner MAC/IPv4/TCP.
 */
107 u8 dummy_udp_tun_tcp_packet[] = {
108 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
113 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
114 0x00, 0x01, 0x00, 0x00,
115 0x40, 0x11, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
120 0x00, 0x46, 0x00, 0x00,
122 0x04, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00,
130 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
131 0x00, 0x01, 0x00, 0x00,
132 0x40, 0x06, 0x00, 0x00,
133 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
139 0x50, 0x02, 0x20, 0x00,
140 0x00, 0x00, 0x00, 0x00
/* Protocol offsets for the VXLAN-tunneled dummy packet with an inner UDP
 * datagram (dummy_udp_tun_udp_packet below).
 */
144 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
146 { ICE_IPV4_OFOS, 14 },
151 { ICE_UDP_ILOS, 84 },
152 { ICE_PROTOCOL_LAST, 0 },
/* Dummy VXLAN packet: outer MAC/IPv4/UDP(dst 4789 = 0x12b5), VXLAN header,
 * inner MAC/IPv4/UDP.
 */
156 u8 dummy_udp_tun_udp_packet[] = {
157 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
158 0x00, 0x00, 0x00, 0x00,
159 0x00, 0x00, 0x00, 0x00,
162 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
163 0x00, 0x01, 0x00, 0x00,
164 0x00, 0x11, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
166 0x00, 0x00, 0x00, 0x00,
168 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
169 0x00, 0x3a, 0x00, 0x00,
171 0x0c, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
179 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
180 0x00, 0x01, 0x00, 0x00,
181 0x00, 0x11, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00,
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
186 0x00, 0x08, 0x00, 0x00,
/* Protocol offsets for the plain (non-tunneled) MAC/IPv4/UDP dummy packet */
190 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
192 { ICE_IPV4_OFOS, 14 },
193 { ICE_UDP_ILOS, 34 },
194 { ICE_PROTOCOL_LAST, 0 },
/* Dummy MAC/IPv4/UDP packet, padded to a 4-byte multiple */
198 dummy_udp_packet[] = {
199 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
200 0x00, 0x00, 0x00, 0x00,
201 0x00, 0x00, 0x00, 0x00,
204 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
205 0x00, 0x01, 0x00, 0x00,
206 0x00, 0x11, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
211 0x00, 0x08, 0x00, 0x00,
213 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Protocol offsets for the plain (non-tunneled) MAC/IPv4/TCP dummy packet */
217 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
219 { ICE_IPV4_OFOS, 14 },
221 { ICE_PROTOCOL_LAST, 0 },
/* Dummy MAC/IPv4/TCP packet, padded to a 4-byte multiple */
225 dummy_tcp_packet[] = {
226 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
231 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
232 0x00, 0x01, 0x00, 0x00,
233 0x00, 0x06, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00,
235 0x00, 0x00, 0x00, 0x00,
237 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
238 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00,
240 0x50, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, /* 2 bytes for 4 byte alignment */
246 /* this is a recipe to profile bitmap association */
247 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
248 ICE_MAX_NUM_PROFILES);
/* Tracks which chained-recipe result indices are still free for allocation;
 * bits are cleared in ice_get_recp_frm_fw() as FW-programmed recipes are read.
 */
249 static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
/* Forward declaration; defined below and called from ice_get_recp_frm_fw() */
251 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
254 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
255 * @hw: pointer to hardware structure
256 * @recps: struct that we need to populate
257 * @rid: recipe ID that we are populating
259 * This function is used to populate all the necessary entries into our
260 * bookkeeping so that we have a current list of all the recipes that are
261 * programmed in the firmware.
263 static enum ice_status
264 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid)
266 u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
267 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
268 u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
269 struct ice_aqc_recipe_data_elem *tmp;
270 u16 num_recps = ICE_MAX_NUM_RECIPES;
271 struct ice_prot_lkup_ext *lkup_exts;
272 enum ice_status status;
274 /* Get recipe to profile map so that we can get the fv from
275 * lkups that we read for a recipe from FW.
277 ice_get_recp_to_prof_map(hw);
278 /* we need a buffer big enough to accommodate all the recipes */
279 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
280 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
282 return ICE_ERR_NO_MEMORY;
284 tmp[0].recipe_indx = rid;
285 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
286 /* non-zero status meaning recipe doesn't exist */
289 lkup_exts = &recps[rid].lkup_exts;
290 /* start populating all the entries for recps[rid] based on lkups from
293 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
294 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
295 struct ice_recp_grp_entry *rg_entry;
296 u8 prof_id, prot = 0;
299 rg_entry = (struct ice_recp_grp_entry *)
300 ice_malloc(hw, sizeof(*rg_entry));
302 status = ICE_ERR_NO_MEMORY;
305 /* Avoid 8th bit since it's the result enable bit */
306 result_idxs[result_idx] = root_bufs.content.result_indx &
307 ~ICE_AQ_RECIPE_RESULT_EN;
308 /* Check if result enable bit is set */
309 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
310 ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
311 result_idxs[result_idx++],
312 available_result_ids);
/* Copy this sub-recipe's profile association so we can look up its FV */
314 recipe_to_profile[tmp[sub_recps].recipe_indx],
315 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
316 /* get the first profile that is associated with rid */
317 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
318 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
319 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
321 rg_entry->fv_idx[i] = lkup_indx;
322 rg_entry->fv_mask[i] =
323 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
325 /* If the recipe is a chained recipe then all its
326 * child recipe's result will have a result index.
327 * To fill fv_words we should not use those result
328 * index, we only need the protocol ids and offsets.
329 * We will skip all the fv_idx which stores result
330 * index in them. We also need to skip any fv_idx which
331 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
332 * valid offset value.
334 if (result_idxs[0] == rg_entry->fv_idx[i] ||
335 result_idxs[1] == rg_entry->fv_idx[i] ||
336 result_idxs[2] == rg_entry->fv_idx[i] ||
337 result_idxs[3] == rg_entry->fv_idx[i] ||
338 result_idxs[4] == rg_entry->fv_idx[i] ||
339 rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
340 rg_entry->fv_idx[i] == 0)
/* Translate the FV index into a protocol ID + offset pair */
343 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
344 rg_entry->fv_idx[i], &prot, &off);
345 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
346 lkup_exts->fv_words[fv_word_idx].off = off;
349 /* populate rg_list with the data from the child entry of this
352 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
/* Record totals and keep a copy of the raw FW buffer for later use */
354 lkup_exts->n_val_words = fv_word_idx;
355 recps[rid].n_grp_count = num_recps;
356 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
357 ice_calloc(hw, recps[rid].n_grp_count,
358 sizeof(struct ice_aqc_recipe_data_elem));
359 if (!recps[rid].root_buf)
362 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
363 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
364 recps[rid].recp_created = true;
365 if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
366 recps[rid].root_rid = rid;
373 * ice_get_recp_to_prof_map - updates recipe to profile mapping
374 * @hw: pointer to hardware structure
376 * This function is used to populate recipe_to_profile matrix where index to
377 * this array is the recipe ID and the element is the mapping of which profiles
378 * is this recipe mapped to.
381 ice_get_recp_to_prof_map(struct ice_hw *hw)
383 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile; each response is a bitmap of recipes */
386 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
389 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
390 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
/* Invert the FW's profile->recipes answer into recipe->profiles */
393 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
394 if (ice_is_bit_set(r_bitmap, j))
395 ice_set_bit(i, recipe_to_profile[j]);
400 * ice_init_def_sw_recp - initialize the recipe book keeping tables
401 * @hw: pointer to the HW struct
403 * Allocate memory for the entire recipe table and initialize the structures/
404 * entries corresponding to basic recipes.
406 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
408 struct ice_sw_recipe *recps;
411 recps = (struct ice_sw_recipe *)
412 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
414 return ICE_ERR_NO_MEMORY;
/* Default each recipe to being its own root and init its lists/lock */
416 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
417 recps[i].root_rid = i;
418 INIT_LIST_HEAD(&recps[i].filt_rules);
419 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
420 INIT_LIST_HEAD(&recps[i].rg_list);
421 ice_init_lock(&recps[i].filt_rule_lock);
424 hw->switch_info->recp_list = recps;
430 * ice_aq_get_sw_cfg - get switch configuration
431 * @hw: pointer to the hardware structure
432 * @buf: pointer to the result buffer
433 * @buf_size: length of the buffer available for response
434 * @req_desc: pointer to requested descriptor
435 * @num_elems: pointer to number of elements
436 * @cd: pointer to command details structure or NULL
438 * Get switch configuration (0x0200) to be placed in 'buff'.
439 * This admin command returns information such as initial VSI/port number
440 * and switch ID it belongs to.
442 * NOTE: *req_desc is both an input/output parameter.
443 * The caller of this function first calls this function with *request_desc set
444 * to 0. If the response from f/w has *req_desc set to 0, all the switch
445 * configuration information has been returned; if non-zero (meaning not all
446 * the information was returned), the caller should call this function again
447 * with *req_desc set to the previous value returned by f/w to get the
448 * next block of switch configuration information.
450 * *num_elems is output only parameter. This reflects the number of elements
451 * in response buffer. The caller of this function should use *num_elems while
452 * parsing the response buffer.
454 static enum ice_status
455 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
456 u16 buf_size, u16 *req_desc, u16 *num_elems,
457 struct ice_sq_cd *cd)
459 struct ice_aqc_get_sw_cfg *cmd;
460 enum ice_status status;
461 struct ice_aq_desc desc;
463 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
464 cmd = &desc.params.get_sw_conf;
465 cmd->element = CPU_TO_LE16(*req_desc);
467 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back FW's continuation cookie and the element count */
469 *req_desc = LE16_TO_CPU(cmd->element);
470 *num_elems = LE16_TO_CPU(cmd->num_elems);
478 * ice_alloc_sw - allocate resources specific to switch
479 * @hw: pointer to the HW struct
480 * @ena_stats: true to turn on VEB stats
481 * @shared_res: true for shared resource, false for dedicated resource
482 * @sw_id: switch ID returned
483 * @counter_id: VEB counter ID returned
485 * allocates switch resources (SWID and VEB counter) (0x0208)
488 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
491 struct ice_aqc_alloc_free_res_elem *sw_buf;
492 struct ice_aqc_res_elem *sw_ele;
493 enum ice_status status;
496 buf_len = sizeof(*sw_buf);
497 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
498 ice_malloc(hw, buf_len);
500 return ICE_ERR_NO_MEMORY;
502 /* Prepare buffer for switch ID.
503 * The number of resource entries in buffer is passed as 1 since only a
504 * single switch/VEB instance is allocated, and hence a single sw_id
507 sw_buf->num_elems = CPU_TO_LE16(1);
509 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
510 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
511 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
513 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
514 ice_aqc_opc_alloc_res, NULL);
517 goto ice_alloc_sw_exit;
519 sw_ele = &sw_buf->elem[0];
520 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* NOTE(review): the VEB-counter allocation below appears to be inside a
 * conditional (presumably on ena_stats) whose opening line is elided from
 * this excerpt — confirm against the full file.
 */
523 /* Prepare buffer for VEB Counter */
524 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
525 struct ice_aqc_alloc_free_res_elem *counter_buf;
526 struct ice_aqc_res_elem *counter_ele;
528 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
529 ice_malloc(hw, buf_len);
531 status = ICE_ERR_NO_MEMORY;
532 goto ice_alloc_sw_exit;
535 /* The number of resource entries in buffer is passed as 1 since
536 * only a single switch/VEB instance is allocated, and hence a
537 * single VEB counter is requested.
539 counter_buf->num_elems = CPU_TO_LE16(1);
540 counter_buf->res_type =
541 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
542 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
543 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
547 ice_free(hw, counter_buf);
548 goto ice_alloc_sw_exit;
550 counter_ele = &counter_buf->elem[0];
551 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
552 ice_free(hw, counter_buf);
556 ice_free(hw, sw_buf);
561 * ice_free_sw - free resources specific to switch
562 * @hw: pointer to the HW struct
563 * @sw_id: switch ID returned
564 * @counter_id: VEB counter ID returned
566 * free switch resources (SWID and VEB counter) (0x0209)
568 * NOTE: This function frees multiple resources. It continues
569 * releasing other resources even after it encounters error.
570 * The error code returned is the last error it encountered.
572 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
574 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
575 enum ice_status status, ret_status;
578 buf_len = sizeof(*sw_buf);
579 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
580 ice_malloc(hw, buf_len);
582 return ICE_ERR_NO_MEMORY;
584 /* Prepare buffer to free for switch ID res.
585 * The number of resource entries in buffer is passed as 1 since only a
586 * single switch/VEB instance is freed, and hence a single sw_id
589 sw_buf->num_elems = CPU_TO_LE16(1);
590 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
591 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
593 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
594 ice_aqc_opc_free_res, NULL);
/* SWID free failure is logged but does not stop the VEB-counter free */
597 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
599 /* Prepare buffer to free for VEB Counter resource */
600 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
601 ice_malloc(hw, buf_len);
603 ice_free(hw, sw_buf);
604 return ICE_ERR_NO_MEMORY;
607 /* The number of resource entries in buffer is passed as 1 since only a
608 * single switch/VEB instance is freed, and hence a single VEB counter
611 counter_buf->num_elems = CPU_TO_LE16(1);
612 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
613 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
615 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
616 ice_aqc_opc_free_res, NULL);
618 ice_debug(hw, ICE_DBG_SW,
619 "VEB counter resource could not be freed\n");
623 ice_free(hw, counter_buf);
624 ice_free(hw, sw_buf);
630 * @hw: pointer to the HW struct
631 * @vsi_ctx: pointer to a VSI context struct
632 * @cd: pointer to command details structure or NULL
634 * Add a VSI context to the hardware (0x0210)
637 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
638 struct ice_sq_cd *cd)
640 struct ice_aqc_add_update_free_vsi_resp *res;
641 struct ice_aqc_add_get_update_free_vsi *cmd;
642 struct ice_aq_desc desc;
643 enum ice_status status;
645 cmd = &desc.params.vsi_cmd;
646 res = &desc.params.add_update_free_vsi_res;
648 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* If the caller supplies the VSI number, mark it valid in the command */
650 if (!vsi_ctx->alloc_from_pool)
651 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
652 ICE_AQ_VSI_IS_VALID);
654 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* Buffer is read by FW, so set the RD flag */
656 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
658 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
659 sizeof(vsi_ctx->info), cd);
/* On success, capture the HW-assigned VSI number and pool counters */
662 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
663 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
664 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
672 * @hw: pointer to the HW struct
673 * @vsi_ctx: pointer to a VSI context struct
674 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
675 * @cd: pointer to command details structure or NULL
677 * Free VSI context info from hardware (0x0213)
680 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
681 bool keep_vsi_alloc, struct ice_sq_cd *cd)
683 struct ice_aqc_add_update_free_vsi_resp *resp;
684 struct ice_aqc_add_get_update_free_vsi *cmd;
685 struct ice_aq_desc desc;
686 enum ice_status status;
688 cmd = &desc.params.vsi_cmd;
689 resp = &desc.params.add_update_free_vsi_res;
691 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
693 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): the keep-alloc flag set below is presumably guarded by
 * "if (keep_vsi_alloc)" on a line elided from this excerpt — confirm.
 */
695 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
697 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* On success, refresh the caller's view of pool usage */
699 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
700 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
708 * @hw: pointer to the HW struct
709 * @vsi_ctx: pointer to a VSI context struct
710 * @cd: pointer to command details structure or NULL
712 * Update VSI context in the hardware (0x0211)
715 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
716 struct ice_sq_cd *cd)
718 struct ice_aqc_add_update_free_vsi_resp *resp;
719 struct ice_aqc_add_get_update_free_vsi *cmd;
720 struct ice_aq_desc desc;
721 enum ice_status status;
723 cmd = &desc.params.vsi_cmd;
724 resp = &desc.params.add_update_free_vsi_res;
726 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
728 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Buffer is read by FW, so set the RD flag */
730 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
732 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
733 sizeof(vsi_ctx->info), cd);
/* On success, refresh the caller's view of pool usage */
736 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
737 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
744 * ice_is_vsi_valid - check whether the VSI is valid or not
745 * @hw: pointer to the HW struct
746 * @vsi_handle: VSI handle
748 * check whether the VSI is valid or not
750 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* Valid means: handle in range AND a context has been saved for it */
752 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
756 * ice_get_hw_vsi_num - return the HW VSI number
757 * @hw: pointer to the HW struct
758 * @vsi_handle: VSI handle
760 * return the HW VSI number
761 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
763 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* No bounds/NULL check here by design; see the Caution note above */
765 return hw->vsi_ctx[vsi_handle]->vsi_num;
769 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
770 * @hw: pointer to the HW struct
771 * @vsi_handle: VSI handle
773 * return the VSI context entry for a given VSI handle
775 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* Returns NULL for out-of-range handles (and for handles never saved) */
777 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
781 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
782 * @hw: pointer to the HW struct
783 * @vsi_handle: VSI handle
784 * @vsi: VSI context pointer
786 * save the VSI context entry for a given VSI handle
789 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* Ownership note: hw->vsi_ctx[] keeps the pointer; ice_clear_vsi_ctx frees it */
791 hw->vsi_ctx[vsi_handle] = vsi;
795 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
796 * @hw: pointer to the HW struct
797 * @vsi_handle: VSI handle
799 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
801 struct ice_vsi_ctx *vsi;
804 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free each traffic class's LAN queue context and drop the pointer */
807 ice_for_each_traffic_class(i) {
808 if (vsi->lan_q_ctx[i]) {
809 ice_free(hw, vsi->lan_q_ctx[i]);
810 vsi->lan_q_ctx[i] = NULL;
816 * ice_clear_vsi_ctx - clear the VSI context entry
817 * @hw: pointer to the HW struct
818 * @vsi_handle: VSI handle
820 * clear the VSI context entry
822 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
824 struct ice_vsi_ctx *vsi;
826 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then the saved context entry itself */
828 ice_clear_vsi_q_ctx(hw, vsi_handle);
830 hw->vsi_ctx[vsi_handle] = NULL;
835 * ice_clear_all_vsi_ctx - clear all the VSI context entries
836 * @hw: pointer to the HW struct
838 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* ice_clear_vsi_ctx tolerates handles with no saved context */
842 for (i = 0; i < ICE_MAX_VSI; i++)
843 ice_clear_vsi_ctx(hw, i);
847 * ice_add_vsi - add VSI context to the hardware and VSI handle list
848 * @hw: pointer to the HW struct
849 * @vsi_handle: unique VSI handle provided by drivers
850 * @vsi_ctx: pointer to a VSI context struct
851 * @cd: pointer to command details structure or NULL
853 * Add a VSI context to the hardware also add it into the VSI handle list.
854 * If this function gets called after reset for existing VSIs then update
855 * with the new HW VSI number in the corresponding VSI handle list entry.
858 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
859 struct ice_sq_cd *cd)
861 struct ice_vsi_ctx *tmp_vsi_ctx;
862 enum ice_status status;
864 if (vsi_handle >= ICE_MAX_VSI)
865 return ICE_ERR_PARAM;
866 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
869 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
871 /* Create a new VSI context */
872 tmp_vsi_ctx = (struct ice_vsi_ctx *)
873 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Allocation failed: undo the HW-side add before reporting OOM */
875 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
876 return ICE_ERR_NO_MEMORY;
878 *tmp_vsi_ctx = *vsi_ctx;
880 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
882 /* update with new HW VSI num */
883 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
884 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
891 * ice_free_vsi- free VSI context from hardware and VSI handle list
892 * @hw: pointer to the HW struct
893 * @vsi_handle: unique VSI handle
894 * @vsi_ctx: pointer to a VSI context struct
895 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
896 * @cd: pointer to command details structure or NULL
898 * Free VSI context info from hardware as well as from VSI handle list
901 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
902 bool keep_vsi_alloc, struct ice_sq_cd *cd)
904 enum ice_status status;
906 if (!ice_is_vsi_valid(hw, vsi_handle))
907 return ICE_ERR_PARAM;
/* Translate the driver handle to the HW VSI number before the AQ call */
908 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
909 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
911 ice_clear_vsi_ctx(hw, vsi_handle);
917 * @hw: pointer to the HW struct
918 * @vsi_handle: unique VSI handle
919 * @vsi_ctx: pointer to a VSI context struct
920 * @cd: pointer to command details structure or NULL
922 * Update VSI context in the hardware
925 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
926 struct ice_sq_cd *cd)
928 if (!ice_is_vsi_valid(hw, vsi_handle))
929 return ICE_ERR_PARAM;
/* Translate the driver handle to the HW VSI number before the AQ call */
930 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
931 return ice_aq_update_vsi(hw, vsi_ctx, cd);
935 * ice_aq_get_vsi_params
936 * @hw: pointer to the HW struct
937 * @vsi_ctx: pointer to a VSI context struct
938 * @cd: pointer to command details structure or NULL
940 * Get VSI context info from hardware (0x0212)
943 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
944 struct ice_sq_cd *cd)
946 struct ice_aqc_add_get_update_free_vsi *cmd;
947 struct ice_aqc_get_vsi_resp *resp;
948 struct ice_aq_desc desc;
949 enum ice_status status;
951 cmd = &desc.params.vsi_cmd;
952 resp = &desc.params.get_vsi_resp;
954 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
956 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
958 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
959 sizeof(vsi_ctx->info), cd);
/* On success, copy back the VSI number and pool counters from FW */
961 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
963 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
964 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
971 * ice_aq_add_update_mir_rule - add/update a mirror rule
972 * @hw: pointer to the HW struct
973 * @rule_type: Rule Type
974 * @dest_vsi: VSI number to which packets will be mirrored
975 * @count: length of the list
976 * @mr_buf: buffer for list of mirrored VSI numbers
977 * @cd: pointer to command details structure or NULL
980 * Add/Update Mirror Rule (0x260).
983 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
984 u16 count, struct ice_mir_rule_buf *mr_buf,
985 struct ice_sq_cd *cd, u16 *rule_id)
987 struct ice_aqc_add_update_mir_rule *cmd;
988 struct ice_aq_desc desc;
989 enum ice_status status;
990 __le16 *mr_list = NULL;
/* Validate argument combinations per rule type before building the cmd */
994 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
995 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
996 /* Make sure count and mr_buf are set for these rule_types */
997 if (!(count && mr_buf))
998 return ICE_ERR_PARAM;
1000 buf_size = count * sizeof(__le16);
1001 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1003 return ICE_ERR_NO_MEMORY;
1005 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1006 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1007 /* Make sure count and mr_buf are not set for these
1010 if (count || mr_buf)
1011 return ICE_ERR_PARAM;
1014 ice_debug(hw, ICE_DBG_SW,
1015 "Error due to unsupported rule_type %u\n", rule_type);
1016 return ICE_ERR_OUT_OF_RANGE;
1019 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1021 /* Pre-process 'mr_buf' items for add/update of virtual port
1022 * ingress/egress mirroring (but not physical port ingress/egress
1028 for (i = 0; i < count; i++) {
1031 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1033 /* Validate specified VSI number, make sure it is less
1034 * than ICE_MAX_VSI, if not return with error.
1036 if (id >= ICE_MAX_VSI) {
1037 ice_debug(hw, ICE_DBG_SW,
1038 "Error VSI index (%u) out-of-range\n",
1040 ice_free(hw, mr_list);
1041 return ICE_ERR_OUT_OF_RANGE;
1044 /* add VSI to mirror rule */
1047 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1048 else /* remove VSI from mirror rule */
1049 mr_list[i] = CPU_TO_LE16(id);
/* Fill in the command: an existing rule_id means update, else add */
1053 cmd = &desc.params.add_update_rule;
1054 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1055 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1056 ICE_AQC_RULE_ID_VALID_M);
1057 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1058 cmd->num_entries = CPU_TO_LE16(count);
1059 cmd->dest = CPU_TO_LE16(dest_vsi);
1061 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* On success, return the rule ID FW assigned (or confirmed) */
1063 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1065 ice_free(hw, mr_list);
1071 * ice_aq_delete_mir_rule - delete a mirror rule
1072 * @hw: pointer to the HW struct
1073 * @rule_id: Mirror rule ID (to be deleted)
1074 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1075 * otherwise it is returned to the shared pool
1076 * @cd: pointer to command details structure or NULL
1078 * Delete Mirror Rule (0x261).
1081 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1082 struct ice_sq_cd *cd)
1084 struct ice_aqc_delete_mir_rule *cmd;
1085 struct ice_aq_desc desc;
1087 /* rule_id should be in the range 0...63 */
1088 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1089 return ICE_ERR_OUT_OF_RANGE;
1091 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1093 cmd = &desc.params.del_rule;
1094 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1095 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): the flag set below is presumably guarded by
 * "if (keep_allocd)" on a line elided from this excerpt — confirm.
 */
1098 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1100 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1104 * ice_aq_alloc_free_vsi_list
1105 * @hw: pointer to the HW struct
1106 * @vsi_list_id: VSI list ID returned or used for lookup
1107 * @lkup_type: switch rule filter lookup type
1108 * @opc: switch rules population command type - pass in the command opcode
1110 * allocates or free a VSI list resource
1112 static enum ice_status
1113 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1114 enum ice_sw_lkup_type lkup_type,
1115 enum ice_adminq_opc opc)
1117 struct ice_aqc_alloc_free_res_elem *sw_buf;
1118 struct ice_aqc_res_elem *vsi_ele;
1119 enum ice_status status;
1122 buf_len = sizeof(*sw_buf);
1123 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1124 ice_malloc(hw, buf_len);
1126 return ICE_ERR_NO_MEMORY;
1127 sw_buf->num_elems = CPU_TO_LE16(1);
/* Map the lookup type to a resource type: VLAN lookups use the prune
 * list, all other supported lookups use the replication list.
 */
1129 if (lkup_type == ICE_SW_LKUP_MAC ||
1130 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1131 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1132 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1133 lkup_type == ICE_SW_LKUP_PROMISC ||
1134 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1135 lkup_type == ICE_SW_LKUP_LAST) {
1136 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1137 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1139 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1141 status = ICE_ERR_PARAM;
1142 goto ice_aq_alloc_free_vsi_list_exit;
/* Freeing requires the caller-supplied list ID in the request */
1145 if (opc == ice_aqc_opc_free_res)
1146 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1148 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1150 goto ice_aq_alloc_free_vsi_list_exit;
/* Allocation returns the newly assigned list ID */
1152 if (opc == ice_aqc_opc_alloc_res) {
1153 vsi_ele = &sw_buf->elem[0];
1154 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1157 ice_aq_alloc_free_vsi_list_exit:
1158 ice_free(hw, sw_buf);
1163 * ice_aq_set_storm_ctrl - Sets storm control configuration
1164 * @hw: pointer to the HW struct
1165 * @bcast_thresh: represents the upper threshold for broadcast storm control
1166 * @mcast_thresh: represents the upper threshold for multicast storm control
1167 * @ctl_bitmask: bitmask of knobs controlling storm control behavior
1169 * Sets the storm control configuration (0x0280)
1172 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1175 struct ice_aqc_storm_cfg *cmd;
1176 struct ice_aq_desc desc;
1178 cmd = &desc.params.storm_conf;
1180 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the width FW accepts */
1182 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1183 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1184 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1186 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1190 * ice_aq_get_storm_ctrl - gets storm control configuration
1191 * @hw: pointer to the HW struct
1192 * @bcast_thresh: represents the upper threshold for broadcast storm control
1193 * @mcast_thresh: represents the upper threshold for multicast storm control
1194 * @ctl_bitmask: storm control control knobs
1196 * Gets the storm control configuration (0x0281)
/* Read back the storm control configuration (AQ opcode 0x0281) into the
 * caller-supplied u32 pointers. NOTE(review): the success-check `if` line
 * is elided in this listing; presumably the response fields are copied out
 * only when the AQ command succeeds — confirm against the full source.
 */
1199 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1202 enum ice_status status;
1203 struct ice_aq_desc desc;
1205 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1207 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1209 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
/* Mask off reserved bits of the threshold fields before returning them. */
1212 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1215 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1218 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1225 * ice_aq_sw_rules - add/update/remove switch rules
1226 * @hw: pointer to the HW struct
1227 * @rule_list: pointer to switch rule population list
1228 * @rule_list_sz: total size of the rule list in bytes
1229 * @num_rules: number of switch rules in the rule_list
1230 * @opc: switch rules population command type - pass in the command opcode
1231 * @cd: pointer to command details structure or NULL
1233 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
/* Submit a batch of switch rules to firmware with one of the three
 * rule opcodes: add (0x02a0), update (0x02a1) or remove (0x02a2).
 * Any other opcode is rejected before a descriptor is built.
 */
1235 static enum ice_status
1236 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1237 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1239 struct ice_aq_desc desc;
1241 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");
1243 if (opc != ice_aqc_opc_add_sw_rules &&
1244 opc != ice_aqc_opc_update_sw_rules &&
1245 opc != ice_aqc_opc_remove_sw_rules)
1246 return ICE_ERR_PARAM;
1248 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag: the command sends the rule_list buffer to firmware. */
1250 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1251 desc.params.sw_rules.num_rules_fltr_entry_index =
1252 CPU_TO_LE16(num_rules);
1253 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1257 * ice_aq_add_recipe - add switch recipe
1258 * @hw: pointer to the HW struct
1259 * @s_recipe_list: pointer to switch rule population list
1260 * @num_recipes: number of switch recipes in the list
1261 * @cd: pointer to command details structure or NULL
/* Add switch recipe(s) to firmware. The buffer size is derived from the
 * number of recipe elements in s_recipe_list; RD is set because the
 * buffer is written to firmware.
 */
1266 ice_aq_add_recipe(struct ice_hw *hw,
1267 struct ice_aqc_recipe_data_elem *s_recipe_list,
1268 u16 num_recipes, struct ice_sq_cd *cd)
1270 struct ice_aqc_add_get_recipe *cmd;
1271 struct ice_aq_desc desc;
1274 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
1275 cmd = &desc.params.add_get_recipe;
1276 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1278 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1279 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1281 buf_size = num_recipes * sizeof(*s_recipe_list);
1283 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1287 * ice_aq_get_recipe - get switch recipe
1288 * @hw: pointer to the HW struct
1289 * @s_recipe_list: pointer to switch rule population list
1290 * @num_recipes: pointer to the number of recipes (input and output)
1291 * @recipe_root: root recipe number of recipe(s) to retrieve
1292 * @cd: pointer to command details structure or NULL
1296 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1297 * On output, *num_recipes will equal the number of entries returned in
1300 * The caller must supply enough space in s_recipe_list to hold all possible
1301 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
/* Retrieve recipe(s) rooted at recipe_root from firmware. The caller must
 * size s_recipe_list for the worst case and pass *num_recipes ==
 * ICE_MAX_NUM_RECIPES; on return *num_recipes holds the count firmware
 * actually reported.
 */
1304 ice_aq_get_recipe(struct ice_hw *hw,
1305 struct ice_aqc_recipe_data_elem *s_recipe_list,
1306 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1308 struct ice_aqc_add_get_recipe *cmd;
1309 struct ice_aq_desc desc;
1310 enum ice_status status;
1313 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1314 return ICE_ERR_PARAM;
1316 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
1317 cmd = &desc.params.add_get_recipe;
1318 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1320 cmd->return_index = CPU_TO_LE16(recipe_root);
1321 cmd->num_sub_recipes = 0;
1323 buf_size = *num_recipes * sizeof(*s_recipe_list);
1325 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* Firmware fills num_sub_recipes in the response descriptor. */
1326 /* cppcheck-suppress constArgument */
1327 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1333 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1334 * @hw: pointer to the HW struct
1335 * @profile_id: package profile ID to associate the recipe with
1336 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1337 * @cd: pointer to command details structure or NULL
1338 * Recipe to profile association (0x0291)
/* Associate one or more recipes with a package profile (AQ 0x0291).
 * r_bitmap has one bit per recipe ID and is copied verbatim into the
 * command descriptor; no extra data buffer is sent.
 */
1341 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1342 struct ice_sq_cd *cd)
1344 struct ice_aqc_recipe_to_profile *cmd;
1345 struct ice_aq_desc desc;
1347 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
1348 cmd = &desc.params.recipe_to_profile;
1349 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1350 cmd->profile_id = CPU_TO_LE16(profile_id);
1351 /* Set the recipe ID bit in the bitmask to let the device know which
1352 * profile we are associating the recipe to
1354 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1355 ICE_NONDMA_TO_NONDMA);
1357 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1361 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1362 * @hw: pointer to the HW struct
1363 * @profile_id: package profile ID to associate the recipe with
1364 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1365 * @cd: pointer to command details structure or NULL
1366 * Associate profile ID with given recipe (0x0293)
/* Query which recipes are associated with a profile (AQ 0x0293) and copy
 * the recipe bitmap from the response descriptor into r_bitmap.
 * NOTE(review): the `if (!status)` guard line appears to be elided in
 * this listing — confirm the copy-out is conditional in the full source.
 */
1369 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1370 struct ice_sq_cd *cd)
1372 struct ice_aqc_recipe_to_profile *cmd;
1373 struct ice_aq_desc desc;
1374 enum ice_status status;
1376 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
1377 cmd = &desc.params.recipe_to_profile;
1378 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1379 cmd->profile_id = CPU_TO_LE16(profile_id);
1381 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1383 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1384 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1390 * ice_alloc_recipe - add recipe resource
1391 * @hw: pointer to the hardware structure
1392 * @rid: recipe ID returned as response to AQ call
/* Allocate a shared recipe resource from firmware and return its ID in
 * *rid. The resource type field combines the recipe type with the
 * "shared" flag so the recipe can be used across functions.
 */
1394 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1396 struct ice_aqc_alloc_free_res_elem *sw_buf;
1397 enum ice_status status;
1400 buf_len = sizeof(*sw_buf);
1401 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1403 return ICE_ERR_NO_MEMORY;
1405 sw_buf->num_elems = CPU_TO_LE16(1);
1406 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1407 ICE_AQC_RES_TYPE_S) |
1408 ICE_AQC_RES_TYPE_FLAG_SHARED);
1409 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1410 ice_aqc_opc_alloc_res, NULL);
/* Firmware returns the allocated recipe ID in the first element. */
1412 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1413 ice_free(hw, sw_buf);
1418 /* ice_init_port_info - Initialize port_info with switch configuration data
1419 * @pi: pointer to port_info
1420 * @vsi_port_num: VSI number or port number
1421 * @type: Type of switch element (port or VSI)
1422 * @swid: switch ID of the switch the element is attached to
1423 * @pf_vf_num: PF or VF number
1424 * @is_vf: true if the element is a VF, false otherwise
/* Populate a port_info structure from one switch configuration element.
 * Only the physical-port case is visible in this listing; other element
 * types fall through to the default branch, which logs and ignores them.
 */
1427 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1428 u16 swid, u16 pf_vf_num, bool is_vf)
1431 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1432 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1434 pi->pf_vf_num = pf_vf_num;
/* No default VSI assigned yet for this port. */
1436 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1437 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1440 ice_debug(pi->hw, ICE_DBG_SW,
1441 "incorrect VSI/port type received\n");
1446 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1447 * @hw: pointer to the hardware structure
/* Fetch the initial switch configuration from firmware and initialize
 * hw->port_info for the (single expected) physical/virtual port.
 * ice_aq_get_sw_cfg is called in a loop because the full configuration
 * may not fit in one response; a non-zero req_desc requests the next
 * chunk. NOTE(review): several loop-scope declarations and the `do {`
 * line are elided in this listing.
 */
1449 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1451 struct ice_aqc_get_sw_cfg_resp *rbuf;
1452 enum ice_status status;
1453 u16 num_total_ports;
/* This driver instance expects exactly one port. */
1459 num_total_ports = 1;
1461 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1462 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1465 return ICE_ERR_NO_MEMORY;
1467 /* Multiple calls to ice_aq_get_sw_cfg may be required
1468 * to get all the switch configuration information. The need
1469 * for additional calls is indicated by ice_aq_get_sw_cfg
1470 * writing a non-zero value in req_desc
1473 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1474 &req_desc, &num_elems, NULL);
1479 for (i = 0; i < num_elems; i++) {
1480 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1481 u16 pf_vf_num, swid, vsi_port_num;
1485 ele = rbuf[i].elements;
/* Decode element fields: number, owning function, switch ID. */
1486 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1487 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1489 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1490 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1492 swid = LE16_TO_CPU(ele->swid);
1494 if (LE16_TO_CPU(ele->pf_vf_num) &
1495 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1498 type = LE16_TO_CPU(ele->vsi_port_num) >>
1499 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1502 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1503 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
/* Guard against firmware reporting more ports than expected. */
1504 if (j == num_total_ports) {
1505 ice_debug(hw, ICE_DBG_SW,
1506 "more ports than expected\n");
1507 status = ICE_ERR_CFG;
1510 ice_init_port_info(hw->port_info,
1511 vsi_port_num, type, swid,
/* Keep requesting chunks until firmware clears req_desc or an
 * error occurs.
 */
1519 } while (req_desc && !status);
1523 ice_free(hw, (void *)rbuf);
1529 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1530 * @hw: pointer to the hardware structure
1531 * @fi: filter info structure to fill/update
1533 * This helper function populates the lb_en and lan_en elements of the provided
1534 * ice_fltr_info struct using the switch's type and characteristics of the
1535 * switch rule being configured.
/* Populate the lb_en/lan_en flags of a filter based on the switch mode
 * (VEB/VEPA) and the characteristics of the rule. NOTE(review): the
 * assignments to fi->lb_en / fi->lan_en themselves are elided in this
 * listing; only the selection conditions are visible.
 */
1537 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Only Tx-direction forwarding actions need lb_en/lan_en tuning. */
1541 if ((fi->flag & ICE_FLTR_TX) &&
1542 (fi->fltr_act == ICE_FWD_TO_VSI ||
1543 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1544 fi->fltr_act == ICE_FWD_TO_Q ||
1545 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1546 /* Setting LB for prune actions will result in replicated
1547 * packets to the internal switch that will be dropped.
1549 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1552 /* Set lan_en to TRUE if
1553 * 1. The switch is a VEB AND
1555 * 2.1 The lookup is a directional lookup like ethertype,
1556 * promiscuous, ethertype-MAC, promiscuous-VLAN
1557 * and default-port OR
1558 * 2.2 The lookup is VLAN, OR
1559 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1560 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1564 * The switch is a VEPA.
1566 * In all other cases, the LAN enable has to be set to false.
1569 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1570 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1571 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1572 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1573 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1574 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1575 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1576 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1577 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1578 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1587 * ice_ilog2 - Calculates integer log base 2 of a number
1588 * @n: number on which to perform operation
/* Integer log base 2: index of the highest set bit of n, scanning from
 * bit 63 down. NOTE(review): the return statements are elided in this
 * listing; presumably returns i on the first set bit found.
 */
1590 static int ice_ilog2(u64 n)
1594 for (i = 63; i >= 0; i--)
1595 if (((u64)1 << i) & n)
1602 * ice_fill_sw_rule - Helper function to fill switch rule structure
1603 * @hw: pointer to the hardware structure
1604 * @f_info: entry containing packet forwarding information
1605 * @s_rule: switch rule structure to be filled in based on mac_entry
1606 * @opc: switch rules population command type - pass in the command opcode
/* Build one lookup Tx/Rx switch rule element from the filter info:
 * sets the action word, the recipe/source fields and a dummy Ethernet
 * header patched with DA / ethertype / VLAN ID as the lookup type
 * requires. For removals only the rule index is needed, so the function
 * short-circuits. NOTE(review): several lines (local declarations,
 * `break`s, `return`) are elided in this line-sampled listing.
 */
1609 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1610 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* Sentinel: > ICE_MAX_VLAN_ID means "no VLAN to program". */
1612 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* Remove rules only need the index of the rule being deleted. */
1620 if (opc == ice_aqc_opc_remove_sw_rules) {
1621 s_rule->pdata.lkup_tx_rx.act = 0;
1622 s_rule->pdata.lkup_tx_rx.index =
1623 CPU_TO_LE16(f_info->fltr_rule_id);
1624 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1628 eth_hdr_sz = sizeof(dummy_eth_header);
1629 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1631 /* initialize the ether header with a dummy header */
1632 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1633 ice_fill_sw_info(hw, f_info);
/* Encode the forwarding action into the single-action word. */
1635 switch (f_info->fltr_act) {
1636 case ICE_FWD_TO_VSI:
1637 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1638 ICE_SINGLE_ACT_VSI_ID_M;
1639 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1640 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1641 ICE_SINGLE_ACT_VALID_BIT;
1643 case ICE_FWD_TO_VSI_LIST:
1644 act |= ICE_SINGLE_ACT_VSI_LIST;
1645 act |= (f_info->fwd_id.vsi_list_id <<
1646 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1647 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1648 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1649 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1650 ICE_SINGLE_ACT_VALID_BIT;
1653 act |= ICE_SINGLE_ACT_TO_Q;
1654 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1655 ICE_SINGLE_ACT_Q_INDEX_M;
1657 case ICE_DROP_PACKET:
1658 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1659 ICE_SINGLE_ACT_VALID_BIT;
1661 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size. */
1662 q_rgn = f_info->qgrp_size > 0 ?
1663 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1664 act |= ICE_SINGLE_ACT_TO_Q;
1665 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1666 ICE_SINGLE_ACT_Q_INDEX_M;
1667 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1668 ICE_SINGLE_ACT_Q_REGION_M;
/* Apply loopback / LAN enable decisions made by ice_fill_sw_info. */
1675 act |= ICE_SINGLE_ACT_LB_ENABLE;
1677 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick which fields of the dummy header to patch per lookup type. */
1679 switch (f_info->lkup_type) {
1680 case ICE_SW_LKUP_MAC:
1681 daddr = f_info->l_data.mac.mac_addr;
1683 case ICE_SW_LKUP_VLAN:
1684 vlan_id = f_info->l_data.vlan.vlan_id;
1685 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1686 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1687 act |= ICE_SINGLE_ACT_PRUNE;
1688 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1691 case ICE_SW_LKUP_ETHERTYPE_MAC:
1692 daddr = f_info->l_data.ethertype_mac.mac_addr;
1694 case ICE_SW_LKUP_ETHERTYPE:
/* Ethertype is written in network byte order at offset 12. */
1695 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1696 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1698 case ICE_SW_LKUP_MAC_VLAN:
1699 daddr = f_info->l_data.mac_vlan.mac_addr;
1700 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1702 case ICE_SW_LKUP_PROMISC_VLAN:
1703 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1705 case ICE_SW_LKUP_PROMISC:
1706 daddr = f_info->l_data.mac_vlan.mac_addr;
/* Rule direction (Rx vs Tx) selects the element type. */
1712 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1713 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1714 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1716 /* Recipe set depending on lookup type */
1717 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1718 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1719 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Patch destination MAC when the lookup type provided one. */
1722 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1723 ICE_NONDMA_TO_NONDMA);
/* Patch VLAN TCI only when a valid VLAN ID (<= 0xFFF) was set. */
1725 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1726 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1727 *off = CPU_TO_BE16(vlan_id);
1730 /* Create the switch rule with the final dummy Ethernet header */
1731 if (opc != ice_aqc_opc_update_sw_rules)
1732 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1736 * ice_add_marker_act
1737 * @hw: pointer to the hardware structure
1738 * @m_ent: the management entry for which sw marker needs to be added
1739 * @sw_marker: sw marker to tag the Rx descriptor with
1740 * @l_id: large action resource ID
1742 * Create a large action to hold software marker and update the switch rule
1743 * entry pointed by m_ent with newly created large action
/* Attach a software marker to an existing MAC rule by creating a 3-entry
 * large action (forward + profile-ID generic value + marker generic
 * value) and updating the original rule to point at it via a single
 * PTR action. Both rules are built in one buffer and submitted with one
 * update command.
 */
1745 static enum ice_status
1746 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1747 u16 sw_marker, u16 l_id)
1749 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1750 /* For software marker we need 3 large actions
1751 * 1. FWD action: FWD TO VSI or VSI LIST
1752 * 2. GENERIC VALUE action to hold the profile ID
1753 * 3. GENERIC VALUE action to hold the software marker ID
1755 const u16 num_lg_acts = 3;
1756 enum ice_status status;
/* Only MAC lookup rules support the marker large action here. */
1762 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1763 return ICE_ERR_PARAM;
1765 /* Create two back-to-back switch rules and submit them to the HW using
1766 * one memory buffer:
1770 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1771 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1772 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
1774 return ICE_ERR_NO_MEMORY;
/* Second rule starts immediately after the large action element. */
1776 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1778 /* Fill in the first switch rule i.e. large action */
1779 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1780 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1781 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
1783 /* First action VSI forwarding or VSI list forwarding depending on how
1786 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1787 m_ent->fltr_info.fwd_id.hw_vsi_id;
1789 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1790 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1791 ICE_LG_ACT_VSI_LIST_ID_M;
1792 if (m_ent->vsi_count > 1)
1793 act |= ICE_LG_ACT_VSI_LIST;
1794 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1796 /* Second action descriptor type */
1797 act = ICE_LG_ACT_GENERIC;
1799 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1800 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* Offset selects the Rx descriptor profile-index slot. */
1802 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1803 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1805 /* Third action Marker value */
1806 act |= ICE_LG_ACT_GENERIC;
1807 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1808 ICE_LG_ACT_GENERIC_VALUE_M;
1810 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
1812 /* call the fill switch rule to fill the lookup Tx Rx structure */
1813 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1814 ice_aqc_opc_update_sw_rules);
1816 /* Update the action to point to the large action ID */
1817 rx_tx->pdata.lkup_tx_rx.act =
1818 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
1819 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1820 ICE_SINGLE_ACT_PTR_VAL_M));
1822 /* Use the filter rule ID of the previously created rule with single
1823 * act. Once the update happens, hardware will treat this as large
1826 rx_tx->pdata.lkup_tx_rx.index =
1827 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
1829 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1830 ice_aqc_opc_update_sw_rules, NULL);
/* Record the large action index and marker on the management entry. */
1832 m_ent->lg_act_idx = l_id;
1833 m_ent->sw_marker_id = sw_marker;
1836 ice_free(hw, lg_act);
1841 * ice_add_counter_act - add/update filter rule with counter action
1842 * @hw: pointer to the hardware structure
1843 * @m_ent: the management entry for which counter needs to be added
1844 * @counter_id: VLAN counter ID returned as part of allocate resource
1845 * @l_id: large action resource ID
/* Attach a statistics counter to an existing MAC rule: build a 2-entry
 * large action (forward + stat-count) plus an updated copy of the
 * original rule pointing at it, and submit both in one buffer with a
 * single update command. Mirrors ice_add_marker_act.
 */
1848 static enum ice_status
1849 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1850 u16 counter_id, u16 l_id)
1851 struct ice_aqc_sw_rules_elem *lg_act;
1852 struct ice_aqc_sw_rules_elem *rx_tx;
1853 enum ice_status status;
1854 /* 2 actions will be added while adding a large action counter */
1855 const int num_acts = 2;
/* Only MAC lookup rules support the counter large action here. */
1862 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1863 return ICE_ERR_PARAM;
1865 /* Create two back-to-back switch rules and submit them to the HW using
1866 * one memory buffer:
1870 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
1871 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1872 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
1875 return ICE_ERR_NO_MEMORY;
/* Second rule starts immediately after the large action element. */
1877 rx_tx = (struct ice_aqc_sw_rules_elem *)
1878 ((u8 *)lg_act + lg_act_size);
1880 /* Fill in the first switch rule i.e. large action */
1881 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1882 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1883 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
1885 /* First action VSI forwarding or VSI list forwarding depending on how
1888 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1889 m_ent->fltr_info.fwd_id.hw_vsi_id;
1891 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1892 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1893 ICE_LG_ACT_VSI_LIST_ID_M;
1894 if (m_ent->vsi_count > 1)
1895 act |= ICE_LG_ACT_VSI_LIST;
1896 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1898 /* Second action counter ID */
1899 act = ICE_LG_ACT_STAT_COUNT;
1900 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
1901 ICE_LG_ACT_STAT_COUNT_M;
1902 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1904 /* call the fill switch rule to fill the lookup Tx Rx structure */
1905 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1906 ice_aqc_opc_update_sw_rules);
/* Redirect the lookup rule to the new large action via a PTR action. */
1908 act = ICE_SINGLE_ACT_PTR;
1909 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
1910 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1912 /* Use the filter rule ID of the previously created rule with single
1913 * act. Once the update happens, hardware will treat this as large
1916 f_rule_id = m_ent->fltr_info.fltr_rule_id;
1917 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
1919 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1920 ice_aqc_opc_update_sw_rules, NULL);
/* Record the large action index and counter on the management entry. */
1922 m_ent->lg_act_idx = l_id;
1923 m_ent->counter_index = counter_id;
1926 ice_free(hw, lg_act);
1931 * ice_create_vsi_list_map
1932 * @hw: pointer to the hardware structure
1933 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1934 * @num_vsi: number of VSI handles in the array
1935 * @vsi_list_id: VSI list ID generated as part of allocate resource
1937 * Helper function to create a new entry of VSI list ID to VSI mapping
1938 * using the given VSI list ID
/* Create a VSI-list-ID -> VSI-handle-bitmap bookkeeping entry and link
 * it onto the switch's vsi_list_map_head list. NOTE(review): the
 * allocation null-check and return statement are elided in this listing.
 */
1940 static struct ice_vsi_list_map_info *
1941 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1944 struct ice_switch_info *sw = hw->switch_info;
1945 struct ice_vsi_list_map_info *v_map;
1948 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
1953 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI handle in the map's bitmap. */
1955 for (i = 0; i < num_vsi; i++)
1956 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
1958 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
1963 * ice_update_vsi_list_rule
1964 * @hw: pointer to the hardware structure
1965 * @vsi_handle_arr: array of VSI handles to form a VSI list
1966 * @num_vsi: number of VSI handles in the array
1967 * @vsi_list_id: VSI list ID generated as part of allocate resource
1968 * @remove: Boolean value to indicate if this is a remove action
1969 * @opc: switch rules population command type - pass in the command opcode
1970 * @lkup_type: lookup type of the filter
1972 * Call AQ command to add a new switch rule or update existing switch rule
1973 * using the given VSI list ID
/* Add VSIs to (or remove them from) an existing VSI list by issuing a
 * VSI_LIST SET/CLEAR (or PRUNE_LIST SET/CLEAR for VLAN) switch rule.
 * Each VSI handle is validated and translated to its hardware VSI
 * number before being placed in the rule.
 */
1975 static enum ice_status
1976 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1977 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1978 enum ice_sw_lkup_type lkup_type)
1980 struct ice_aqc_sw_rules_elem *s_rule;
1981 enum ice_status status;
1987 return ICE_ERR_PARAM;
/* Choose SET vs CLEAR and list vs prune-list by lookup type, matching
 * the resource-type selection in ice_aq_alloc_free_vsi_list.
 */
1989 if (lkup_type == ICE_SW_LKUP_MAC ||
1990 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1991 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1992 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1993 lkup_type == ICE_SW_LKUP_PROMISC ||
1994 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1995 lkup_type == ICE_SW_LKUP_LAST)
1996 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1997 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1998 else if (lkup_type == ICE_SW_LKUP_VLAN)
1999 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2000 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2002 return ICE_ERR_PARAM;
2004 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2005 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2007 return ICE_ERR_NO_MEMORY;
2008 for (i = 0; i < num_vsi; i++) {
2009 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2010 status = ICE_ERR_PARAM;
2013 /* AQ call requires hw_vsi_id(s) */
2014 s_rule->pdata.vsi_list.vsi[i] =
2015 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2018 s_rule->type = CPU_TO_LE16(type);
2019 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2020 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2022 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2025 ice_free(hw, s_rule);
2030 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2031 * @hw: pointer to the HW struct
2032 * @vsi_handle_arr: array of VSI handles to form a VSI list
2033 * @num_vsi: number of VSI handles in the array
2034 * @vsi_list_id: stores the ID of the VSI list to be created
2035 * @lkup_type: switch rule filter's lookup type
/* Allocate a new VSI list resource (returned in *vsi_list_id) and then
 * populate it with the given VSI handles via an add-switch-rules call.
 */
2037 static enum ice_status
2038 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2039 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2041 enum ice_status status;
2043 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2044 ice_aqc_opc_alloc_res)
2048 /* Update the newly created VSI list to include the specified VSIs */
2049 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2050 *vsi_list_id, false,
2051 ice_aqc_opc_add_sw_rules, lkup_type);
2055 * ice_create_pkt_fwd_rule
2056 * @hw: pointer to the hardware structure
2057 * @f_entry: entry containing packet forwarding information
2059 * Create switch rule with given filter information and add an entry
2060 * to the corresponding filter management list to track this switch rule
/* Create a packet-forwarding switch rule from f_entry, then allocate a
 * filter management entry to track it and link that entry onto the
 * per-lookup-type filter list. The firmware-assigned rule ID is copied
 * back into both the caller's entry and the management entry.
 */
2063 static enum ice_status
2064 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2065 struct ice_fltr_list_entry *f_entry)
2067 struct ice_fltr_mgmt_list_entry *fm_entry;
2068 struct ice_aqc_sw_rules_elem *s_rule;
2069 enum ice_sw_lkup_type l_type;
2070 struct ice_sw_recipe *recp;
2071 enum ice_status status;
2073 s_rule = (struct ice_aqc_sw_rules_elem *)
2074 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2076 return ICE_ERR_NO_MEMORY;
2077 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2078 ice_malloc(hw, sizeof(*fm_entry));
2080 status = ICE_ERR_NO_MEMORY;
2081 goto ice_create_pkt_fwd_rule_exit;
2084 fm_entry->fltr_info = f_entry->fltr_info;
2086 /* Initialize all the fields for the management entry */
2087 fm_entry->vsi_count = 1;
2088 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2089 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2090 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2092 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2093 ice_aqc_opc_add_sw_rules);
2095 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2096 ice_aqc_opc_add_sw_rules, NULL);
/* On failure the management entry is not tracked, so free it here. */
2098 ice_free(hw, fm_entry);
2099 goto ice_create_pkt_fwd_rule_exit;
/* Propagate the firmware-assigned rule ID to both entries. */
2102 f_entry->fltr_info.fltr_rule_id =
2103 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2104 fm_entry->fltr_info.fltr_rule_id =
2105 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2107 /* The book keeping entries will get removed when base driver
2108 * calls remove filter AQ command
2110 l_type = fm_entry->fltr_info.lkup_type;
2111 recp = &hw->switch_info->recp_list[l_type];
2112 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2114 ice_create_pkt_fwd_rule_exit:
2115 ice_free(hw, s_rule);
2120 * ice_update_pkt_fwd_rule
2121 * @hw: pointer to the hardware structure
2122 * @f_info: filter information for switch rule
2124 * Call AQ command to update a previously created switch rule with a
/* Rebuild an existing switch rule from f_info and submit it with the
 * update opcode, keeping the original firmware rule index so the rule
 * is modified in place rather than re-added.
 */
2127 static enum ice_status
2128 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2130 struct ice_aqc_sw_rules_elem *s_rule;
2131 enum ice_status status;
2133 s_rule = (struct ice_aqc_sw_rules_elem *)
2134 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2136 return ICE_ERR_NO_MEMORY;
2138 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the previously created rule by its firmware rule ID. */
2140 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2142 /* Update switch rule with new rule set to forward VSI list */
2143 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2144 ice_aqc_opc_update_sw_rules, NULL);
2146 ice_free(hw, s_rule);
2151 * ice_update_sw_rule_bridge_mode
2152 * @hw: pointer to the HW struct
2154 * Updates unicast switch filter rules based on VEB/VEPA mode
/* Walk the MAC filter rule list under its lock and re-issue an update
 * for every unicast Tx forwarding rule so the rules reflect the current
 * VEB/VEPA bridge mode.
 */
2156 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2158 struct ice_switch_info *sw = hw->switch_info;
2159 struct ice_fltr_mgmt_list_entry *fm_entry;
2160 enum ice_status status = ICE_SUCCESS;
2161 struct LIST_HEAD_TYPE *rule_head;
2162 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2164 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2165 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2167 ice_acquire_lock(rule_lock);
2168 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2170 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2171 u8 *addr = fi->l_data.mac.mac_addr;
2173 /* Update unicast Tx rules to reflect the selected
2176 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2177 (fi->fltr_act == ICE_FWD_TO_VSI ||
2178 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2179 fi->fltr_act == ICE_FWD_TO_Q ||
2180 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2181 status = ice_update_pkt_fwd_rule(hw, fi);
2187 ice_release_lock(rule_lock);
2193 * ice_add_update_vsi_list
2194 * @hw: pointer to the hardware structure
2195 * @m_entry: pointer to current filter management list entry
2196 * @cur_fltr: filter information from the book keeping entry
2197 * @new_fltr: filter information with the new VSI to be added
2199 * Call AQ command to add or update previously created VSI list with new VSI.
2201 * Helper function to do book keeping associated with adding filter information
2202 * The algorithm to do the book keeping is described below :
2203 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2204 * if only one VSI has been added till now
2205 * Allocate a new VSI list and add two VSIs
2206 * to this list using switch rule command
2207 * Update the previously created switch rule with the
2208 * newly created VSI list ID
2209 * if a VSI list was previously created
2210 * Add the new VSI to the previously created VSI list set
2211 * using the update switch rule command
/* Bookkeeping for subscribing an additional VSI to an existing filter.
 * If the filter currently forwards to a single VSI, a two-entry VSI list
 * is created and the rule is rewritten to forward to that list; if a
 * VSI list already exists, the new VSI is appended to it. Queue and
 * queue-group actions cannot be converted and are rejected.
 */
2213 static enum ice_status
2214 ice_add_update_vsi_list(struct ice_hw *hw,
2215 struct ice_fltr_mgmt_list_entry *m_entry,
2216 struct ice_fltr_info *cur_fltr,
2217 struct ice_fltr_info *new_fltr)
2219 enum ice_status status = ICE_SUCCESS;
2220 u16 vsi_list_id = 0;
/* Existing queue/queue-group actions cannot take a second VSI. */
2222 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2223 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2224 return ICE_ERR_NOT_IMPL;
/* Mixing queue actions with VSI forwarding is also unsupported. */
2226 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2227 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2228 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2229 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2230 return ICE_ERR_NOT_IMPL;
2232 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2233 /* Only one entry existed in the mapping and it was not already
2234 * a part of a VSI list. So, create a VSI list with the old and
2237 struct ice_fltr_info tmp_fltr;
2238 u16 vsi_handle_arr[2];
2240 /* A rule already exists with the new VSI being added */
2241 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2242 return ICE_ERR_ALREADY_EXISTS;
2244 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2245 vsi_handle_arr[1] = new_fltr->vsi_handle;
2246 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2248 new_fltr->lkup_type);
2252 tmp_fltr = *new_fltr;
2253 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2254 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2255 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2256 /* Update the previous switch rule of "MAC forward to VSI" to
2257 * "MAC fwd to VSI list"
2259 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Reflect the new forwarding target in the stored filter info. */
2263 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2264 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2265 m_entry->vsi_list_info =
2266 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2269 /* If this entry was large action then the large action needs
2270 * to be updated to point to FWD to VSI list
2272 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2274 ice_add_marker_act(hw, m_entry,
2275 m_entry->sw_marker_id,
2276 m_entry->lg_act_idx);
2278 u16 vsi_handle = new_fltr->vsi_handle;
2279 enum ice_adminq_opc opcode;
2281 if (!m_entry->vsi_list_info)
2284 /* A rule already exists with the new VSI being added */
2285 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2288 /* Update the previously created VSI list set with
2289 * the new VSI ID passed in
2291 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2292 opcode = ice_aqc_opc_update_sw_rules;
2294 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2295 vsi_list_id, false, opcode,
2296 new_fltr->lkup_type);
2297 /* update VSI list mapping info with new VSI ID */
2299 ice_set_bit(vsi_handle,
2300 m_entry->vsi_list_info->vsi_map);
2303 m_entry->vsi_count++;
2308 * ice_find_rule_entry - Search a rule entry
2309 * @hw: pointer to the hardware structure
2310 * @recp_id: lookup type for which the specified rule needs to be searched
2311 * @f_info: rule information
2313 * Helper function to search for a given rule entry
2314 * Returns pointer to entry storing the rule if found
2316 static struct ice_fltr_mgmt_list_entry *
2317 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2319 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2320 struct ice_switch_info *sw = hw->switch_info;
2321 struct LIST_HEAD_TYPE *list_head;
/* Walk the recipe's filter-rule list; a match requires identical
 * lookup data AND the same flag (Rx/Tx direction).
 */
2323 list_head = &sw->recp_list[recp_id].filt_rules;
2324 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2326 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2327 sizeof(f_info->l_data)) &&
2328 f_info->flag == list_itr->fltr_info.flag) {
/* NOTE(review): the match/return tail of this function is elided in
 * this listing; `ret` is presumably set here and returned — confirm.
 */
2337 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2338 * @hw: pointer to the hardware structure
2339 * @recp_id: lookup type for which VSI lists needs to be searched
2340 * @vsi_handle: VSI handle to be found in VSI list
2341 * @vsi_list_id: VSI list ID found containing vsi_handle
2343 * Helper function to search a VSI list with single entry containing given VSI
2344 * handle element. This can be extended further to search VSI list with more
2345 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2347 static struct ice_vsi_list_map_info *
2348 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2351 struct ice_vsi_list_map_info *map_info = NULL;
2352 struct ice_switch_info *sw = hw->switch_info;
2353 struct LIST_HEAD_TYPE *list_head;
2355 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes store a different list-entry type, so the
 * walk is duplicated for each entry layout.
 */
2356 if (sw->recp_list[recp_id].adv_rule) {
2357 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2359 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2360 ice_adv_fltr_mgmt_list_entry,
2362 if (list_itr->vsi_list_info) {
2363 map_info = list_itr->vsi_list_info;
2364 if (ice_is_bit_set(map_info->vsi_map,
/* Report the list ID back to the caller through the out-parameter. */
2366 *vsi_list_id = map_info->vsi_list_id;
2372 struct ice_fltr_mgmt_list_entry *list_itr;
2374 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2375 ice_fltr_mgmt_list_entry,
/* Only single-VSI lists are candidates for reuse (see header doc). */
2377 if (list_itr->vsi_count == 1 &&
2378 list_itr->vsi_list_info) {
2379 map_info = list_itr->vsi_list_info;
2380 if (ice_is_bit_set(map_info->vsi_map,
2382 *vsi_list_id = map_info->vsi_list_id;
2392 * ice_add_rule_internal - add rule for a given lookup type
2393 * @hw: pointer to the hardware structure
2394 * @recp_id: lookup type (recipe ID) for which rule has to be added
2395 * @f_entry: structure containing MAC forwarding information
2397 * Adds or updates the rule lists for a given recipe
2399 static enum ice_status
2400 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2401 struct ice_fltr_list_entry *f_entry)
2403 struct ice_switch_info *sw = hw->switch_info;
2404 struct ice_fltr_info *new_fltr, *cur_fltr;
2405 struct ice_fltr_mgmt_list_entry *m_entry;
2406 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2407 enum ice_status status = ICE_SUCCESS;
2409 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2410 return ICE_ERR_PARAM;
2412 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2413 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2414 f_entry->fltr_info.fwd_id.hw_vsi_id =
2415 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2417 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
/* Everything below reads/writes the recipe's filter list — hold the
 * per-recipe lock until exit_add_rule_internal.
 */
2419 ice_acquire_lock(rule_lock);
2420 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the port; Tx rules from the HW VSI. */
2421 if (new_fltr->flag & ICE_FLTR_RX)
2422 new_fltr->src = hw->port_info->lport;
2423 else if (new_fltr->flag & ICE_FLTR_TX)
2425 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule: create a fresh forwarding rule. Otherwise merge
 * this VSI into the existing rule's VSI list.
 */
2427 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2429 status = ice_create_pkt_fwd_rule(hw, f_entry);
2430 goto exit_add_rule_internal;
2433 cur_fltr = &m_entry->fltr_info;
2434 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2436 exit_add_rule_internal:
2437 ice_release_lock(rule_lock);
2442 * ice_remove_vsi_list_rule
2443 * @hw: pointer to the hardware structure
2444 * @vsi_list_id: VSI list ID generated as part of allocate resource
2445 * @lkup_type: switch rule filter lookup type
2447 * The VSI list should be emptied before this function is called to remove the
2450 static enum ice_status
2451 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2452 enum ice_sw_lkup_type lkup_type)
2454 struct ice_aqc_sw_rules_elem *s_rule;
2455 enum ice_status status;
2458 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2459 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2461 return ICE_ERR_NO_MEMORY;
/* Build a VSI_LIST_CLEAR switch-rule element for the given list ID. */
2463 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2464 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2466 /* Free the vsi_list resource that we allocated. It is assumed that the
2467 * list is empty at this point.
2469 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2470 ice_aqc_opc_free_res);
/* NOTE(review): s_rule appears only to be freed here; whether it is
 * actually submitted via an elided AQ call cannot be seen in this
 * listing — confirm against the full source.
 */
2472 ice_free(hw, s_rule);
2477 * ice_rem_update_vsi_list
2478 * @hw: pointer to the hardware structure
2479 * @vsi_handle: VSI handle of the VSI to remove
2480 * @fm_list: filter management entry for which the VSI list management needs to
2483 static enum ice_status
2484 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2485 struct ice_fltr_mgmt_list_entry *fm_list)
2487 enum ice_sw_lkup_type lkup_type;
2488 enum ice_status status = ICE_SUCCESS;
/* Only rules that forward to a non-empty VSI list can be updated. */
2491 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2492 fm_list->vsi_count == 0)
2493 return ICE_ERR_PARAM;
2495 /* A rule with the VSI being removed does not exist */
2496 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2497 return ICE_ERR_DOES_NOT_EXIST;
2499 lkup_type = fm_list->fltr_info.lkup_type;
2500 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list (remove_vsi_list = true). */
2501 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2502 ice_aqc_opc_update_sw_rules,
/* Mirror the HW update in the book-keeping bitmap and count. */
2507 fm_list->vsi_count--;
2508 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* With one VSI left (non-VLAN lookups), collapse the rule back to a
 * plain FWD_TO_VSI targeting the sole remaining VSI.
 */
2510 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2511 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2512 struct ice_vsi_list_map_info *vsi_list_info =
2513 fm_list->vsi_list_info;
2516 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2518 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2519 return ICE_ERR_OUT_OF_RANGE;
2521 /* Make sure VSI list is empty before removing it below */
2522 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2524 ice_aqc_opc_update_sw_rules,
2529 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2530 tmp_fltr_info.fwd_id.hw_vsi_id =
2531 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2532 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2533 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2535 ice_debug(hw, ICE_DBG_SW,
2536 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2537 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2541 fm_list->fltr_info = tmp_fltr_info;
/* The VSI list is no longer referenced (one VSI left for non-VLAN, or
 * empty for VLAN) — tear it down in HW and drop the local map entry.
 */
2544 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2545 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2546 struct ice_vsi_list_map_info *vsi_list_info =
2547 fm_list->vsi_list_info;
2549 /* Remove the VSI list since it is no longer used */
2550 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2552 ice_debug(hw, ICE_DBG_SW,
2553 "Failed to remove VSI list %d, error %d\n",
2554 vsi_list_id, status);
2558 LIST_DEL(&vsi_list_info->list_entry);
2559 ice_free(hw, vsi_list_info);
2560 fm_list->vsi_list_info = NULL;
2567 * ice_remove_rule_internal - Remove a filter rule of a given type
2569 * @hw: pointer to the hardware structure
2570 * @recp_id: recipe ID for which the rule needs to removed
2571 * @f_entry: rule entry containing filter information
2573 static enum ice_status
2574 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2575 struct ice_fltr_list_entry *f_entry)
2577 struct ice_switch_info *sw = hw->switch_info;
2578 struct ice_fltr_mgmt_list_entry *list_elem;
2579 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2580 enum ice_status status = ICE_SUCCESS;
2581 bool remove_rule = false;
2584 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2585 return ICE_ERR_PARAM;
2586 f_entry->fltr_info.fwd_id.hw_vsi_id =
2587 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* Lock the recipe's filter list for the lookup and any list edits. */
2589 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2590 ice_acquire_lock(rule_lock);
2591 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2593 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, or only the
 * VSI-list membership updated.
 */
2597 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2599 } else if (!list_elem->vsi_list_info) {
2600 status = ICE_ERR_DOES_NOT_EXIST;
2602 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2603 /* a ref_cnt > 1 indicates that the vsi_list is being
2604 * shared by multiple rules. Decrement the ref_cnt and
2605 * remove this rule, but do not modify the list, as it
2606 * is in-use by other rules.
2608 list_elem->vsi_list_info->ref_cnt--;
2611 /* a ref_cnt of 1 indicates the vsi_list is only used
2612 * by one rule. However, the original removal request is only
2613 * for a single VSI. Update the vsi_list first, and only
2614 * remove the rule if there are no further VSIs in this list.
2616 vsi_handle = f_entry->fltr_info.vsi_handle;
2617 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2620 /* if VSI count goes to zero after updating the VSI list */
2621 if (list_elem->vsi_count == 0)
2626 /* Remove the lookup rule */
2627 struct ice_aqc_sw_rules_elem *s_rule;
2629 s_rule = (struct ice_aqc_sw_rules_elem *)
2630 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE)
2632 status = ICE_ERR_NO_MEMORY;
2636 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2637 ice_aqc_opc_remove_sw_rules);
2639 status = ice_aq_sw_rules(hw, s_rule,
2640 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2641 ice_aqc_opc_remove_sw_rules, NULL);
2645 /* Remove the book-keeping entry from the list */
2646 ice_free(hw, s_rule);
2648 LIST_DEL(&list_elem->list_entry);
2649 ice_free(hw, list_elem);
2652 ice_release_lock(rule_lock);
2657 * ice_aq_get_res_alloc - get allocated resources
2658 * @hw: pointer to the HW struct
2659 * @num_entries: pointer to u16 to store the number of resource entries returned
2660 * @buf: pointer to user-supplied buffer
2661 * @buf_size: size of buff
2662 * @cd: pointer to command details structure or NULL
2664 * The user-supplied buffer must be large enough to store the resource
2665 * information for all resource types. Each resource type is an
2666 * ice_aqc_get_res_resp_data_elem structure.
2669 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2670 u16 buf_size, struct ice_sq_cd *cd)
2672 struct ice_aqc_get_res_alloc *resp;
2673 enum ice_status status;
2674 struct ice_aq_desc desc;
2677 return ICE_ERR_BAD_PTR;
/* The caller's buffer must hold entries for every resource type. */
2679 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2680 return ICE_ERR_INVAL_SIZE;
2682 resp = &desc.params.get_res;
2684 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2685 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only populate it on success. */
2687 if (!status && num_entries)
2688 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2694 * ice_aq_get_res_descs - get allocated resource descriptors
2695 * @hw: pointer to the hardware structure
2696 * @num_entries: number of resource entries in buffer
2697 * @buf: Indirect buffer to hold data parameters and response
2698 * @buf_size: size of buffer for indirect commands
2699 * @res_type: resource type
2700 * @res_shared: is resource shared
2701 * @desc_id: input - first desc ID to start; output - next desc ID
2702 * @cd: pointer to command details structure or NULL
2705 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2706 struct ice_aqc_get_allocd_res_desc_resp *buf,
2707 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2708 struct ice_sq_cd *cd)
2710 struct ice_aqc_get_allocd_res_desc *cmd;
2711 struct ice_aq_desc desc;
2712 enum ice_status status;
2714 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");
2716 cmd = &desc.params.get_res_desc;
2719 return ICE_ERR_PARAM;
/* Buffer must be sized exactly for the requested number of entries. */
2721 if (buf_size != (num_entries * sizeof(*buf)))
2722 return ICE_ERR_PARAM;
2724 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and shared flag into the command's res field. */
2726 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2727 ICE_AQC_RES_TYPE_M) | (res_shared ?
2728 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2729 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2731 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2733 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Return the next descriptor ID so the caller can continue paging. */
2735 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2741 * ice_add_mac - Add a MAC address based filter rule
2742 * @hw: pointer to the hardware structure
2743 * @m_list: list of MAC addresses and forwarding information
2745 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2746 * multiple unicast addresses, the function assumes that all the
2747 * addresses are unique in a given add_mac call. It doesn't
2748 * check for duplicates in this case, removing duplicates from a given
2749 * list should be taken care of in the caller of this function.
2752 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2754 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2755 struct ice_fltr_list_entry *m_list_itr;
2756 struct LIST_HEAD_TYPE *rule_head;
2757 u16 elem_sent, total_elem_left;
2758 struct ice_switch_info *sw;
2759 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2760 enum ice_status status = ICE_SUCCESS;
2761 u16 num_unicast = 0;
2765 return ICE_ERR_PARAM;
2767 sw = hw->switch_info;
2768 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry, handle multicast/shared-unicast via
 * the generic add path, and reject duplicate exclusive unicast MACs.
 */
2769 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2771 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2775 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2776 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2777 if (!ice_is_vsi_valid(hw, vsi_handle))
2778 return ICE_ERR_PARAM;
2779 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2780 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2781 /* update the src in case it is VSI num */
2782 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2783 return ICE_ERR_PARAM;
2784 m_list_itr->fltr_info.src = hw_vsi_id;
2785 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2786 IS_ZERO_ETHER_ADDR(add))
2787 return ICE_ERR_PARAM;
2788 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
2789 /* Don't overwrite the unicast address */
2790 ice_acquire_lock(rule_lock);
2791 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2792 &m_list_itr->fltr_info)) {
2793 ice_release_lock(rule_lock);
2794 return ICE_ERR_ALREADY_EXISTS;
2796 ice_release_lock(rule_lock);
2798 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
2799 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
2800 m_list_itr->status =
2801 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2803 if (m_list_itr->status)
2804 return m_list_itr->status;
/* From here on, the filter list is manipulated under the lock. */
2808 ice_acquire_lock(rule_lock);
2809 /* Exit if no suitable entries were found for adding bulk switch rule */
2811 status = ICE_SUCCESS;
2812 goto ice_add_mac_exit;
2815 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2817 /* Allocate switch rule buffer for the bulk update for unicast */
2818 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2819 s_rule = (struct ice_aqc_sw_rules_elem *)
2820 ice_calloc(hw, num_unicast, s_rule_size);
2822 status = ICE_ERR_NO_MEMORY;
2823 goto ice_add_mac_exit;
/* Pass 2: fill one switch-rule element per exclusive unicast MAC. */
2827 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2829 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2830 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2832 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2833 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2834 ice_aqc_opc_add_sw_rules);
2835 r_iter = (struct ice_aqc_sw_rules_elem *)
2836 ((u8 *)r_iter + s_rule_size);
2840 /* Call AQ bulk switch rule update for all unicast addresses */
2842 /* Call AQ switch rule in AQ_MAX chunk */
2843 for (total_elem_left = num_unicast; total_elem_left > 0;
2844 total_elem_left -= elem_sent) {
2845 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Send at most as many elements as fit in one AQ buffer. */
2847 elem_sent = min(total_elem_left,
2848 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
2849 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2850 elem_sent, ice_aqc_opc_add_sw_rules,
2853 goto ice_add_mac_exit;
2854 r_iter = (struct ice_aqc_sw_rules_elem *)
2855 ((u8 *)r_iter + (elem_sent * s_rule_size));
2858 /* Fill up rule ID based on the value returned from FW */
2860 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2862 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2863 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2864 struct ice_fltr_mgmt_list_entry *fm_entry;
2866 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2867 f_info->fltr_rule_id =
2868 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
2869 f_info->fltr_act = ICE_FWD_TO_VSI;
2870 /* Create an entry to track this MAC address */
2871 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2872 ice_malloc(hw, sizeof(*fm_entry));
2874 status = ICE_ERR_NO_MEMORY;
2875 goto ice_add_mac_exit;
2877 fm_entry->fltr_info = *f_info;
2878 fm_entry->vsi_count = 1;
2879 /* The book keeping entries will get removed when
2880 * base driver calls remove filter AQ command
2883 LIST_ADD(&fm_entry->list_entry, rule_head);
2884 r_iter = (struct ice_aqc_sw_rules_elem *)
2885 ((u8 *)r_iter + s_rule_size);
2890 ice_release_lock(rule_lock);
2892 ice_free(hw, s_rule);
2897 * ice_add_vlan_internal - Add one VLAN based filter rule
2898 * @hw: pointer to the hardware structure
2899 * @f_entry: filter entry containing one VLAN information
2901 static enum ice_status
2902 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
2904 struct ice_switch_info *sw = hw->switch_info;
2905 struct ice_fltr_mgmt_list_entry *v_list_itr;
2906 struct ice_fltr_info *new_fltr, *cur_fltr;
2907 enum ice_sw_lkup_type lkup_type;
2908 u16 vsi_list_id = 0, vsi_handle;
2909 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2910 enum ice_status status = ICE_SUCCESS;
2912 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2913 return ICE_ERR_PARAM;
2915 f_entry->fltr_info.fwd_id.hw_vsi_id =
2916 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2917 new_fltr = &f_entry->fltr_info;
2919 /* VLAN ID should only be 12 bits */
2920 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
2921 return ICE_ERR_PARAM;
2923 if (new_fltr->src_id != ICE_SRC_ID_VSI)
2924 return ICE_ERR_PARAM;
2926 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
2927 lkup_type = new_fltr->lkup_type;
2928 vsi_handle = new_fltr->vsi_handle;
2929 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2930 ice_acquire_lock(rule_lock);
2931 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* Case 1: no rule exists for this VLAN yet — create one, reusing an
 * existing single-VSI list for this VSI when possible.
 */
2933 struct ice_vsi_list_map_info *map_info = NULL;
2935 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
2936 /* All VLAN pruning rules use a VSI list. Check if
2937 * there is already a VSI list containing VSI that we
2938 * want to add. If found, use the same vsi_list_id for
2939 * this new VLAN rule or else create a new list.
2941 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
2945 status = ice_create_vsi_list_rule(hw,
2953 /* Convert the action to forwarding to a VSI list. */
2954 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2955 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
2958 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-lookup to obtain the management entry just created. */
2960 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
2963 status = ICE_ERR_DOES_NOT_EXIST;
2966 /* reuse VSI list for new rule and increment ref_cnt */
2968 v_list_itr->vsi_list_info = map_info;
2969 map_info->ref_cnt++;
2971 v_list_itr->vsi_list_info =
2972 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is not shared — add this VSI
 * to that list directly.
 */
2976 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
2977 /* Update existing VSI list to add new VSI ID only if it used
2980 cur_fltr = &v_list_itr->fltr_info;
2981 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
2984 /* If VLAN rule exists and VSI list being used by this rule is
2985 * referenced by more than 1 VLAN rule. Then create a new VSI
2986 * list appending previous VSI with new VSI and update existing
2987 * VLAN rule to point to new VSI list ID
2989 struct ice_fltr_info tmp_fltr;
2990 u16 vsi_handle_arr[2];
2993 /* Current implementation only supports reusing VSI list with
2994 * one VSI count. We should never hit below condition
2996 if (v_list_itr->vsi_count > 1 &&
2997 v_list_itr->vsi_list_info->ref_cnt > 1) {
2998 ice_debug(hw, ICE_DBG_SW,
2999 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3000 status = ICE_ERR_CFG;
3005 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3008 /* A rule already exists with the new VSI being added */
3009 if (cur_handle == vsi_handle) {
3010 status = ICE_ERR_ALREADY_EXISTS;
3014 vsi_handle_arr[0] = cur_handle;
3015 vsi_handle_arr[1] = vsi_handle;
3016 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3017 &vsi_list_id, lkup_type);
3021 tmp_fltr = v_list_itr->fltr_info;
3022 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3023 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3024 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3025 /* Update the previous switch rule to a new VSI list which
3026 * includes current VSI that is requested
3028 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3032 /* before overriding VSI list map info. decrement ref_cnt of
3035 v_list_itr->vsi_list_info->ref_cnt--;
3037 /* now update to newly created list */
3038 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3039 v_list_itr->vsi_list_info =
3040 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3042 v_list_itr->vsi_count++;
3046 ice_release_lock(rule_lock);
3051 * ice_add_vlan - Add VLAN based filter rule
3052 * @hw: pointer to the hardware structure
3053 * @v_list: list of VLAN entries and forwarding information
3056 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3058 struct ice_fltr_list_entry *v_list_itr;
3061 return ICE_ERR_PARAM;
/* Add each VLAN entry; stop and report the first per-entry failure.
 * Each entry's individual result is also stored in its status field.
 */
3063 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3065 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3066 return ICE_ERR_PARAM;
3067 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3068 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3069 if (v_list_itr->status)
3070 return v_list_itr->status;
3076 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3077 * @hw: pointer to the hardware structure
3078 * @mv_list: list of MAC and VLAN filters
3080 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3081 * pruning bits enabled, then it is the responsibility of the caller to make
3082 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3083 * VLAN won't be received on that VSI otherwise.
3086 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3088 struct ice_fltr_list_entry *mv_list_itr;
3090 if (!mv_list || !hw)
3091 return ICE_ERR_PARAM;
/* Add each MAC+VLAN pair through the generic rule path; stop on the
 * first failure, leaving per-entry results in each status field.
 */
3093 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3095 enum ice_sw_lkup_type l_type =
3096 mv_list_itr->fltr_info.lkup_type;
3098 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3099 return ICE_ERR_PARAM;
3100 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3101 mv_list_itr->status =
3102 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3104 if (mv_list_itr->status)
3105 return mv_list_itr->status;
3111 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3112 * @hw: pointer to the hardware structure
3113 * @em_list: list of ether type MAC filter, MAC is optional
3115 * This function requires the caller to populate the entries in
3116 * the filter list with the necessary fields (including flags to
3117 * indicate Tx or Rx rules).
3120 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3122 struct ice_fltr_list_entry *em_list_itr;
3124 if (!em_list || !hw)
3125 return ICE_ERR_PARAM;
/* Accept ethertype and ethertype+MAC lookups only; caller supplies
 * the Rx/Tx flag on each entry (see header comment).
 */
3127 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3129 enum ice_sw_lkup_type l_type =
3130 em_list_itr->fltr_info.lkup_type;
3132 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3133 l_type != ICE_SW_LKUP_ETHERTYPE)
3134 return ICE_ERR_PARAM;
3136 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3138 if (em_list_itr->status)
3139 return em_list_itr->status;
3145 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3146 * @hw: pointer to the hardware structure
3147 * @em_list: list of ethertype or ethertype MAC entries
3150 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3152 struct ice_fltr_list_entry *em_list_itr, *tmp;
3154 if (!em_list || !hw)
3155 return ICE_ERR_PARAM;
/* SAFE iteration: removal may unlink entries while walking. */
3157 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3159 enum ice_sw_lkup_type l_type =
3160 em_list_itr->fltr_info.lkup_type;
3162 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3163 l_type != ICE_SW_LKUP_ETHERTYPE)
3164 return ICE_ERR_PARAM;
3166 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3168 if (em_list_itr->status)
3169 return em_list_itr->status;
3176 * ice_rem_sw_rule_info
3177 * @hw: pointer to the hardware structure
3178 * @rule_head: pointer to the switch list structure that we want to delete
3181 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
/* Free every book-keeping entry on the given (non-advanced) rule
 * list; this only releases driver state, it does not touch HW rules.
 */
3183 if (!LIST_EMPTY(rule_head)) {
3184 struct ice_fltr_mgmt_list_entry *entry;
3185 struct ice_fltr_mgmt_list_entry *tmp;
3187 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3188 ice_fltr_mgmt_list_entry, list_entry) {
3189 LIST_DEL(&entry->list_entry);
3190 ice_free(hw, entry);
3196 * ice_rem_adv_rule_info
3197 * @hw: pointer to the hardware structure
3198 * @rule_head: pointer to the switch list structure that we want to delete
3201 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3203 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3204 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3206 if (LIST_EMPTY(rule_head))
/* Advanced entries own a separately-allocated lkups array — free it
 * before freeing the entry itself.
 */
3209 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3210 ice_adv_fltr_mgmt_list_entry, list_entry) {
3211 LIST_DEL(&lst_itr->list_entry);
3212 ice_free(hw, lst_itr->lkups);
3213 ice_free(hw, lst_itr);
3218 * ice_rem_all_sw_rules_info
3219 * @hw: pointer to the hardware structure
3221 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3223 struct ice_switch_info *sw = hw->switch_info;
/* Release book-keeping for every recipe, dispatching on whether the
 * recipe holds basic or advanced rule entries.
 */
3226 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3227 struct LIST_HEAD_TYPE *rule_head;
3229 rule_head = &sw->recp_list[i].filt_rules;
3230 if (!sw->recp_list[i].adv_rule)
3231 ice_rem_sw_rule_info(hw, rule_head);
3233 ice_rem_adv_rule_info(hw, rule_head);
3238 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3239 * @pi: pointer to the port_info structure
3240 * @vsi_handle: VSI handle to set as default
3241 * @set: true to add the above mentioned switch rule, false to remove it
3242 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3244 * add filter rule to set/unset given VSI as default VSI for the switch
3245 * (represented by swid)
3248 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3251 struct ice_aqc_sw_rules_elem *s_rule;
3252 struct ice_fltr_info f_info;
3253 struct ice_hw *hw = pi->hw;
3254 enum ice_adminq_opc opcode;
3255 enum ice_status status;
3259 if (!ice_is_vsi_valid(hw, vsi_handle))
3260 return ICE_ERR_PARAM;
3261 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add rules carry the dummy ethernet header; remove rules do not. */
3263 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3264 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3265 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3267 return ICE_ERR_NO_MEMORY;
3269 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3271 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3272 f_info.flag = direction;
3273 f_info.fltr_act = ICE_FWD_TO_VSI;
3274 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Source depends on direction: port for Rx, HW VSI for Tx. */
3276 if (f_info.flag & ICE_FLTR_RX) {
3277 f_info.src = pi->lport;
3278 f_info.src_id = ICE_SRC_ID_LPORT;
3280 f_info.fltr_rule_id =
3281 pi->dflt_rx_vsi_rule_id;
3282 } else if (f_info.flag & ICE_FLTR_TX) {
3283 f_info.src_id = ICE_SRC_ID_VSI;
3284 f_info.src = hw_vsi_id;
3286 f_info.fltr_rule_id =
3287 pi->dflt_tx_vsi_rule_id;
3291 opcode = ice_aqc_opc_add_sw_rules;
3293 opcode = ice_aqc_opc_remove_sw_rules;
3295 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3297 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3298 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* Record (on set) or invalidate (on clear) the cached default-VSI
 * number and rule ID in the port_info.
 */
3301 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3303 if (f_info.flag & ICE_FLTR_TX) {
3304 pi->dflt_tx_vsi_num = hw_vsi_id;
3305 pi->dflt_tx_vsi_rule_id = index;
3306 } else if (f_info.flag & ICE_FLTR_RX) {
3307 pi->dflt_rx_vsi_num = hw_vsi_id;
3308 pi->dflt_rx_vsi_rule_id = index;
3311 if (f_info.flag & ICE_FLTR_TX) {
3312 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3313 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3314 } else if (f_info.flag & ICE_FLTR_RX) {
3315 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3316 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3321 ice_free(hw, s_rule);
3326 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3327 * @hw: pointer to the hardware structure
3328 * @recp_id: lookup type for which the specified rule needs to be searched
3329 * @f_info: rule information
3331 * Helper function to search for a unicast rule entry - this is to be used
3332 * to remove unicast MAC filter that is not shared with other VSIs on the
3335 * Returns pointer to entry storing the rule if found
3337 static struct ice_fltr_mgmt_list_entry *
3338 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3339 struct ice_fltr_info *f_info)
3341 struct ice_switch_info *sw = hw->switch_info;
3342 struct ice_fltr_mgmt_list_entry *list_itr;
3343 struct LIST_HEAD_TYPE *list_head;
/* Unlike ice_find_rule_entry(), this match additionally requires the
 * same destination HW VSI, so a unicast MAC owned by another VSI on
 * the switch is not considered a hit.
 */
3345 list_head = &sw->recp_list[recp_id].filt_rules;
3346 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3348 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3349 sizeof(f_info->l_data)) &&
3350 f_info->fwd_id.hw_vsi_id ==
3351 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3352 f_info->flag == list_itr->fltr_info.flag)
3359 * ice_remove_mac - remove a MAC address based filter rule
3360 * @hw: pointer to the hardware structure
3361 * @m_list: list of MAC addresses and forwarding information
3363 * This function removes either a MAC filter rule or a specific VSI from a
3364 * VSI list for a multicast MAC address.
3366 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3367 * ice_add_mac. Caller should be aware that this call will only work if all
3368 * the entries passed into m_list were added previously. It will not attempt to
3369 * do a partial remove of entries that were found.
3372 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3374 struct ice_fltr_list_entry *list_itr, *tmp;
3375 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3378 return ICE_ERR_PARAM;
3380 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3381 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3383 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3384 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
/* Every entry must be a MAC lookup bound to a valid VSI handle;
 * the whole call fails on the first entry that is not.
 */
3387 if (l_type != ICE_SW_LKUP_MAC)
3388 return ICE_ERR_PARAM;
3390 vsi_handle = list_itr->fltr_info.vsi_handle;
3391 if (!ice_is_vsi_valid(hw, vsi_handle))
3392 return ICE_ERR_PARAM;
/* Translate the software VSI handle to the HW VSI number the rule uses */
3394 list_itr->fltr_info.fwd_id.hw_vsi_id =
3395 ice_get_hw_vsi_num(hw, vsi_handle);
3396 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3397 /* Don't remove the unicast address that belongs to
3398 * another VSI on the switch, since it is not being
3401 ice_acquire_lock(rule_lock);
3402 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3403 &list_itr->fltr_info)) {
3404 ice_release_lock(rule_lock);
3405 return ICE_ERR_DOES_NOT_EXIST;
3407 ice_release_lock(rule_lock);
3409 list_itr->status = ice_remove_rule_internal(hw,
3412 if (list_itr->status)
3413 return list_itr->status;
3419 * ice_remove_vlan - Remove VLAN based filter rule
3420 * @hw: pointer to the hardware structure
3421 * @v_list: list of VLAN entries and forwarding information
3424 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3426 struct ice_fltr_list_entry *v_list_itr, *tmp;
3429 return ICE_ERR_PARAM;
3431 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3433 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
/* Only VLAN lookup entries are accepted; bail out on anything else */
3435 if (l_type != ICE_SW_LKUP_VLAN)
3436 return ICE_ERR_PARAM;
3437 v_list_itr->status = ice_remove_rule_internal(hw,
/* Stop at the first entry whose removal failed; its status is returned */
3440 if (v_list_itr->status)
3441 return v_list_itr->status;
3447 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3448 * @hw: pointer to the hardware structure
3449 * @v_list: list of MAC VLAN entries and forwarding information
3452 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3454 struct ice_fltr_list_entry *v_list_itr, *tmp;
3457 return ICE_ERR_PARAM;
3459 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3461 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
/* Only MAC+VLAN lookup entries are accepted here */
3463 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3464 return ICE_ERR_PARAM;
3465 v_list_itr->status =
3466 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3468 if (v_list_itr->status)
3469 return v_list_itr->status;
3475 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3476 * @fm_entry: filter entry to inspect
3477 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose membership bitmap has this VSI's bit set.
 */
3480 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3482 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3483 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3484 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3485 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3490 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3491 * @hw: pointer to the hardware structure
3492 * @vsi_handle: VSI handle to remove filters from
3493 * @vsi_list_head: pointer to the list to add entry to
3494 * @fi: pointer to fltr_info of filter entry to copy & add
3496 * Helper function, used when creating a list of filters to remove from
3497 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3498 * original filter entry, with the exception of fltr_info.fltr_act and
3499 * fltr_info.fwd_id fields. These are set such that later logic can
3500 * extract which VSI to remove the fltr from, and pass on that information.
3502 static enum ice_status
3503 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3504 struct LIST_HEAD_TYPE *vsi_list_head,
3505 struct ice_fltr_info *fi)
3507 struct ice_fltr_list_entry *tmp;
3509 /* this memory is freed up in the caller function
3510 * once filters for this VSI are removed
3512 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3514 return ICE_ERR_NO_MEMORY;
/* Shallow copy of the filter info; only the forwarding fields below differ */
3516 tmp->fltr_info = *fi;
3518 /* Overwrite these fields to indicate which VSI to remove filter from,
3519 * so find and remove logic can extract the information from the
3520 * list entries. Note that original entries will still have proper
3523 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3524 tmp->fltr_info.vsi_handle = vsi_handle;
3525 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3527 LIST_ADD(&tmp->list_entry, vsi_list_head);
3533 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3534 * @hw: pointer to the hardware structure
3535 * @vsi_handle: VSI handle to remove filters from
3536 * @lkup_list_head: pointer to the list that has certain lookup type filters
3537 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3539 * Locates all filters in lkup_list_head that are used by the given VSI,
3540 * and adds COPIES of those entries to vsi_list_head (intended to be used
3541 * to remove the listed filters).
3542 * Note that this means all entries in vsi_list_head must be explicitly
3543 * deallocated by the caller when done with list.
3545 static enum ice_status
3546 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3547 struct LIST_HEAD_TYPE *lkup_list_head,
3548 struct LIST_HEAD_TYPE *vsi_list_head)
3550 struct ice_fltr_mgmt_list_entry *fm_entry;
3551 enum ice_status status = ICE_SUCCESS;
3553 /* check to make sure VSI ID is valid and within boundary */
3554 if (!ice_is_vsi_valid(hw, vsi_handle))
3555 return ICE_ERR_PARAM;
3557 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3558 ice_fltr_mgmt_list_entry, list_entry) {
3559 struct ice_fltr_info *fi;
3561 fi = &fm_entry->fltr_info;
/* Skip entries that are not used by the requested VSI */
3562 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3565 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3575 * ice_determine_promisc_mask
3576 * @fi: filter info to parse
3578 * Helper function to determine which ICE_PROMISC_ mask corresponds
3579 * to given filter info.
3581 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3583 u16 vid = fi->l_data.mac_vlan.vlan_id;
3584 u8 *macaddr = fi->l_data.mac.mac_addr;
3585 bool is_tx_fltr = false;
3586 u8 promisc_mask = 0;
/* Direction (TX vs RX) selects which half of each promisc flag pair is set */
3588 if (fi->flag == ICE_FLTR_TX)
/* Classify by destination MAC: broadcast, multicast, then unicast */
3591 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3592 promisc_mask |= is_tx_fltr ?
3593 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3594 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3595 promisc_mask |= is_tx_fltr ?
3596 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3597 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3598 promisc_mask |= is_tx_fltr ?
3599 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3601 promisc_mask |= is_tx_fltr ?
3602 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3604 return promisc_mask;
3608 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3609 * @hw: pointer to the hardware structure
3610 * @vsi_handle: VSI handle to retrieve info from
3611 * @promisc_mask: pointer to mask to be filled in
3612 * @vid: VLAN ID of promisc VLAN VSI
3615 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3618 struct ice_switch_info *sw = hw->switch_info;
3619 struct ice_fltr_mgmt_list_entry *itr;
3620 struct LIST_HEAD_TYPE *rule_head;
3621 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3623 if (!ice_is_vsi_valid(hw, vsi_handle))
3624 return ICE_ERR_PARAM;
/* Walk the PROMISC recipe's rule list and accumulate, under the rule
 * lock, the promisc bits of every filter this VSI participates in.
 */
3628 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3629 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3631 ice_acquire_lock(rule_lock);
3632 LIST_FOR_EACH_ENTRY(itr, rule_head,
3633 ice_fltr_mgmt_list_entry, list_entry) {
3634 /* Continue if this filter doesn't apply to this VSI or the
3635 * VSI ID is not in the VSI map for this filter
3637 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3640 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3642 ice_release_lock(rule_lock);
3648 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3649 * @hw: pointer to the hardware structure
3650 * @vsi_handle: VSI handle to retrieve info from
3651 * @promisc_mask: pointer to mask to be filled in
3652 * @vid: VLAN ID of promisc VLAN VSI
3655 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3658 struct ice_switch_info *sw = hw->switch_info;
3659 struct ice_fltr_mgmt_list_entry *itr;
3660 struct LIST_HEAD_TYPE *rule_head;
3661 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3663 if (!ice_is_vsi_valid(hw, vsi_handle))
3664 return ICE_ERR_PARAM;
/* Same walk as ice_get_vsi_promisc(), but over the PROMISC_VLAN recipe */
3668 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3669 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3671 ice_acquire_lock(rule_lock);
3672 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3674 /* Continue if this filter doesn't apply to this VSI or the
3675 * VSI ID is not in the VSI map for this filter
3677 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3680 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3682 ice_release_lock(rule_lock);
3688 * ice_remove_promisc - Remove promisc based filter rules
3689 * @hw: pointer to the hardware structure
3690 * @recp_id: recipe ID for which the rule needs to removed
3691 * @v_list: list of promisc entries
3693 static enum ice_status
3694 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3695 struct LIST_HEAD_TYPE *v_list)
3697 struct ice_fltr_list_entry *v_list_itr, *tmp;
3699 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3701 v_list_itr->status =
3702 ice_remove_rule_internal(hw, recp_id, v_list_itr);
/* Abort on the first entry that fails; its status is propagated */
3703 if (v_list_itr->status)
3704 return v_list_itr->status;
3710 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3711 * @hw: pointer to the hardware structure
3712 * @vsi_handle: VSI handle to clear mode
3713 * @promisc_mask: mask of promiscuous config bits to clear
3714 * @vid: VLAN ID to clear VLAN promiscuous
3717 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3720 struct ice_switch_info *sw = hw->switch_info;
3721 struct ice_fltr_list_entry *fm_entry, *tmp;
3722 struct LIST_HEAD_TYPE remove_list_head;
3723 struct ice_fltr_mgmt_list_entry *itr;
3724 struct LIST_HEAD_TYPE *rule_head;
3725 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3726 enum ice_status status = ICE_SUCCESS;
3729 if (!ice_is_vsi_valid(hw, vsi_handle))
3730 return ICE_ERR_PARAM;
/* VLAN promisc bits route to the PROMISC_VLAN recipe, others to PROMISC */
3733 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3735 recipe_id = ICE_SW_LKUP_PROMISC;
3737 rule_head = &sw->recp_list[recipe_id].filt_rules;
3738 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3740 INIT_LIST_HEAD(&remove_list_head);
/* Under the rule lock, collect copies of all of this VSI's filters that
 * are fully covered by the requested mask into remove_list_head.
 */
3742 ice_acquire_lock(rule_lock);
3743 LIST_FOR_EACH_ENTRY(itr, rule_head,
3744 ice_fltr_mgmt_list_entry, list_entry) {
3745 u8 fltr_promisc_mask = 0;
3747 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3750 fltr_promisc_mask |=
3751 ice_determine_promisc_mask(&itr->fltr_info);
3753 /* Skip if filter is not completely specified by given mask */
3754 if (fltr_promisc_mask & ~promisc_mask)
3757 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3761 ice_release_lock(rule_lock);
3762 goto free_fltr_list;
3765 ice_release_lock(rule_lock);
3767 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary copies regardless of removal outcome */
3770 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3771 ice_fltr_list_entry, list_entry) {
3772 LIST_DEL(&fm_entry->list_entry);
3773 ice_free(hw, fm_entry);
3780 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3781 * @hw: pointer to the hardware structure
3782 * @vsi_handle: VSI handle to configure
3783 * @promisc_mask: mask of promiscuous config bits
3784 * @vid: VLAN ID to set VLAN promiscuous
3787 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3789 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3790 struct ice_fltr_list_entry f_list_entry;
3791 struct ice_fltr_info new_fltr;
3792 enum ice_status status = ICE_SUCCESS;
3798 ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");
3800 if (!ice_is_vsi_valid(hw, vsi_handle))
3801 return ICE_ERR_PARAM;
3802 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3804 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc requests program the PROMISC_VLAN recipe with the VID;
 * everything else uses the plain PROMISC recipe.
 */
3806 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3807 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3808 new_fltr.l_data.mac_vlan.vlan_id = vid;
3809 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3811 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3812 recipe_id = ICE_SW_LKUP_PROMISC;
3815 /* Separate filters must be set for each direction/packet type
3816 * combination, so we will loop over the mask value, store the
3817 * individual type, and clear it out in the input mask as it
3820 while (promisc_mask) {
/* Pick exactly one pending direction/type bit per iteration and clear
 * it from the mask so the loop terminates when all bits are consumed.
 */
3826 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3827 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3828 pkt_type = UCAST_FLTR;
3829 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3830 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3831 pkt_type = UCAST_FLTR;
3833 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3834 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3835 pkt_type = MCAST_FLTR;
3836 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3837 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3838 pkt_type = MCAST_FLTR;
3840 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3841 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3842 pkt_type = BCAST_FLTR;
3843 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3844 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3845 pkt_type = BCAST_FLTR;
3849 /* Check for VLAN promiscuous flag */
3850 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3851 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3852 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3853 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3857 /* Set filter DA based on packet type */
3858 mac_addr = new_fltr.l_data.mac.mac_addr;
3859 if (pkt_type == BCAST_FLTR) {
3860 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
3861 } else if (pkt_type == MCAST_FLTR ||
3862 pkt_type == UCAST_FLTR) {
3863 /* Use the dummy ether header DA */
3864 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
3865 ICE_NONDMA_TO_NONDMA);
3866 if (pkt_type == MCAST_FLTR)
3867 mac_addr[0] |= 0x1; /* Set multicast bit */
3870 /* Need to reset this to zero for all iterations */
/* TX filters are sourced from the HW VSI, RX filters from the port */
3873 new_fltr.flag |= ICE_FLTR_TX;
3874 new_fltr.src = hw_vsi_id;
3876 new_fltr.flag |= ICE_FLTR_RX;
3877 new_fltr.src = hw->port_info->lport;
3880 new_fltr.fltr_act = ICE_FWD_TO_VSI;
3881 new_fltr.vsi_handle = vsi_handle;
3882 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3883 f_list_entry.fltr_info = new_fltr;
3885 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3886 if (status != ICE_SUCCESS)
3887 goto set_promisc_exit;
3895 * ice_set_vlan_vsi_promisc
3896 * @hw: pointer to the hardware structure
3897 * @vsi_handle: VSI handle to configure
3898 * @promisc_mask: mask of promiscuous config bits
3899 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3901 * Configure VSI with all associated VLANs to given promiscuous mode(s)
3904 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3905 bool rm_vlan_promisc)
3907 struct ice_switch_info *sw = hw->switch_info;
3908 struct ice_fltr_list_entry *list_itr, *tmp;
3909 struct LIST_HEAD_TYPE vsi_list_head;
3910 struct LIST_HEAD_TYPE *vlan_head;
3911 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
3912 enum ice_status status;
/* Snapshot (copy) this VSI's VLAN filters under the VLAN rule lock */
3915 INIT_LIST_HEAD(&vsi_list_head);
3916 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3917 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3918 ice_acquire_lock(vlan_lock);
3919 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
3921 ice_release_lock(vlan_lock);
3923 goto free_fltr_list;
/* Apply (or clear) the requested promisc mode per associated VLAN ID */
3925 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
3927 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
3928 if (rm_vlan_promisc)
3929 status = ice_clear_vsi_promisc(hw, vsi_handle,
3930 promisc_mask, vlan_id);
3932 status = ice_set_vsi_promisc(hw, vsi_handle,
3933 promisc_mask, vlan_id);
/* Release the snapshot entries allocated by ice_add_to_vsi_fltr_list() */
3939 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
3940 ice_fltr_list_entry, list_entry) {
3941 LIST_DEL(&list_itr->list_entry);
3942 ice_free(hw, list_itr);
3948 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
3949 * @hw: pointer to the hardware structure
3950 * @vsi_handle: VSI handle to remove filters from
3951 * @lkup: switch rule filter lookup type
3954 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
3955 enum ice_sw_lkup_type lkup)
3957 struct ice_switch_info *sw = hw->switch_info;
3958 struct ice_fltr_list_entry *fm_entry;
3959 struct LIST_HEAD_TYPE remove_list_head;
3960 struct LIST_HEAD_TYPE *rule_head;
3961 struct ice_fltr_list_entry *tmp;
3962 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3963 enum ice_status status;
/* Build a private copy of this VSI's filters for the given lookup type
 * while holding the recipe's rule lock.
 */
3965 INIT_LIST_HEAD(&remove_list_head);
3966 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
3967 rule_head = &sw->recp_list[lkup].filt_rules;
3968 ice_acquire_lock(rule_lock);
3969 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
3971 ice_release_lock(rule_lock);
/* Dispatch to the remove helper that matches the lookup type */
3976 case ICE_SW_LKUP_MAC:
3977 ice_remove_mac(hw, &remove_list_head);
3979 case ICE_SW_LKUP_VLAN:
3980 ice_remove_vlan(hw, &remove_list_head);
3982 case ICE_SW_LKUP_PROMISC:
3983 case ICE_SW_LKUP_PROMISC_VLAN:
3984 ice_remove_promisc(hw, lkup, &remove_list_head);
3986 case ICE_SW_LKUP_MAC_VLAN:
3987 ice_remove_mac_vlan(hw, &remove_list_head);
3989 case ICE_SW_LKUP_ETHERTYPE:
3990 case ICE_SW_LKUP_ETHERTYPE_MAC:
3991 ice_remove_eth_mac(hw, &remove_list_head);
3993 case ICE_SW_LKUP_DFLT:
3994 ice_debug(hw, ICE_DBG_SW,
3995 "Remove filters for this lookup type hasn't been implemented yet\n")
3997 case ICE_SW_LKUP_LAST:
3998 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary list entries built above */
4002 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4003 ice_fltr_list_entry, list_entry) {
4004 LIST_DEL(&fm_entry->list_entry);
4005 ice_free(hw, fm_entry);
4010 * ice_remove_vsi_fltr - Remove all filters for a VSI
4011 * @hw: pointer to the hardware structure
4012 * @vsi_handle: VSI handle to remove filters from
4014 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4016 ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
/* Sweep every switch lookup type this driver manages for the VSI */
4018 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4019 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4020 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4021 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4022 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4023 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4024 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4025 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4029 * ice_alloc_res_cntr - allocating resource counter
4030 * @hw: pointer to the hardware structure
4031 * @type: type of resource
4032 * @alloc_shared: if set it is shared else dedicated
4033 * @num_items: number of entries requested for FD resource type
4034 * @counter_id: counter index returned by AQ call
4037 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4040 struct ice_aqc_alloc_free_res_elem *buf;
4041 enum ice_status status;
4044 /* Allocate resource */
4045 buf_len = sizeof(*buf);
4046 buf = (struct ice_aqc_alloc_free_res_elem *)
4047 ice_malloc(hw, buf_len);
4049 return ICE_ERR_NO_MEMORY;
/* res_type carries both the resource type field and the shared flag */
4051 buf->num_elems = CPU_TO_LE16(num_items);
4052 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4053 ICE_AQC_RES_TYPE_M) | alloc_shared);
4055 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4056 ice_aqc_opc_alloc_res, NULL);
/* On success firmware returns the allocated index in elem[0] */
4060 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4068 * ice_free_res_cntr - free resource counter
4069 * @hw: pointer to the hardware structure
4070 * @type: type of resource
4071 * @alloc_shared: if set it is shared else dedicated
4072 * @num_items: number of entries to be freed for FD resource type
4073 * @counter_id: counter ID resource which needs to be freed
4076 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4079 struct ice_aqc_alloc_free_res_elem *buf;
4080 enum ice_status status;
4084 buf_len = sizeof(*buf);
4085 buf = (struct ice_aqc_alloc_free_res_elem *)
4086 ice_malloc(hw, buf_len);
4088 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr(): same res_type encoding, but the
 * counter ID to release is passed in elem[0] and the free opcode used.
 */
4090 buf->num_elems = CPU_TO_LE16(num_items);
4091 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4092 ICE_AQC_RES_TYPE_M) | alloc_shared);
4093 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4095 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4096 ice_aqc_opc_free_res, NULL);
4098 ice_debug(hw, ICE_DBG_SW,
4099 "counter resource could not be freed\n");
4106 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4107 * @hw: pointer to the hardware structure
4108 * @counter_id: returns counter index
4110 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
/* Thin wrapper: one dedicated VLAN counter via the generic allocator */
4112 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4113 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4118 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4119 * @hw: pointer to the hardware structure
4120 * @counter_id: counter index to be freed
4122 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
/* Thin wrapper: releases one dedicated VLAN counter via the generic helper */
4124 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4125 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4130 * ice_alloc_res_lg_act - add large action resource
4131 * @hw: pointer to the hardware structure
4132 * @l_id: large action ID to fill it in
4133 * @num_acts: number of actions to hold with a large action entry
4135 static enum ice_status
4136 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4138 struct ice_aqc_alloc_free_res_elem *sw_buf;
4139 enum ice_status status;
4142 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4143 return ICE_ERR_PARAM;
4145 /* Allocate resource for large action */
4146 buf_len = sizeof(*sw_buf);
4147 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4148 ice_malloc(hw, buf_len);
4150 return ICE_ERR_NO_MEMORY;
4152 sw_buf->num_elems = CPU_TO_LE16(1);
4154 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4155 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4156 * If num_acts is greater than 2, then use
4157 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4158 * The num_acts cannot exceed 4. This was ensured at the
4159 * beginning of the function.
4162 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4163 else if (num_acts == 2)
4164 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4166 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4168 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4169 ice_aqc_opc_alloc_res, NULL);
/* Firmware returns the allocated large-action table index in elem[0] */
4171 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4173 ice_free(hw, sw_buf);
4178 * ice_add_mac_with_sw_marker - add filter with sw marker
4179 * @hw: pointer to the hardware structure
4180 * @f_info: filter info structure containing the MAC filter information
4181 * @sw_marker: sw marker to tag the Rx descriptor with
4184 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4187 struct ice_switch_info *sw = hw->switch_info;
4188 struct ice_fltr_mgmt_list_entry *m_entry;
4189 struct ice_fltr_list_entry fl_info;
4190 struct LIST_HEAD_TYPE l_head;
4191 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4192 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker and VSI qualify */
4196 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4197 return ICE_ERR_PARAM;
4199 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4200 return ICE_ERR_PARAM;
4202 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4203 return ICE_ERR_PARAM;
4205 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4206 return ICE_ERR_PARAM;
4207 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4209 /* Add filter if it doesn't exist so then the adding of large
4210 * action always results in update
4213 INIT_LIST_HEAD(&l_head);
4214 fl_info.fltr_info = *f_info;
4215 LIST_ADD(&fl_info.list_entry, &l_head);
4217 entry_exists = false;
4218 ret = ice_add_mac(hw, &l_head);
4219 if (ret == ICE_ERR_ALREADY_EXISTS)
4220 entry_exists = true;
4224 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4225 ice_acquire_lock(rule_lock);
4226 /* Get the book keeping entry for the filter */
4227 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4231 /* If counter action was enabled for this rule then don't enable
4232 * sw marker large action
4234 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4235 ret = ICE_ERR_PARAM;
4239 /* if same marker was added before */
4240 if (m_entry->sw_marker_id == sw_marker) {
4241 ret = ICE_ERR_ALREADY_EXISTS;
4245 /* Allocate a hardware table entry to hold large act. Three actions
4246 * for marker based large action
4248 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4252 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4255 /* Update the switch rule to add the marker action */
4256 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4258 ice_release_lock(rule_lock);
4263 ice_release_lock(rule_lock);
4264 /* only remove entry if it did not exist previously */
4266 ret = ice_remove_mac(hw, &l_head);
4272 * ice_add_mac_with_counter - add filter with counter enabled
4273 * @hw: pointer to the hardware structure
4274 * @f_info: pointer to filter info structure containing the MAC filter
4278 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4280 struct ice_switch_info *sw = hw->switch_info;
4281 struct ice_fltr_mgmt_list_entry *m_entry;
4282 struct ice_fltr_list_entry fl_info;
4283 struct LIST_HEAD_TYPE l_head;
4284 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4285 enum ice_status ret;
/* Only forward-to-VSI MAC filters on a valid VSI may gain a counter */
4290 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4291 return ICE_ERR_PARAM;
4293 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4294 return ICE_ERR_PARAM;
4296 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4297 return ICE_ERR_PARAM;
4298 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4300 entry_exist = false;
4302 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4304 /* Add filter if it doesn't exist so then the adding of large
4305 * action always results in update
4307 INIT_LIST_HEAD(&l_head);
4309 fl_info.fltr_info = *f_info;
4310 LIST_ADD(&fl_info.list_entry, &l_head);
4312 ret = ice_add_mac(hw, &l_head);
4313 if (ret == ICE_ERR_ALREADY_EXISTS)
4318 ice_acquire_lock(rule_lock);
4319 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4321 ret = ICE_ERR_BAD_PTR;
4325 /* Don't enable counter for a filter for which sw marker was enabled */
4326 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4327 ret = ICE_ERR_PARAM;
4331 /* If a counter was already enabled then don't need to add again */
4332 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4333 ret = ICE_ERR_ALREADY_EXISTS;
4337 /* Allocate a hardware table entry to VLAN counter */
4338 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4342 /* Allocate a hardware table entry to hold large act. Two actions for
4343 * counter based large action
4345 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4349 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4352 /* Update the switch rule to add the counter action */
4353 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4355 ice_release_lock(rule_lock);
4360 ice_release_lock(rule_lock);
4361 /* only remove entry if it did not exist previously */
4363 ret = ice_remove_mac(hw, &l_head);
4368 /* This is mapping table entry that maps every word within a given protocol
4369 * structure to the real byte offset as per the specification of that
4371 * for example dst address is 3 words in ethertype header and corresponding
4372 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4373 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4374 * matching entry describing its field. This needs to be updated if new
4375 * structure is added to that union.
/* Each row: protocol type followed by the byte offset of every 16-bit word
 * that can be extracted from that header.
 */
4377 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4378 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4379 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4380 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4381 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4382 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4383 26, 28, 30, 32, 34, 36, 38 } },
4384 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4385 26, 28, 30, 32, 34, 36, 38 } },
4386 { ICE_TCP_IL, { 0, 2 } },
4387 { ICE_UDP_OF, { 0, 2 } },
4388 { ICE_UDP_ILOS, { 0, 2 } },
4389 { ICE_SCTP_IL, { 0, 2 } },
4390 { ICE_VXLAN, { 8, 10, 12, 14 } },
4391 { ICE_GENEVE, { 8, 10, 12, 14 } },
4392 { ICE_VXLAN_GPE, { 0, 2, 4 } },
4393 { ICE_NVGRE, { 0, 2, 4, 6 } },
4394 { ICE_PROTOCOL_LAST, { 0 } }
4397 /* The following table describes preferred grouping of recipes.
4398 * If a recipe that needs to be programmed is a superset or matches one of the
4399 * following combinations, then the recipe needs to be chained as per the
/* Each group: word count, up to four {HW protocol ID, offset, ...} lookups,
 * and per-word masks (0xffff = full-word match).
 */
4402 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
4403 {3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4404 { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
4405 {4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4406 { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4407 { 0xffff, 0xffff, 0xffff, 0xffff } },
4408 {2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4409 { 0xffff, 0xffff, 0xffff, 0xffff } },
4410 {2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4411 { 0xffff, 0xffff, 0xffff, 0xffff } },
/* Maps each software protocol type to the HW protocol ID programmed into
 * field vectors. Tunnel protocols (VXLAN/GENEVE/VXLAN-GPE) share the
 * outer-UDP HW ID; NVGRE uses the GRE HW ID.
 */
4414 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4415 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4416 { ICE_MAC_IL, ICE_MAC_IL_HW },
4417 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4418 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4419 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4420 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4421 { ICE_TCP_IL, ICE_TCP_IL_HW },
4422 { ICE_UDP_OF, ICE_UDP_OF_HW },
4423 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4424 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4425 { ICE_VXLAN, ICE_UDP_OF_HW },
4426 { ICE_GENEVE, ICE_UDP_OF_HW },
4427 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4428 { ICE_NVGRE, ICE_GRE_OF_HW },
4429 { ICE_PROTOCOL_LAST, 0 }
4433 * ice_find_recp - find a recipe
4434 * @hw: pointer to the hardware structure
4435 * @lkup_exts: extension sequence to match
4437 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4439 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4441 struct ice_sw_recipe *recp;
4444 /* Initialize available_result_ids which tracks available result idx */
4445 for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
4446 ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
4447 available_result_ids);
4449 /* Walk through existing recipes to find a match */
4450 recp = hw->switch_info->recp_list;
4451 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4452 /* If recipe was not created for this ID, in SW bookkeeping,
4453 * check if FW has an entry for this recipe. If the FW has an
4454 * entry update it in our SW bookkeeping and continue with the
4457 if (!recp[i].recp_created)
4458 if (ice_get_recp_frm_fw(hw,
4459 hw->switch_info->recp_list, i))
4462 /* if number of words we are looking for match */
4463 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4464 struct ice_fv_word *a = lkup_exts->fv_words;
4465 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: for each requested word, scan the
 * recipe's words for a matching {offset, protocol ID} pair.
 */
4469 for (p = 0; p < lkup_exts->n_val_words; p++) {
4470 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4472 if (a[p].off == b[q].off &&
4473 a[p].prot_id == b[q].prot_id)
4474 /* Found the "p"th word in the
4479 /* After walking through all the words in the
4480 * "i"th recipe if "p"th word was not found then
4481 * this recipe is not what we are looking for.
4482 * So break out from this loop and try the next
4485 if (q >= recp[i].lkup_exts.n_val_words) {
4490 /* If for "i"th recipe the found was never set to false
4491 * then it means we found our match
4494 return i; /* Return the recipe ID */
4497 return ICE_MAX_NUM_RECIPES;
4501 * ice_prot_type_to_id - get protocol ID from protocol type
4502 * @type: protocol type
4503 * @id: pointer to variable that will receive the ID
4505 * Returns true if found, false otherwise
4507 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of the SW-type -> HW-protocol-ID table; the table is
 * terminated by an ICE_PROTOCOL_LAST sentinel entry.
 */
4511 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4512 if (ice_prot_id_tbl[i].type == type) {
4513 *id = ice_prot_id_tbl[i].protocol_id;
4520 * ice_fill_valid_words - count valid words
4521 * @rule: advanced rule with lookup information
4522 * @lkup_exts: byte offset extractions of the words that are valid
4524 * calculate valid words in a lookup rule using mask value
4527 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4528 struct ice_prot_lkup_ext *lkup_exts)
/* A protocol type with no HW protocol ID contributes no words. */
4534 if (!ice_prot_type_to_id(rule->type, &prot_id))
4537 word = lkup_exts->n_val_words;
/* Scan the rule's mask one u16 at a time; every non-zero mask word
 * yields one extraction word (protocol ID + byte offset + mask).
 */
4539 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4540 if (((u16 *)&rule->m_u)[j] &&
4541 rule->type < ARRAY_SIZE(ice_prot_ext)) {
4542 /* No more space to accommodate */
4543 if (word >= ICE_MAX_CHAIN_WORDS)
4545 lkup_exts->fv_words[word].off =
4546 ice_prot_ext[rule->type].offs[j];
4547 lkup_exts->fv_words[word].prot_id =
4548 ice_prot_id_tbl[rule->type].protocol_id;
4549 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Report how many words this rule added, then commit the new total. */
4553 ret_val = word - lkup_exts->n_val_words;
4554 lkup_exts->n_val_words = word;
4560 * ice_find_prot_off_ind - check for specific ID and offset in rule
4561 * @lkup_exts: an array of protocol header extractions
4562 * @prot_type: protocol type to check
4563 * @off: expected offset of the extraction
4565 * Check if the prot_ext has given protocol ID and offset
4568 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
/* Return the index of the word matching (prot_type, off), or the
 * ICE_MAX_CHAIN_WORDS sentinel when no word matches.
 */
4573 for (j = 0; j < lkup_exts->n_val_words; j++)
4574 if (lkup_exts->fv_words[j].off == off &&
4575 lkup_exts->fv_words[j].prot_id == prot_type)
4578 return ICE_MAX_CHAIN_WORDS;
4582 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4583 * @lkup_exts: an array of protocol header extractions
4584 * @r_policy: preferred recipe grouping policy
4586 * Helper function to check if given recipe group is subset we need to check if
4587 * all the words described by the given recipe group exist in the advanced rule
4588 * look up information
4591 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4592 const struct ice_pref_recipe_group *r_policy)
4594 u8 ind[ICE_NUM_WORDS_RECIPE];
4598 /* check if everything in the r_policy is part of the entire rule */
4599 for (i = 0; i < r_policy->n_val_pairs; i++) {
4602 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4603 r_policy->pairs[i].off);
/* A single missing (prot_id, off) pair disqualifies the group. */
4604 if (j >= ICE_MAX_CHAIN_WORDS)
4607 /* store the indexes temporarily found by the find function
4608 * this will be used to mark the words as 'done'
4613 /* If the entire policy recipe was a true match, then mark the fields
4614 * that are covered by the recipe as 'done' meaning that these words
4615 * will be clumped together in one recipe.
4616 * "Done" here means in our searching if certain recipe group
4617 * matches or is subset of the given rule, then we mark all
4618 * the corresponding offsets as found. So the remaining recipes should
4619 * be created with whatever words that were left.
4621 for (i = 0; i < count; i++) {
4624 ice_set_bit(in, lkup_exts->done);
4630 * ice_create_first_fit_recp_def - Create a recipe grouping
4631 * @hw: pointer to the hardware structure
4632 * @lkup_exts: an array of protocol header extractions
4633 * @rg_list: pointer to a list that stores new recipe groups
4634 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4636 * Using first fit algorithm, take all the words that are still not done
4637 * and start grouping them in 4-word groups. Each group makes up one
4640 static enum ice_status
4641 ice_create_first_fit_recp_def(struct ice_hw *hw,
4642 struct ice_prot_lkup_ext *lkup_exts,
4643 struct LIST_HEAD_TYPE *rg_list,
4646 struct ice_pref_recipe_group *grp = NULL;
4651 /* Walk through every word in the rule to check if it is not done. If so
4652 * then this word needs to be part of a new recipe.
4654 for (j = 0; j < lkup_exts->n_val_words; j++)
4655 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Open a fresh group when none exists yet or the current one is
 * full (ICE_NUM_WORDS_RECIPE pairs per recipe group).
 */
4657 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4658 struct ice_recp_grp_entry *entry;
4660 entry = (struct ice_recp_grp_entry *)
4661 ice_malloc(hw, sizeof(*entry));
4663 return ICE_ERR_NO_MEMORY;
/* NOTE(review): entry ownership transfers to rg_list; caller frees. */
4664 LIST_ADD(&entry->l_entry, rg_list);
4665 grp = &entry->r_group;
/* Append this (prot_id, off, mask) word to the current group. */
4669 grp->pairs[grp->n_val_pairs].prot_id =
4670 lkup_exts->fv_words[j].prot_id;
4671 grp->pairs[grp->n_val_pairs].off =
4672 lkup_exts->fv_words[j].off;
4673 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4681 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4682 * @hw: pointer to the hardware structure
4683 * @fv_list: field vector with the extraction sequence information
4684 * @rg_list: recipe groupings with protocol-offset pairs
4686 * Helper function to fill in the field vector indices for protocol-offset
4687 * pairs. These indexes are then ultimately programmed into a recipe.
4690 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4691 struct LIST_HEAD_TYPE *rg_list)
4693 struct ice_sw_fv_list_entry *fv;
4694 struct ice_recp_grp_entry *rg;
4695 struct ice_fv_word *fv_ext;
4697 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted here. */
4700 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4701 fv_ext = fv->fv_ptr->ew;
4703 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4706 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4707 struct ice_fv_word *pr;
4711 pr = &rg->r_group.pairs[i];
4712 mask = rg->r_group.mask[i];
/* Find the extraction-sequence word matching this pair. */
4714 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4715 if (fv_ext[j].prot_id == pr->prot_id &&
4716 fv_ext[j].off == pr->off) {
4717 /* Store index of field vector */
4719 /* Mask is given by caller as big
4720 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask for FW consumption. */
4723 rg->fv_mask[i] = mask << 8 | mask >> 8;
4731 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4732 * @hw: pointer to hardware structure
4733 * @rm: recipe management list entry
4734 * @match_tun: if field vector index for tunnel needs to be programmed
4736 static enum ice_status
4737 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4740 struct ice_aqc_recipe_data_elem *tmp;
4741 struct ice_aqc_recipe_data_elem *buf;
4742 struct ice_recp_grp_entry *entry;
4743 enum ice_status status;
4748 /* When more than one recipe are required, another recipe is needed to
4749 * chain them together. Matching a tunnel metadata ID takes up one of
4750 * the match fields in the chaining recipe reducing the number of
4751 * chained recipes by one.
4753 if (rm->n_grp_count > 1)
4755 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
4756 (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
4757 return ICE_ERR_MAX_LIMIT;
/* tmp holds the current FW recipe table; buf is the set we will add. */
4759 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
4760 ICE_MAX_NUM_RECIPES,
4763 return ICE_ERR_NO_MEMORY;
4765 buf = (struct ice_aqc_recipe_data_elem *)
4766 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
4768 status = ICE_ERR_NO_MEMORY;
4772 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4773 recipe_count = ICE_MAX_NUM_RECIPES;
4774 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4776 if (status || recipe_count == 0)
4779 /* Allocate the recipe resources, and configure them according to the
4780 * match fields from protocol headers and extracted field vectors.
/* chain_idx = first free result index (available_result_ids is the
 * reversed free-index bitmap maintained by ice_find_recp()).
 */
4782 chain_idx = ICE_CHAIN_FV_INDEX_START -
4783 ice_find_first_bit(available_result_ids,
4784 ICE_CHAIN_FV_INDEX_START + 1);
4785 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4788 status = ice_alloc_recipe(hw, &entry->rid);
4792 /* Clear the result index of the located recipe, as this will be
4793 * updated, if needed, later in the recipe creation process.
4795 tmp[0].content.result_indx = 0;
4797 buf[recps] = tmp[0];
4798 buf[recps].recipe_indx = (u8)entry->rid;
4799 /* if the recipe is a non-root recipe RID should be programmed
4800 * as 0 for the rules to be applied correctly.
4802 buf[recps].content.rid = 0;
4803 ice_memset(&buf[recps].content.lkup_indx, 0,
4804 sizeof(buf[recps].content.lkup_indx),
4807 /* All recipes use look-up index 0 to match switch ID. */
4808 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4809 buf[recps].content.mask[0] =
4810 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4811 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4814 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4815 buf[recps].content.lkup_indx[i] = 0x80;
4816 buf[recps].content.mask[i] = 0;
/* Program this group's FV indices/masks into lookup slots 1..n. */
4819 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4820 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4821 buf[recps].content.mask[i + 1] =
4822 CPU_TO_LE16(entry->fv_mask[i]);
/* Chained case: each sub-recipe publishes its outcome via a result
 * index, which is consumed by the root/chaining recipe below.
 */
4825 if (rm->n_grp_count > 1) {
4826 entry->chain_idx = chain_idx;
4827 buf[recps].content.result_indx =
4828 ICE_AQ_RECIPE_RESULT_EN |
4829 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4830 ICE_AQ_RECIPE_RESULT_DATA_M);
4831 ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
4832 available_result_ids);
4833 chain_idx = ICE_CHAIN_FV_INDEX_START -
4834 ice_find_first_bit(available_result_ids,
4835 ICE_CHAIN_FV_INDEX_START +
4839 /* fill recipe dependencies */
4840 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
4841 ICE_MAX_NUM_RECIPES);
4842 ice_set_bit(buf[recps].recipe_indx,
4843 (ice_bitmap_t *)buf[recps].recipe_bitmap);
4844 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group recipe: the one recipe is itself the root. */
4848 if (rm->n_grp_count == 1) {
4849 rm->root_rid = buf[0].recipe_indx;
4850 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
4851 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4852 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4853 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4854 sizeof(buf[0].recipe_bitmap),
4855 ICE_NONDMA_TO_NONDMA);
4857 status = ICE_ERR_BAD_PTR;
4860 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
4861 * the recipe which is getting created if specified
4862 * by user. Usually any advanced switch filter, which results
4863 * into new extraction sequence, ended up creating a new recipe
4864 * of type ROOT and usually recipes are associated with profiles
4865 * Switch rule referring newly created recipe, needs to have
4866 * either/or 'fwd' or 'join' priority, otherwise switch rule
4867 * evaluation will not happen correctly. In other words, if
4868 * switch rule to be evaluated on priority basis, then recipe
4869 * needs to have priority, otherwise it will be evaluated last.
4871 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: build one extra root recipe that matches the
 * result indices produced by all the sub-recipes above.
 */
4873 struct ice_recp_grp_entry *last_chain_entry;
4876 /* Allocate the last recipe that will chain the outcomes of the
4877 * other recipes together
4879 status = ice_alloc_recipe(hw, &rid);
4883 buf[recps].recipe_indx = (u8)rid;
4884 buf[recps].content.rid = (u8)rid;
4885 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
4886 /* the new entry created should also be part of rg_list to
4887 * make sure we have complete recipe
4889 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
4890 sizeof(*last_chain_entry));
4891 if (!last_chain_entry) {
4892 status = ICE_ERR_NO_MEMORY;
4895 last_chain_entry->rid = rid;
4896 ice_memset(&buf[recps].content.lkup_indx, 0,
4897 sizeof(buf[recps].content.lkup_indx),
4899 /* All recipes use look-up index 0 to match switch ID. */
4900 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4901 buf[recps].content.mask[0] =
4902 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4903 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4904 buf[recps].content.lkup_indx[i] =
4905 ICE_AQ_RECIPE_LKUP_IGNORE;
4906 buf[recps].content.mask[i] = 0;
4910 /* update r_bitmap with the recp that is used for chaining */
4911 ice_set_bit(rid, rm->r_bitmap);
4912 /* this is the recipe that chains all the other recipes so it
4913 * should not have a chaining ID to indicate the same
4915 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
4916 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
4918 last_chain_entry->fv_idx[i] = entry->chain_idx;
4919 buf[recps].content.lkup_indx[i] = entry->chain_idx;
4920 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
4921 ice_set_bit(entry->rid, rm->r_bitmap);
4923 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
4924 if (sizeof(buf[recps].recipe_bitmap) >=
4925 sizeof(rm->r_bitmap)) {
4926 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
4927 sizeof(buf[recps].recipe_bitmap),
4928 ICE_NONDMA_TO_NONDMA);
4930 status = ICE_ERR_BAD_PTR;
4933 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4935 /* To differentiate among different UDP tunnels, a meta data ID
4939 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
4940 buf[recps].content.mask[i] =
4941 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
4945 rm->root_rid = (u8)rid;
/* Change lock serializes recipe programming against other writers. */
4947 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4951 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
4952 ice_release_change_lock(hw);
4956 /* Every recipe that just got created add it to the recipe
4959 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4960 struct ice_switch_info *sw = hw->switch_info;
4961 struct ice_sw_recipe *recp;
4963 recp = &sw->recp_list[entry->rid];
4964 recp->root_rid = entry->rid;
4965 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
4966 entry->r_group.n_val_pairs *
4967 sizeof(struct ice_fv_word),
4968 ICE_NONDMA_TO_NONDMA);
4970 recp->n_ext_words = entry->r_group.n_val_pairs;
4971 recp->chain_idx = entry->chain_idx;
4972 recp->recp_created = true;
4973 recp->big_recp = false;
4987 * ice_create_recipe_group - creates recipe group
4988 * @hw: pointer to hardware structure
4989 * @rm: recipe management list entry
4990 * @lkup_exts: lookup elements
4992 static enum ice_status
4993 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
4994 struct ice_prot_lkup_ext *lkup_exts)
4996 struct ice_recp_grp_entry *entry;
4997 struct ice_recp_grp_entry *tmp;
4998 enum ice_status status;
5002 rm->n_grp_count = 0;
5004 /* Each switch recipe can match up to 5 words or metadata. One word in
5005 * each recipe is used to match the switch ID. Four words are left for
5006 * matching other values. If the new advanced recipe requires more than
5007 * 4 words, it needs to be split into multiple recipes which are chained
5008 * together using the intermediate result that each produces as input to
5009 * the other recipes in the sequence.
5011 groups = ARRAY_SIZE(ice_recipe_pack);
5013 /* Check if any of the preferred recipes from the grouping policy
5016 for (i = 0; i < groups; i++)
5017 /* Check if the recipe from the preferred grouping matches
5018 * or is a subset of the fields that needs to be looked up.
/* ice_is_recipe_subset() also marks the covered words 'done'. */
5020 if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
5021 /* This recipe can be used by itself or grouped with
5024 entry = (struct ice_recp_grp_entry *)
5025 ice_malloc(hw, sizeof(*entry));
5027 status = ICE_ERR_NO_MEMORY;
5030 entry->r_group = ice_recipe_pack[i];
5031 LIST_ADD(&entry->l_entry, &rm->rg_list);
5035 /* Create recipes for words that are marked not done by packing them
/* First-fit pack whatever words the preferred groups left over. */
5038 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5039 &rm->rg_list, &recp_count);
5041 rm->n_grp_count += recp_count;
5042 rm->n_ext_words = lkup_exts->n_val_words;
5043 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5044 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5045 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5046 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* Error path: tear down any group entries added to rg_list above. */
5051 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5053 LIST_DEL(&entry->l_entry);
5054 ice_free(hw, entry);
5062 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5063 * @hw: pointer to hardware structure
5064 * @lkups: lookup elements or match criteria for the advanced recipe, one
5065 * structure per protocol header
5066 * @lkups_cnt: number of protocols
5067 * @fv_list: pointer to a list that holds the returned field vectors
5069 static enum ice_status
5070 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5071 struct LIST_HEAD_TYPE *fv_list)
5073 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element. */
5077 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5079 return ICE_ERR_NO_MEMORY;
5081 for (i = 0; i < lkups_cnt; i++)
/* Any lookup type without a HW protocol ID is a config error. */
5082 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5083 status = ICE_ERR_CFG;
5087 /* Find field vectors that include all specified protocol types */
5088 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
5091 ice_free(hw, prot_ids);
5096 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5097 * @hw: pointer to hardware structure
5098 * @lkups: lookup elements or match criteria for the advanced recipe, one
5099 * structure per protocol header
5100 * @lkups_cnt: number of protocols
5101 * @rinfo: other information regarding the rule e.g. priority and action info
5102 * @rid: return the recipe ID of the recipe created
5104 static enum ice_status
5105 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5106 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5108 struct ice_prot_lkup_ext *lkup_exts;
5109 struct ice_recp_grp_entry *r_entry;
5110 struct ice_sw_fv_list_entry *fvit;
5111 struct ice_recp_grp_entry *r_tmp;
5112 struct ice_sw_fv_list_entry *tmp;
5113 enum ice_status status = ICE_SUCCESS;
5114 struct ice_sw_recipe *rm;
5115 bool match_tun = false;
5119 return ICE_ERR_PARAM;
5121 lkup_exts = (struct ice_prot_lkup_ext *)
5122 ice_malloc(hw, sizeof(*lkup_exts));
5124 return ICE_ERR_NO_MEMORY;
5126 /* Determine the number of words to be matched and if it exceeds a
5127 * recipe's restrictions
5129 for (i = 0; i < lkups_cnt; i++) {
5132 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5133 status = ICE_ERR_CFG;
5134 goto err_free_lkup_exts;
5137 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5139 status = ICE_ERR_CFG;
5140 goto err_free_lkup_exts;
/* Reuse an existing recipe when one already matches the criteria. */
5144 *rid = ice_find_recp(hw, lkup_exts);
5145 if (*rid < ICE_MAX_NUM_RECIPES)
5146 /* Success if found a recipe that match the existing criteria */
5147 goto err_free_lkup_exts;
5149 /* Recipe we need does not exist, add a recipe */
5151 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5153 status = ICE_ERR_NO_MEMORY;
5154 goto err_free_lkup_exts;
5157 /* Get field vectors that contain fields extracted from all the protocol
5158 * headers being programmed.
5160 INIT_LIST_HEAD(&rm->fv_list);
5161 INIT_LIST_HEAD(&rm->rg_list);
5163 status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
5167 /* Group match words into recipes using preferred recipe grouping
5170 status = ice_create_recipe_group(hw, rm, lkup_exts);
5174 /* There is only profile for UDP tunnels. So, it is necessary to use a
5175 * metadata ID flag to differentiate different tunnel types. A separate
5176 * recipe needs to be used for the metadata.
5178 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5179 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5180 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5183 /* set the recipe priority if specified */
5184 rm->priority = rinfo->priority ? rinfo->priority : 0;
5186 /* Find offsets from the field vector. Pick the first one for all the
5189 ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5190 status = ice_add_sw_recipe(hw, rm, match_tun);
5194 /* Associate all the recipes created with all the profiles in the
5195 * common field vector.
5197 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5199 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write the profile's recipe association bitmap. */
5201 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5202 (u8 *)r_bitmap, NULL);
5206 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5207 ICE_MAX_NUM_RECIPES);
5208 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5212 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5215 ice_release_change_lock(hw);
/* Success: publish the root RID and cache the extraction sequence
 * in SW bookkeeping so ice_find_recp() can match it later.
 */
5221 *rid = rm->root_rid;
5222 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5223 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: free temporary group entries and field-vector list. */
5225 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5226 ice_recp_grp_entry, l_entry) {
5227 LIST_DEL(&r_entry->l_entry);
5228 ice_free(hw, r_entry);
5231 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5233 LIST_DEL(&fvit->list_entry);
5238 ice_free(hw, rm->root_buf);
5243 ice_free(hw, lkup_exts);
5249 * ice_find_dummy_packet - find dummy packet by tunnel type
5251 * @lkups: lookup elements or match criteria for the advanced recipe, one
5252 * structure per protocol header
5253 * @lkups_cnt: number of protocols
5254 * @tun_type: tunnel type from the match criteria
5255 * @pkt: dummy packet to fill according to filter match criteria
5256 * @pkt_len: packet length of dummy packet
5257 * @offsets: pointer to receive the pointer to the offsets for the packet
5260 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5261 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5263 const struct ice_dummy_pkt_offsets **offsets)
5265 bool tcp = false, udp = false;
/* Note whether the match criteria reference an inner TCP/UDP header;
 * this selects the L4 variant of the dummy packet below.
 */
5268 for (i = 0; i < lkups_cnt; i++) {
5269 if (lkups[i].type == ICE_UDP_ILOS)
5271 else if (lkups[i].type == ICE_TCP_IL)
5275 if (tun_type == ICE_SW_TUN_NVGRE || tun_type == ICE_ALL_TUNNELS) {
5276 *pkt = dummy_gre_packet;
5277 *pkt_len = sizeof(dummy_gre_packet);
5278 *offsets = dummy_gre_packet_offsets;
5282 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5283 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5285 *pkt = dummy_udp_tun_tcp_packet;
5286 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5287 *offsets = dummy_udp_tun_tcp_packet_offsets;
5291 *pkt = dummy_udp_tun_udp_packet;
5292 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5293 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled fallbacks: plain UDP or (default) TCP template. */
5298 *pkt = dummy_udp_packet;
5299 *pkt_len = sizeof(dummy_udp_packet);
5300 *offsets = dummy_udp_packet_offsets;
5304 *pkt = dummy_tcp_packet;
5305 *pkt_len = sizeof(dummy_tcp_packet);
5306 *offsets = dummy_tcp_packet_offsets;
5310 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5312 * @lkups: lookup elements or match criteria for the advanced recipe, one
5313 * structure per protocol header
5314 * @lkups_cnt: number of protocols
5315 * @s_rule: stores rule information from the match criteria
5316 * @dummy_pkt: dummy packet to fill according to filter match criteria
5317 * @pkt_len: packet length of dummy packet
5318 * @offsets: offset info for the dummy packet
5320 static enum ice_status
5321 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5322 struct ice_aqc_sw_rules_elem *s_rule,
5323 const u8 *dummy_pkt, u16 pkt_len,
5324 const struct ice_dummy_pkt_offsets *offsets)
5329 /* Start with a packet with a pre-defined/dummy content. Then, fill
5330 * in the header values to be looked up or matched.
5332 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5334 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5336 for (i = 0; i < lkups_cnt; i++) {
5337 enum ice_protocol_type type;
5338 u16 offset = 0, len = 0, j;
5341 /* find the start of this layer; it should be found since this
5342 * was already checked when search for the dummy packet
5344 type = lkups[i].type;
5345 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5346 if (type == offsets[j].type) {
5347 offset = offsets[j].offset;
5352 /* this should never happen in a correct calling sequence */
5354 return ICE_ERR_PARAM;
/* Pick the header length for this protocol layer. */
5356 switch (lkups[i].type) {
5359 len = sizeof(struct ice_ether_hdr);
5363 len = sizeof(struct ice_ipv4_hdr);
5368 len = sizeof(struct ice_l4_hdr);
5371 len = sizeof(struct ice_sctp_hdr);
5374 len = sizeof(struct ice_nvgre);
5379 len = sizeof(struct ice_udp_tnl_hdr);
5382 return ICE_ERR_PARAM;
5385 /* the length should be a word multiple */
5386 if (len % ICE_BYTES_PER_WORD)
5389 /* We have the offset to the header start, the length, the
5390 * caller's header values and mask. Use this information to
5391 * copy the data into the dummy packet appropriately based on
5392 * the mask. Note that we need to only write the bits as
5393 * indicated by the mask to make sure we don't improperly write
5394 * over any significant packet data.
/* Merge per-u16: keep template bits outside the mask, take the
 * caller's header bits inside the mask.
 */
5396 for (j = 0; j < len / sizeof(u16); j++)
5397 if (((u16 *)&lkups[i].m_u)[j])
5398 ((u16 *)(pkt + offset))[j] =
5399 (((u16 *)(pkt + offset))[j] &
5400 ~((u16 *)&lkups[i].m_u)[j]) |
5401 (((u16 *)&lkups[i].h_u)[j] &
5402 ((u16 *)&lkups[i].m_u)[j]);
5405 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5411 * ice_find_adv_rule_entry - Search a rule entry
5412 * @hw: pointer to the hardware structure
5413 * @lkups: lookup elements or match criteria for the advanced recipe, one
5414 * structure per protocol header
5415 * @lkups_cnt: number of protocols
5416 * @recp_id: recipe ID for which we are finding the rule
5417 * @rinfo: other information regarding the rule e.g. priority and action info
5419 * Helper function to search for a given advance rule entry
5420 * Returns pointer to entry storing the rule if found
5422 static struct ice_adv_fltr_mgmt_list_entry *
5423 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5424 u16 lkups_cnt, u8 recp_id,
5425 struct ice_adv_rule_info *rinfo)
5427 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5428 struct ice_switch_info *sw = hw->switch_info;
5431 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5432 ice_adv_fltr_mgmt_list_entry, list_entry) {
5433 bool lkups_matched = true;
/* Cheap count check first, then element-wise memcmp of lookups. */
5435 if (lkups_cnt != list_itr->lkups_cnt)
5437 for (i = 0; i < list_itr->lkups_cnt; i++)
5438 if (memcmp(&list_itr->lkups[i], &lkups[i],
5440 lkups_matched = false;
/* A rule matches only if flag and tunnel type also agree. */
5443 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5444 rinfo->tun_type == list_itr->rule_info.tun_type &&
5452 * ice_adv_add_update_vsi_list
5453 * @hw: pointer to the hardware structure
5454 * @m_entry: pointer to current adv filter management list entry
5455 * @cur_fltr: filter information from the book keeping entry
5456 * @new_fltr: filter information with the new VSI to be added
5458 * Call AQ command to add or update previously created VSI list with new VSI.
5460 * Helper function to do book keeping associated with adding filter information
5461 * The algorithm to do the book keeping is described below:
5462 * When a VSI needs to subscribe to a given advanced filter
5463 * if only one VSI has been added till now
5464 * Allocate a new VSI list and add two VSIs
5465 * to this list using switch rule command
5466 * Update the previously created switch rule with the
5467 * newly created VSI list ID
5468 * if a VSI list was previously created
5469 * Add the new VSI to the previously created VSI list set
5470 * using the update switch rule command
5472 static enum ice_status
5473 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5474 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5475 struct ice_adv_rule_info *cur_fltr,
5476 struct ice_adv_rule_info *new_fltr)
5478 enum ice_status status;
5479 u16 vsi_list_id = 0;
/* Queue-directed filters cannot be converted to a VSI list. */
5481 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5482 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5483 return ICE_ERR_NOT_IMPL;
5485 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5486 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5487 return ICE_ERR_ALREADY_EXISTS;
5489 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5490 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5491 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5492 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5493 return ICE_ERR_NOT_IMPL;
5495 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5496 /* Only one entry existed in the mapping and it was not already
5497 * a part of a VSI list. So, create a VSI list with the old and
5500 struct ice_fltr_info tmp_fltr;
5501 u16 vsi_handle_arr[2];
5503 /* A rule already exists with the new VSI being added */
5504 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5505 new_fltr->sw_act.fwd_id.hw_vsi_id)
5506 return ICE_ERR_ALREADY_EXISTS;
5508 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5509 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5510 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5516 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5517 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5518 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5519 /* Update the previous switch rule of "forward to VSI" to
5522 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new list in both the filter info and bookkeeping. */
5526 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5527 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5528 m_entry->vsi_list_info =
5529 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
/* Else branch: a VSI list already exists — just extend it. */
5532 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5534 if (!m_entry->vsi_list_info)
5537 /* A rule already exists with the new VSI being added */
5538 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5541 /* Update the previously created VSI list set with
5542 * the new VSI ID passed in
5544 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5546 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5548 ice_aqc_opc_update_sw_rules,
5550 /* update VSI list mapping info with new VSI ID */
5552 ice_set_bit(vsi_handle,
5553 m_entry->vsi_list_info->vsi_map);
5556 m_entry->vsi_count++;
5561 * ice_add_adv_rule - helper function to create an advanced switch rule
5562 * @hw: pointer to the hardware structure
5563 * @lkups: information on the words that needs to be looked up. All words
5564 * together makes one recipe
5565 * @lkups_cnt: num of entries in the lkups array
5566 * @rinfo: other information related to the rule that needs to be programmed
5567 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5568 * ignored in case of error.
5570 * This function can program only 1 rule at a time. The lkups is used to
5571 * describe the all the words that forms the "lookup" portion of the recipe.
5572 * These words can span multiple protocols. Callers to this function need to
5573 * pass in a list of protocol headers with lookup information along and mask
5574 * that determines which words are valid from the given protocol header.
5575 * rinfo describes other information related to this rule such as forwarding
5576 * IDs, priority of this rule, etc.
/* NOTE(review): this listing is an excerpt with elided lines (original line
 * numbers are embedded at the start of each line and are non-contiguous);
 * the comments below describe only the statements visible here.
 */
5579 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5580 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5581 struct ice_rule_query_data *added_entry)
5583 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5584 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5585 const struct ice_dummy_pkt_offsets *pkt_offsets;
5586 struct ice_aqc_sw_rules_elem *s_rule = NULL;
5587 struct LIST_HEAD_TYPE *rule_head;
5588 struct ice_switch_info *sw;
5589 enum ice_status status;
5590 const u8 *pkt = NULL;
5596 return ICE_ERR_PARAM;
/* Reject lookups whose match mask is all-zero: scan each lookup's mask
 * word by word (the rejecting branch itself is elided in this excerpt).
 */
5598 for (i = 0; i < lkups_cnt; i++) {
5601 /* Validate match masks to make sure that there is something
5604 ptr = (u16 *)&lkups[i].m_u;
5605 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5612 return ICE_ERR_PARAM;
5614 /* make sure that we can locate a dummy packet */
5615 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5618 status = ICE_ERR_PARAM;
5619 goto err_ice_add_adv_rule;
/* Only these four filter actions are supported for advanced rules */
5622 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5623 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5624 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5625 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5628 vsi_handle = rinfo->sw_act.vsi_handle;
5629 if (!ice_is_vsi_valid(hw, vsi_handle))
5630 return ICE_ERR_PARAM;
/* Translate the driver VSI handle into the HW VSI number the switch
 * rule must reference; TX rules also use it as the source.
 */
5632 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5633 rinfo->sw_act.fwd_id.hw_vsi_id =
5634 ice_get_hw_vsi_num(hw, vsi_handle);
5635 if (rinfo->sw_act.flag & ICE_FLTR_TX)
5636 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or reuse) the recipe matching these lookups; rid identifies it */
5638 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* If an identical rule already exists, only update its VSI list */
5641 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5643 /* If the rule already exists for a different VSI, promote it to a
5644 * VSI-list rule: create the VSI list if needed, add both the
5645 * existing VSI and the new VSI to it, and bump vsi_count so the
5646 * forwarding rule covers every subscribed VSI.
5647 * (Original wording condensed; elided lines may carry additional
5648 * conditions — confirm against the full source.)
5649 */
5651 status = ice_adv_add_update_vsi_list(hw, m_entry,
5652 &m_entry->rule_info,
5655 added_entry->rid = rid;
5656 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5657 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New rule: AQ buffer is the fixed lookup header plus the dummy packet */
5661 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5662 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
5664 return ICE_ERR_NO_MEMORY;
/* Build the 32-bit single action word for the chosen filter action */
5665 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
5666 switch (rinfo->sw_act.fltr_act) {
5667 case ICE_FWD_TO_VSI:
5668 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5669 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5670 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5673 act |= ICE_SINGLE_ACT_TO_Q;
5674 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5675 ICE_SINGLE_ACT_Q_INDEX_M;
5677 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the queue-group size */
5678 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
5679 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
5680 act |= ICE_SINGLE_ACT_TO_Q;
5681 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5682 ICE_SINGLE_ACT_Q_INDEX_M;
5683 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
5684 ICE_SINGLE_ACT_Q_REGION_M;
5686 case ICE_DROP_PACKET:
5687 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5688 ICE_SINGLE_ACT_VALID_BIT;
5691 status = ICE_ERR_CFG;
5692 goto err_ice_add_adv_rule;
5695 /* set the rule LOOKUP type based on caller specified 'RX'
5696 * instead of hardcoding it to be either LOOKUP_TX/RX
5698 * for 'RX' set the source to be the port number
5699 * for 'TX' set the source to be the source HW VSI number (determined
5703 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
5704 s_rule->pdata.lkup_tx_rx.src =
5705 CPU_TO_LE16(hw->port_info->lport);
5707 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
5708 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
5711 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
5712 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet into the rule and patch in the match values */
5714 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
/* Program the rule into HW via the admin queue */
5717 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5718 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5721 goto err_ice_add_adv_rule;
/* Build the software bookkeeping entry; it owns a private copy of lkups */
5722 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
5723 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
5725 status = ICE_ERR_NO_MEMORY;
5726 goto err_ice_add_adv_rule;
5729 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
5730 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
5731 ICE_NONDMA_TO_NONDMA);
5732 if (!adv_fltr->lkups) {
5733 status = ICE_ERR_NO_MEMORY;
5734 goto err_ice_add_adv_rule;
5737 adv_fltr->lkups_cnt = lkups_cnt;
5738 adv_fltr->rule_info = *rinfo;
/* HW returns the assigned rule index in the response buffer */
5739 adv_fltr->rule_info.fltr_rule_id =
5740 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5741 sw = hw->switch_info;
5742 sw->recp_list[rid].adv_rule = true;
5743 rule_head = &sw->recp_list[rid].filt_rules;
5745 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
5746 struct ice_fltr_info tmp_fltr;
5748 tmp_fltr.fltr_rule_id =
5749 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5750 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5751 tmp_fltr.fwd_id.hw_vsi_id =
5752 ice_get_hw_vsi_num(hw, vsi_handle);
5753 tmp_fltr.vsi_handle = vsi_handle;
5754 /* Update the previous switch rule of "forward to VSI" to
5757 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5759 goto err_ice_add_adv_rule;
5760 adv_fltr->vsi_count = 1;
5763 /* Add rule entry to book keeping list */
5764 LIST_ADD(&adv_fltr->list_entry, rule_head);
5766 added_entry->rid = rid;
5767 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5768 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error path: release the partially built bookkeeping entry and the
 * AQ rule buffer. ice_free(NULL) is assumed to be a no-op here —
 * TODO(review) confirm against the OSdep layer.
 */
5770 err_ice_add_adv_rule:
5771 if (status && adv_fltr) {
5772 ice_free(hw, adv_fltr->lkups);
5773 ice_free(hw, adv_fltr);
5776 ice_free(hw, s_rule);
5782 * ice_adv_rem_update_vsi_list
5783 * @hw: pointer to the hardware structure
5784 * @vsi_handle: VSI handle of the VSI to remove
5785 * @fm_list: filter management entry for which the VSI list management needs to
5788 static enum ice_status
5789 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5790 struct ice_adv_fltr_mgmt_list_entry *fm_list)
5792 struct ice_vsi_list_map_info *vsi_list_info;
5793 enum ice_sw_lkup_type lkup_type;
5794 enum ice_status status;
/* Only meaningful for rules that forward to a VSI list with members */
5797 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5798 fm_list->vsi_count == 0)
5799 return ICE_ERR_PARAM;
5801 /* A rule with the VSI being removed does not exist */
5802 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
5803 return ICE_ERR_DOES_NOT_EXIST;
/* Ask HW to drop this VSI from the list (remove flag = true) */
5805 lkup_type = ICE_SW_LKUP_LAST;
5806 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
5807 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5808 ice_aqc_opc_update_sw_rules,
/* Mirror the HW change in the software map */
5813 fm_list->vsi_count--;
5814 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5815 vsi_list_info = fm_list->vsi_list_info;
/* One VSI left: collapse the VSI-list rule into a plain fwd-to-VSI rule */
5816 if (fm_list->vsi_count == 1) {
5817 struct ice_fltr_info tmp_fltr;
/* NOTE(review): tmp_fltr is only partially assigned in the visible
 * lines; an initializing memset may exist in the elided lines —
 * confirm against the full source.
 */
5820 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
5822 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5823 return ICE_ERR_OUT_OF_RANGE;
5825 /* Make sure VSI list is empty before removing it below */
5826 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5828 ice_aqc_opc_update_sw_rules,
5832 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5833 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5834 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5835 tmp_fltr.fwd_id.hw_vsi_id =
5836 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5837 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5838 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5840 /* Rewrite the switch rule from "fwd to VSI list" to plain
5841 * "fwd to VSI" for the single remaining VSI
 */
5843 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5845 ice_debug(hw, ICE_DBG_SW,
5846 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5847 tmp_fltr.fwd_id.hw_vsi_id, status);
5852 if (fm_list->vsi_count == 1) {
5853 /* Remove the VSI list since it is no longer used */
5854 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5856 ice_debug(hw, ICE_DBG_SW,
5857 "Failed to remove VSI list %d, error %d\n",
5858 vsi_list_id, status);
/* Drop the list's bookkeeping entry and detach it from the rule */
5862 LIST_DEL(&vsi_list_info->list_entry);
5863 ice_free(hw, vsi_list_info);
5864 fm_list->vsi_list_info = NULL;
5871 * ice_rem_adv_rule - removes existing advanced switch rule
5872 * @hw: pointer to the hardware structure
5873 * @lkups: information on the words that needs to be looked up. All words
5874 * together makes one recipe
5875 * @lkups_cnt: num of entries in the lkups array
5876 * @rinfo: Its the pointer to the rule information for the rule
5878 * This function can be used to remove 1 rule at a time. The lkups is
5879 * used to describe all the words that forms the "lookup" portion of the
5880 * rule. These words can span multiple protocols. Callers to this function
5881 * need to pass in a list of protocol headers with lookup information along
5882 * and mask that determines which words are valid from the given protocol
5883 * header. rinfo describes other information related to this rule such as
5884 * forwarding IDs, priority of this rule, etc.
5887 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5888 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
5890 struct ice_adv_fltr_mgmt_list_entry *list_elem;
5891 const struct ice_dummy_pkt_offsets *offsets;
5892 struct ice_prot_lkup_ext lkup_exts;
5893 u16 rule_buf_sz, pkt_len, i, rid;
5894 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5895 enum ice_status status = ICE_SUCCESS;
5896 bool remove_rule = false;
5897 const u8 *pkt = NULL;
/* Rebuild the lookup-extraction words from the caller's lkups so the
 * owning recipe can be located.
 */
5900 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
5901 for (i = 0; i < lkups_cnt; i++) {
5904 if (lkups[i].type >= ICE_PROTOCOL_LAST)
5907 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
5911 rid = ice_find_recp(hw, &lkup_exts);
5912 /* If did not find a recipe that match the existing criteria */
5913 if (rid == ICE_MAX_NUM_RECIPES)
5914 return ICE_ERR_PARAM;
5916 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
5917 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5918 /* the rule is already removed */
/* Decide under the lock whether the whole rule goes away or only this
 * VSI's membership in the rule's VSI list.
 */
5921 ice_acquire_lock(rule_lock);
5922 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
5924 } else if (list_elem->vsi_count > 1) {
/* Shared VSI list: drop only this VSI and keep the rule */
5925 list_elem->vsi_list_info->ref_cnt--;
5926 remove_rule = false;
5927 vsi_handle = rinfo->sw_act.vsi_handle;
5928 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5930 vsi_handle = rinfo->sw_act.vsi_handle;
5931 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5933 ice_release_lock(rule_lock);
5936 if (list_elem->vsi_count == 0)
5939 ice_release_lock(rule_lock);
/* Full removal: tell HW to delete the rule (buffer sized as for add) */
5941 struct ice_aqc_sw_rules_elem *s_rule;
5943 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
5944 &pkt_len, &offsets);
5945 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5947 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
5950 return ICE_ERR_NO_MEMORY;
/* For remove, only the rule index matters; act/hdr_len are zeroed */
5951 s_rule->pdata.lkup_tx_rx.act = 0;
5952 s_rule->pdata.lkup_tx_rx.index =
5953 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
5954 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
5955 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5957 ice_aqc_opc_remove_sw_rules, NULL);
/* HW removal succeeded — drop the software bookkeeping entry too */
5958 if (status == ICE_SUCCESS) {
5959 ice_acquire_lock(rule_lock);
5960 LIST_DEL(&list_elem->list_entry);
5961 ice_free(hw, list_elem->lkups);
5962 ice_free(hw, list_elem);
5963 ice_release_lock(rule_lock);
5965 ice_free(hw, s_rule);
5971 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5972 * @hw: pointer to the hardware structure
5973 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5975 * This function is used to remove 1 rule at a time. The removal is based on
5976 * the remove_entry parameter. This function will remove rule for a given
5977 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
5980 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5981 struct ice_rule_query_data *remove_entry)
5983 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5984 struct LIST_HEAD_TYPE *list_head;
5985 struct ice_adv_rule_info rinfo;
5986 struct ice_switch_info *sw;
5988 sw = hw->switch_info;
/* The recipe referenced by the caller must exist */
5989 if (!sw->recp_list[remove_entry->rid].recp_created)
5990 return ICE_ERR_PARAM;
/* Walk that recipe's rule list for a matching fltr_rule_id */
5991 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
5992 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
5994 if (list_itr->rule_info.fltr_rule_id ==
5995 remove_entry->rule_id) {
/* Copy rule_info so the stored entry is not modified; the
 * caller's VSI handle selects which VSI to remove.
 */
5996 rinfo = list_itr->rule_info;
5997 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
5998 return ice_rem_adv_rule(hw, list_itr->lkups,
5999 list_itr->lkups_cnt, &rinfo);
/* No rule with the given ID was found under this recipe */
6002 return ICE_ERR_PARAM;
6006 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6008 * @hw: pointer to the hardware structure
6009 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6011 * This function is used to remove all the rules for a given VSI and as soon
6012 * as removing a rule fails, it will return immediately with the error code,
6013 * else it will return ICE_SUCCESS
6016 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6018 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6019 struct ice_vsi_list_map_info *map_info;
6020 struct LIST_HEAD_TYPE *list_head;
6021 struct ice_adv_rule_info rinfo;
6022 struct ice_switch_info *sw;
6023 enum ice_status status;
6024 u16 vsi_list_id = 0;
6027 sw = hw->switch_info;
/* Scan every created recipe that carries advanced rules */
6028 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6029 if (!sw->recp_list[rid].recp_created)
6031 if (!sw->recp_list[rid].adv_rule)
6033 list_head = &sw->recp_list[rid].filt_rules;
6035 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6036 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Only rules whose VSI list contains this VSI are removed;
 * the skip on a NULL map_info is in the elided lines.
 */
6037 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6041 rinfo = list_itr->rule_info;
6042 rinfo.sw_act.vsi_handle = vsi_handle;
6043 status = ice_rem_adv_rule(hw, list_itr->lkups,
6044 list_itr->lkups_cnt, &rinfo);
6054 * ice_replay_fltr - Replay all the filters stored by a specific list head
6055 * @hw: pointer to the hardware structure
6056 * @list_head: list for which filters needs to be replayed
6057 * @recp_id: Recipe ID for which rules need to be replayed
6059 static enum ice_status
6060 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6062 struct ice_fltr_mgmt_list_entry *itr;
6063 struct LIST_HEAD_TYPE l_head;
6064 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty list */
6066 if (LIST_EMPTY(list_head))
6069 /* Move entries from the given list_head to a temporary l_head so that
6070 * they can be replayed. Otherwise when trying to re-add the same
6071 * filter, the function will return already exists
6073 LIST_REPLACE_INIT(list_head, &l_head);
6075 /* Mark the given list_head empty by reinitializing it so filters
6076 * could be added again by *handler
6078 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6080 struct ice_fltr_list_entry f_entry;
6082 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters replay directly as one rule */
6083 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6084 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6085 if (status != ICE_SUCCESS)
6090 /* Add a filter per VSI separately */
/* Iterate the saved VSI bitmap; each set bit becomes its own
 * fwd-to-VSI filter (the surrounding loop header is elided).
 */
6095 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6097 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit so the add logic can repopulate it */
6100 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6101 f_entry.fltr_info.vsi_handle = vsi_handle;
6102 f_entry.fltr_info.fwd_id.hw_vsi_id =
6103 ice_get_hw_vsi_num(hw, vsi_handle);
6104 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6105 if (recp_id == ICE_SW_LKUP_VLAN)
6106 status = ice_add_vlan_internal(hw, &f_entry);
6108 status = ice_add_rule_internal(hw, recp_id,
6110 if (status != ICE_SUCCESS)
6115 /* Clear the filter management list */
6116 ice_rem_sw_rule_info(hw, &l_head);
6121 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6122 * @hw: pointer to the hardware structure
6124 * NOTE: This function does not clean up partially added filters on error.
6125 * It is up to caller of the function to issue a reset or fail early.
6127 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6129 struct ice_switch_info *sw = hw->switch_info;
6130 enum ice_status status = ICE_SUCCESS;
/* Replay the stored rules of every recipe; stop at the first failure
 * (partially replayed filters are not cleaned up — see header note).
 */
6133 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6134 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6136 status = ice_replay_fltr(hw, i, head);
6137 if (status != ICE_SUCCESS)
6144 * ice_replay_vsi_fltr - Replay filters for requested VSI
6145 * @hw: pointer to the hardware structure
6146 * @vsi_handle: driver VSI handle
6147 * @recp_id: Recipe ID for which rules need to be replayed
6148 * @list_head: list for which filters need to be replayed
6150 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6151 * It is required to pass valid VSI handle.
6153 static enum ice_status
6154 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6155 struct LIST_HEAD_TYPE *list_head)
6157 struct ice_fltr_mgmt_list_entry *itr;
6158 enum ice_status status = ICE_SUCCESS;
6161 if (LIST_EMPTY(list_head))
/* Resolve the HW VSI number once; reused for every replayed filter */
6163 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6165 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6167 struct ice_fltr_list_entry f_entry;
6169 f_entry.fltr_info = itr->fltr_info;
/* Case 1: single-VSI, non-VLAN filter owned by this VSI — replay
 * it as-is.
 */
6170 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6171 itr->fltr_info.vsi_handle == vsi_handle) {
6172 /* update the src in case it is VSI num */
6173 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6174 f_entry.fltr_info.src = hw_vsi_id;
6175 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6176 if (status != ICE_SUCCESS)
/* Case 2: VSI-list filter — only relevant if this VSI is in the map */
6180 if (!itr->vsi_list_info ||
6181 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6183 /* Clearing it so that the logic can add it back */
6184 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6185 f_entry.fltr_info.vsi_handle = vsi_handle;
6186 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6187 /* update the src in case it is VSI num */
6188 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6189 f_entry.fltr_info.src = hw_vsi_id;
6190 if (recp_id == ICE_SW_LKUP_VLAN)
6191 status = ice_add_vlan_internal(hw, &f_entry);
6193 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6194 if (status != ICE_SUCCESS)
6202 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6203 * @hw: pointer to the hardware structure
6204 * @vsi_handle: driver VSI handle
6205 * @list_head: list for which filters need to be replayed
6207 * Replay the advanced rule for the given VSI.
6209 static enum ice_status
6210 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6211 struct LIST_HEAD_TYPE *list_head)
6213 struct ice_rule_query_data added_entry = { 0 };
6214 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6215 enum ice_status status = ICE_SUCCESS;
6217 if (LIST_EMPTY(list_head))
/* Re-add every stored advanced rule that targets this VSI */
6219 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6221 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6222 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Rules bound to other VSIs are skipped (continue is elided) */
6224 if (vsi_handle != rinfo->sw_act.vsi_handle)
6226 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6235 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6236 * @hw: pointer to the hardware structure
6237 * @vsi_handle: driver VSI handle
6239 * Replays filters for requested VSI via vsi_handle.
6241 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6243 struct ice_switch_info *sw = hw->switch_info;
6244 enum ice_status status;
6247 /* Update the recipes that were created */
6248 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6249 struct LIST_HEAD_TYPE *head;
6251 head = &sw->recp_list[i].filt_replay_rules;
/* Basic recipes use the per-VSI replay; advanced ones their own path */
6252 if (!sw->recp_list[i].adv_rule)
6253 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6255 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6256 if (status != ICE_SUCCESS)
6264 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6265 * @hw: pointer to the HW struct
6267 * Deletes the filter replay rules.
6269 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6271 struct ice_switch_info *sw = hw->switch_info;
6277 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6278 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6279 struct LIST_HEAD_TYPE *l_head;
6281 l_head = &sw->recp_list[i].filt_replay_rules;
6282 if (!sw->recp_list[i].adv_rule)
6283 ice_rem_sw_rule_info(hw, l_head);
6285 ice_rem_adv_rule_info(hw, l_head);