/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2019
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy ethernet header used when programming
 * MAC / ethertype / VLAN switch filter rules.
 */
#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN ID
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size of a lookup RX/TX switch rule element carrying the dummy packet */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Size of a lookup RX/TX switch rule element with no packet data */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action switch rule element holding (n) actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list switch rule element holding (n) VSIs */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
61 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
73 u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00,
104 0x00, 0x00, 0x00, 0x00
108 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
120 u8 dummy_gre_udp_packet[] = {
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_OL 12 */
127 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x2F, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
141 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
148 0x00, 0x08, 0x00, 0x00,
152 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
154 { ICE_ETYPE_OL, 12 },
155 { ICE_IPV4_OFOS, 14 },
159 { ICE_VXLAN_GPE, 42 },
163 { ICE_PROTOCOL_LAST, 0 },
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00,
201 0x00, 0x00, 0x00, 0x00
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
212 { ICE_VXLAN_GPE, 42 },
215 { ICE_UDP_ILOS, 84 },
216 { ICE_PROTOCOL_LAST, 0 },
220 u8 dummy_udp_tun_udp_packet[] = {
221 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x08, 0x00, /* ICE_ETYPE_OL 12 */
227 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
228 0x00, 0x01, 0x00, 0x00,
229 0x00, 0x11, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
234 0x00, 0x3a, 0x00, 0x00,
236 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
237 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
251 0x00, 0x08, 0x00, 0x00,
255 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
257 { ICE_ETYPE_OL, 12 },
258 { ICE_IPV4_OFOS, 14 },
259 { ICE_UDP_ILOS, 34 },
260 { ICE_PROTOCOL_LAST, 0 },
264 dummy_udp_packet[] = {
265 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x08, 0x00, /* ICE_ETYPE_OL 12 */
271 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
272 0x00, 0x01, 0x00, 0x00,
273 0x00, 0x11, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
278 0x00, 0x08, 0x00, 0x00,
280 0x00, 0x00, /* 2 bytes for 4 byte alignment */
284 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
286 { ICE_ETYPE_OL, 12 },
287 { ICE_IPV4_OFOS, 14 },
289 { ICE_PROTOCOL_LAST, 0 },
293 dummy_tcp_packet[] = {
294 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
295 0x00, 0x00, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00,
298 0x08, 0x00, /* ICE_ETYPE_OL 12 */
300 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
301 0x00, 0x01, 0x00, 0x00,
302 0x00, 0x06, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x50, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, /* 2 bytes for 4 byte alignment */
316 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
318 { ICE_ETYPE_OL, 12 },
319 { ICE_IPV6_OFOS, 14 },
321 { ICE_PROTOCOL_LAST, 0 },
325 dummy_tcp_ipv6_packet[] = {
326 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
332 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
333 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x50, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, /* 2 bytes for 4 byte alignment */
353 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
355 { ICE_ETYPE_OL, 12 },
356 { ICE_IPV6_OFOS, 14 },
357 { ICE_UDP_ILOS, 54 },
358 { ICE_PROTOCOL_LAST, 0 },
362 dummy_udp_ipv6_packet[] = {
363 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
364 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00,
367 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
381 0x00, 0x08, 0x00, 0x00,
383 0x00, 0x00, /* 2 bytes for 4 byte alignment */
387 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
389 { ICE_IPV4_OFOS, 14 },
392 { ICE_PROTOCOL_LAST, 0 },
396 dummy_udp_gtp_packet[] = {
397 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
402 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x11, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
409 0x00, 0x1c, 0x00, 0x00,
411 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
412 0x00, 0x00, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x85,
415 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
416 0x00, 0x00, 0x00, 0x00,
420 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
422 { ICE_VLAN_OFOS, 14},
424 { ICE_PROTOCOL_LAST, 0 },
428 dummy_pppoe_packet[] = {
429 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
430 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
436 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 18 */
437 0x00, 0x4e, 0x00, 0x21,
439 0x45, 0x00, 0x00, 0x30, /* PDU */
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x11, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
446 /* this is a recipe to profile association bitmap */
447 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
448 ICE_MAX_NUM_PROFILES);
450 /* this is a profile to recipe association bitmap */
451 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
452 ICE_MAX_NUM_RECIPES);
454 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
457 * ice_collect_result_idx - copy result index values
458 * @buf: buffer that contains the result index
459 * @recp: the recipe struct to copy data into
461 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
462 struct ice_sw_recipe *recp)
464 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
465 ice_set_bit(buf->content.result_indx &
466 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
470 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
471 * @hw: pointer to hardware structure
472 * @recps: struct that we need to populate
473 * @rid: recipe ID that we are populating
474 * @refresh_required: true if we should get recipe to profile mapping from FW
476 * This function is used to populate all the necessary entries into our
477 * bookkeeping so that we have a current list of all the recipes that are
478 * programmed in the firmware.
480 static enum ice_status
481 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
482 bool *refresh_required)
484 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
485 struct ice_aqc_recipe_data_elem *tmp;
486 u16 num_recps = ICE_MAX_NUM_RECIPES;
487 struct ice_prot_lkup_ext *lkup_exts;
488 u16 i, sub_recps, fv_word_idx = 0;
489 enum ice_status status;
491 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
493 /* we need a buffer big enough to accommodate all the recipes */
494 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
495 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
497 return ICE_ERR_NO_MEMORY;
499 tmp[0].recipe_indx = rid;
500 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
501 /* non-zero status meaning recipe doesn't exist */
505 /* Get recipe to profile map so that we can get the fv from lkups that
506 * we read for a recipe from FW. Since we want to minimize the number of
507 * times we make this FW call, just make one call and cache the copy
508 * until a new recipe is added. This operation is only required the
509 * first time to get the changes from FW. Then to search existing
510 * entries we don't need to update the cache again until another recipe
513 if (*refresh_required) {
514 ice_get_recp_to_prof_map(hw);
515 *refresh_required = false;
518 /* Start populating all the entries for recps[rid] based on lkups from
519 * firmware. Note that we are only creating the root recipe in our
522 lkup_exts = &recps[rid].lkup_exts;
524 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
525 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
526 struct ice_recp_grp_entry *rg_entry;
527 u8 prof, idx, prot = 0;
531 rg_entry = (struct ice_recp_grp_entry *)
532 ice_malloc(hw, sizeof(*rg_entry));
534 status = ICE_ERR_NO_MEMORY;
538 idx = root_bufs.recipe_indx;
539 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
541 /* Mark all result indices in this chain */
542 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
543 ice_set_bit(root_bufs.content.result_indx &
544 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
546 /* get the first profile that is associated with rid */
547 prof = ice_find_first_bit(recipe_to_profile[idx],
548 ICE_MAX_NUM_PROFILES);
549 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
550 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
552 rg_entry->fv_idx[i] = lkup_indx;
553 rg_entry->fv_mask[i] =
554 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
556 /* If the recipe is a chained recipe then all its
557 * child recipe's result will have a result index.
558 * To fill fv_words we should not use those result
559 * index, we only need the protocol ids and offsets.
560 * We will skip all the fv_idx which stores result
561 * index in them. We also need to skip any fv_idx which
562 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
563 * valid offset value.
565 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
566 rg_entry->fv_idx[i]) ||
567 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
568 rg_entry->fv_idx[i] == 0)
571 ice_find_prot_off(hw, ICE_BLK_SW, prof,
572 rg_entry->fv_idx[i], &prot, &off);
573 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
574 lkup_exts->fv_words[fv_word_idx].off = off;
577 /* populate rg_list with the data from the child entry of this
580 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
582 /* Propagate some data to the recipe database */
583 recps[idx].is_root = is_root;
584 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
585 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
586 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
587 recps[idx].chain_idx = root_bufs.content.result_indx &
588 ~ICE_AQ_RECIPE_RESULT_EN;
589 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
591 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
597 /* Only do the following for root recipes entries */
598 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
599 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
600 recps[idx].root_rid = root_bufs.content.rid &
601 ~ICE_AQ_RECIPE_ID_IS_ROOT;
602 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
605 /* Complete initialization of the root recipe entry */
606 lkup_exts->n_val_words = fv_word_idx;
607 recps[rid].big_recp = (num_recps > 1);
608 recps[rid].n_grp_count = num_recps;
609 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
610 ice_memdup(hw, tmp, recps[rid].n_grp_count *
611 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
612 if (!recps[rid].root_buf)
615 /* Copy result indexes */
616 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
617 ICE_NONDMA_TO_NONDMA);
618 recps[rid].recp_created = true;
626 * ice_get_recp_to_prof_map - updates recipe to profile mapping
627 * @hw: pointer to hardware structure
629 * This function is used to populate recipe_to_profile matrix where index to
630 * this array is the recipe ID and the element is the mapping of which profiles
631 * is this recipe mapped to.
634 ice_get_recp_to_prof_map(struct ice_hw *hw)
636 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
639 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
642 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
643 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
644 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
646 ice_memcpy(profile_to_recipe[i], r_bitmap,
647 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
648 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
649 if (ice_is_bit_set(r_bitmap, j))
650 ice_set_bit(i, recipe_to_profile[j]);
655 * ice_init_def_sw_recp - initialize the recipe book keeping tables
656 * @hw: pointer to the HW struct
658 * Allocate memory for the entire recipe table and initialize the structures/
659 * entries corresponding to basic recipes.
661 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
663 struct ice_sw_recipe *recps;
666 recps = (struct ice_sw_recipe *)
667 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
669 return ICE_ERR_NO_MEMORY;
671 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
672 recps[i].root_rid = i;
673 INIT_LIST_HEAD(&recps[i].filt_rules);
674 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
675 INIT_LIST_HEAD(&recps[i].rg_list);
676 ice_init_lock(&recps[i].filt_rule_lock);
679 hw->switch_info->recp_list = recps;
685 * ice_aq_get_sw_cfg - get switch configuration
686 * @hw: pointer to the hardware structure
687 * @buf: pointer to the result buffer
688 * @buf_size: length of the buffer available for response
689 * @req_desc: pointer to requested descriptor
690 * @num_elems: pointer to number of elements
691 * @cd: pointer to command details structure or NULL
693 * Get switch configuration (0x0200) to be placed in 'buff'.
694 * This admin command returns information such as initial VSI/port number
695 * and switch ID it belongs to.
697 * NOTE: *req_desc is both an input/output parameter.
698 * The caller of this function first calls this function with *request_desc set
699 * to 0. If the response from f/w has *req_desc set to 0, all the switch
700 * configuration information has been returned; if non-zero (meaning not all
701 * the information was returned), the caller should call this function again
702 * with *req_desc set to the previous value returned by f/w to get the
703 * next block of switch configuration information.
705 * *num_elems is output only parameter. This reflects the number of elements
706 * in response buffer. The caller of this function to use *num_elems while
707 * parsing the response buffer.
709 static enum ice_status
710 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
711 u16 buf_size, u16 *req_desc, u16 *num_elems,
712 struct ice_sq_cd *cd)
714 struct ice_aqc_get_sw_cfg *cmd;
715 enum ice_status status;
716 struct ice_aq_desc desc;
718 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
719 cmd = &desc.params.get_sw_conf;
720 cmd->element = CPU_TO_LE16(*req_desc);
722 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
724 *req_desc = LE16_TO_CPU(cmd->element);
725 *num_elems = LE16_TO_CPU(cmd->num_elems);
732 * ice_alloc_sw - allocate resources specific to switch
733 * @hw: pointer to the HW struct
734 * @ena_stats: true to turn on VEB stats
735 * @shared_res: true for shared resource, false for dedicated resource
736 * @sw_id: switch ID returned
737 * @counter_id: VEB counter ID returned
739 * allocates switch resources (SWID and VEB counter) (0x0208)
742 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
745 struct ice_aqc_alloc_free_res_elem *sw_buf;
746 struct ice_aqc_res_elem *sw_ele;
747 enum ice_status status;
750 buf_len = sizeof(*sw_buf);
751 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
752 ice_malloc(hw, buf_len);
754 return ICE_ERR_NO_MEMORY;
756 /* Prepare buffer for switch ID.
757 * The number of resource entries in buffer is passed as 1 since only a
758 * single switch/VEB instance is allocated, and hence a single sw_id
761 sw_buf->num_elems = CPU_TO_LE16(1);
763 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
764 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
765 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
767 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
768 ice_aqc_opc_alloc_res, NULL);
771 goto ice_alloc_sw_exit;
773 sw_ele = &sw_buf->elem[0];
774 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
777 /* Prepare buffer for VEB Counter */
778 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
779 struct ice_aqc_alloc_free_res_elem *counter_buf;
780 struct ice_aqc_res_elem *counter_ele;
782 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
783 ice_malloc(hw, buf_len);
785 status = ICE_ERR_NO_MEMORY;
786 goto ice_alloc_sw_exit;
789 /* The number of resource entries in buffer is passed as 1 since
790 * only a single switch/VEB instance is allocated, and hence a
791 * single VEB counter is requested.
793 counter_buf->num_elems = CPU_TO_LE16(1);
794 counter_buf->res_type =
795 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
796 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
797 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
801 ice_free(hw, counter_buf);
802 goto ice_alloc_sw_exit;
804 counter_ele = &counter_buf->elem[0];
805 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
806 ice_free(hw, counter_buf);
810 ice_free(hw, sw_buf);
815 * ice_free_sw - free resources specific to switch
816 * @hw: pointer to the HW struct
817 * @sw_id: switch ID returned
818 * @counter_id: VEB counter ID returned
820 * free switch resources (SWID and VEB counter) (0x0209)
822 * NOTE: This function frees multiple resources. It continues
823 * releasing other resources even after it encounters error.
824 * The error code returned is the last error it encountered.
826 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
828 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
829 enum ice_status status, ret_status;
832 buf_len = sizeof(*sw_buf);
833 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
834 ice_malloc(hw, buf_len);
836 return ICE_ERR_NO_MEMORY;
838 /* Prepare buffer to free for switch ID res.
839 * The number of resource entries in buffer is passed as 1 since only a
840 * single switch/VEB instance is freed, and hence a single sw_id
843 sw_buf->num_elems = CPU_TO_LE16(1);
844 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
845 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
847 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
848 ice_aqc_opc_free_res, NULL);
851 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
853 /* Prepare buffer to free for VEB Counter resource */
854 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
855 ice_malloc(hw, buf_len);
857 ice_free(hw, sw_buf);
858 return ICE_ERR_NO_MEMORY;
861 /* The number of resource entries in buffer is passed as 1 since only a
862 * single switch/VEB instance is freed, and hence a single VEB counter
865 counter_buf->num_elems = CPU_TO_LE16(1);
866 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
867 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
869 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
870 ice_aqc_opc_free_res, NULL);
872 ice_debug(hw, ICE_DBG_SW,
873 "VEB counter resource could not be freed\n");
877 ice_free(hw, counter_buf);
878 ice_free(hw, sw_buf);
884 * @hw: pointer to the HW struct
885 * @vsi_ctx: pointer to a VSI context struct
886 * @cd: pointer to command details structure or NULL
888 * Add a VSI context to the hardware (0x0210)
891 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
892 struct ice_sq_cd *cd)
894 struct ice_aqc_add_update_free_vsi_resp *res;
895 struct ice_aqc_add_get_update_free_vsi *cmd;
896 struct ice_aq_desc desc;
897 enum ice_status status;
899 cmd = &desc.params.vsi_cmd;
900 res = &desc.params.add_update_free_vsi_res;
902 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
904 if (!vsi_ctx->alloc_from_pool)
905 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
906 ICE_AQ_VSI_IS_VALID);
908 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
910 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
912 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
913 sizeof(vsi_ctx->info), cd);
916 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
917 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
918 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
926 * @hw: pointer to the HW struct
927 * @vsi_ctx: pointer to a VSI context struct
928 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
929 * @cd: pointer to command details structure or NULL
931 * Free VSI context info from hardware (0x0213)
934 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
935 bool keep_vsi_alloc, struct ice_sq_cd *cd)
937 struct ice_aqc_add_update_free_vsi_resp *resp;
938 struct ice_aqc_add_get_update_free_vsi *cmd;
939 struct ice_aq_desc desc;
940 enum ice_status status;
942 cmd = &desc.params.vsi_cmd;
943 resp = &desc.params.add_update_free_vsi_res;
945 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
947 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
949 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
951 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
953 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
954 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
962 * @hw: pointer to the HW struct
963 * @vsi_ctx: pointer to a VSI context struct
964 * @cd: pointer to command details structure or NULL
966 * Update VSI context in the hardware (0x0211)
969 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
970 struct ice_sq_cd *cd)
972 struct ice_aqc_add_update_free_vsi_resp *resp;
973 struct ice_aqc_add_get_update_free_vsi *cmd;
974 struct ice_aq_desc desc;
975 enum ice_status status;
977 cmd = &desc.params.vsi_cmd;
978 resp = &desc.params.add_update_free_vsi_res;
980 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
982 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
984 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
986 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
987 sizeof(vsi_ctx->info), cd);
990 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
991 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
998 * ice_is_vsi_valid - check whether the VSI is valid or not
999 * @hw: pointer to the HW struct
1000 * @vsi_handle: VSI handle
1002 * check whether the VSI is valid or not
1004 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1006 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1010 * ice_get_hw_vsi_num - return the HW VSI number
1011 * @hw: pointer to the HW struct
1012 * @vsi_handle: VSI handle
1014 * return the HW VSI number
1015 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1017 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1019 return hw->vsi_ctx[vsi_handle]->vsi_num;
1023 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1024 * @hw: pointer to the HW struct
1025 * @vsi_handle: VSI handle
1027 * return the VSI context entry for a given VSI handle
1029 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1031 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1035 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1036 * @hw: pointer to the HW struct
1037 * @vsi_handle: VSI handle
1038 * @vsi: VSI context pointer
1040 * save the VSI context entry for a given VSI handle
1043 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1045 hw->vsi_ctx[vsi_handle] = vsi;
1049 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1050 * @hw: pointer to the HW struct
1051 * @vsi_handle: VSI handle
1053 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1055 struct ice_vsi_ctx *vsi;
1058 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1061 ice_for_each_traffic_class(i) {
1062 if (vsi->lan_q_ctx[i]) {
1063 ice_free(hw, vsi->lan_q_ctx[i]);
1064 vsi->lan_q_ctx[i] = NULL;
1070 * ice_clear_vsi_ctx - clear the VSI context entry
1071 * @hw: pointer to the HW struct
1072 * @vsi_handle: VSI handle
1074 * clear the VSI context entry
1076 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1078 struct ice_vsi_ctx *vsi;
1080 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1082 ice_clear_vsi_q_ctx(hw, vsi_handle);
1084 hw->vsi_ctx[vsi_handle] = NULL;
1089 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1090 * @hw: pointer to the HW struct
1092 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1096 for (i = 0; i < ICE_MAX_VSI; i++)
1097 ice_clear_vsi_ctx(hw, i);
1101 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1102 * @hw: pointer to the HW struct
1103 * @vsi_handle: unique VSI handle provided by drivers
1104 * @vsi_ctx: pointer to a VSI context struct
1105 * @cd: pointer to command details structure or NULL
1107 * Add a VSI context to the hardware also add it into the VSI handle list.
1108 * If this function gets called after reset for existing VSIs then update
1109 * with the new HW VSI number in the corresponding VSI handle list entry.
1112 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1113 struct ice_sq_cd *cd)
1115 struct ice_vsi_ctx *tmp_vsi_ctx;
1116 enum ice_status status;
1118 if (vsi_handle >= ICE_MAX_VSI)
1119 return ICE_ERR_PARAM;
1120 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1123 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1125 /* Create a new VSI context */
1126 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1127 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1129 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1130 return ICE_ERR_NO_MEMORY;
1132 *tmp_vsi_ctx = *vsi_ctx;
1134 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1136 /* update with new HW VSI num */
1137 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1144 * ice_free_vsi- free VSI context from hardware and VSI handle list
1145 * @hw: pointer to the HW struct
1146 * @vsi_handle: unique VSI handle
1147 * @vsi_ctx: pointer to a VSI context struct
1148 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1149 * @cd: pointer to command details structure or NULL
1151 * Free VSI context info from hardware as well as from VSI handle list
1154 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1155 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1157 enum ice_status status;
1159 if (!ice_is_vsi_valid(hw, vsi_handle))
1160 return ICE_ERR_PARAM;
1161 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1162 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1164 ice_clear_vsi_ctx(hw, vsi_handle);
1170 * @hw: pointer to the HW struct
1171 * @vsi_handle: unique VSI handle
1172 * @vsi_ctx: pointer to a VSI context struct
1173 * @cd: pointer to command details structure or NULL
1175 * Update VSI context in the hardware
1178 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1179 struct ice_sq_cd *cd)
1181 if (!ice_is_vsi_valid(hw, vsi_handle))
1182 return ICE_ERR_PARAM;
1183 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1184 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1188 * ice_aq_get_vsi_params
1189 * @hw: pointer to the HW struct
1190 * @vsi_ctx: pointer to a VSI context struct
1191 * @cd: pointer to command details structure or NULL
1193 * Get VSI context info from hardware (0x0212)
1196 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1197 struct ice_sq_cd *cd)
1199 struct ice_aqc_add_get_update_free_vsi *cmd;
1200 struct ice_aqc_get_vsi_resp *resp;
1201 struct ice_aq_desc desc;
1202 enum ice_status status;
1204 cmd = &desc.params.vsi_cmd;
1205 resp = &desc.params.get_vsi_resp;
1207 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1209 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1211 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1212 sizeof(vsi_ctx->info), cd);
1214 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1216 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1217 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1224 * ice_aq_add_update_mir_rule - add/update a mirror rule
1225 * @hw: pointer to the HW struct
1226 * @rule_type: Rule Type
1227 * @dest_vsi: VSI number to which packets will be mirrored
1228 * @count: length of the list
1229 * @mr_buf: buffer for list of mirrored VSI numbers
1230 * @cd: pointer to command details structure or NULL
1233 * Add/Update Mirror Rule (0x260).
1236 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1237 u16 count, struct ice_mir_rule_buf *mr_buf,
1238 struct ice_sq_cd *cd, u16 *rule_id)
1240 struct ice_aqc_add_update_mir_rule *cmd;
1241 struct ice_aq_desc desc;
1242 enum ice_status status;
1243 __le16 *mr_list = NULL;
1246 switch (rule_type) {
1247 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1248 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1249 /* Make sure count and mr_buf are set for these rule_types */
1250 if (!(count && mr_buf))
1251 return ICE_ERR_PARAM;
1253 buf_size = count * sizeof(__le16);
1254 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1256 return ICE_ERR_NO_MEMORY;
1258 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1259 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1260 /* Make sure count and mr_buf are not set for these
1263 if (count || mr_buf)
1264 return ICE_ERR_PARAM;
1267 ice_debug(hw, ICE_DBG_SW,
1268 "Error due to unsupported rule_type %u\n", rule_type);
1269 return ICE_ERR_OUT_OF_RANGE;
1272 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1274 /* Pre-process 'mr_buf' items for add/update of virtual port
1275 * ingress/egress mirroring (but not physical port ingress/egress
1281 for (i = 0; i < count; i++) {
1284 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1286 /* Validate specified VSI number, make sure it is less
1287 * than ICE_MAX_VSI, if not return with error.
1289 if (id >= ICE_MAX_VSI) {
1290 ice_debug(hw, ICE_DBG_SW,
1291 "Error VSI index (%u) out-of-range\n",
1293 ice_free(hw, mr_list);
1294 return ICE_ERR_OUT_OF_RANGE;
1297 /* add VSI to mirror rule */
1300 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1301 else /* remove VSI from mirror rule */
1302 mr_list[i] = CPU_TO_LE16(id);
1306 cmd = &desc.params.add_update_rule;
1307 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1308 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1309 ICE_AQC_RULE_ID_VALID_M);
1310 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1311 cmd->num_entries = CPU_TO_LE16(count);
1312 cmd->dest = CPU_TO_LE16(dest_vsi);
1314 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1316 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1318 ice_free(hw, mr_list);
1324 * ice_aq_delete_mir_rule - delete a mirror rule
1325 * @hw: pointer to the HW struct
1326 * @rule_id: Mirror rule ID (to be deleted)
1327 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1328 * otherwise it is returned to the shared pool
1329 * @cd: pointer to command details structure or NULL
1331 * Delete Mirror Rule (0x261).
1334 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1335 struct ice_sq_cd *cd)
1337 struct ice_aqc_delete_mir_rule *cmd;
1338 struct ice_aq_desc desc;
1340 /* rule_id should be in the range 0...63 */
1341 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1342 return ICE_ERR_OUT_OF_RANGE;
1344 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1346 cmd = &desc.params.del_rule;
1347 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1348 cmd->rule_id = CPU_TO_LE16(rule_id);
1351 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1353 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1357 * ice_aq_alloc_free_vsi_list
1358 * @hw: pointer to the HW struct
1359 * @vsi_list_id: VSI list ID returned or used for lookup
1360 * @lkup_type: switch rule filter lookup type
1361 * @opc: switch rules population command type - pass in the command opcode
1363 * allocates or free a VSI list resource
1365 static enum ice_status
1366 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1367 enum ice_sw_lkup_type lkup_type,
1368 enum ice_adminq_opc opc)
1370 struct ice_aqc_alloc_free_res_elem *sw_buf;
1371 struct ice_aqc_res_elem *vsi_ele;
1372 enum ice_status status;
1375 buf_len = sizeof(*sw_buf);
1376 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1377 ice_malloc(hw, buf_len);
1379 return ICE_ERR_NO_MEMORY;
1380 sw_buf->num_elems = CPU_TO_LE16(1);
1382 if (lkup_type == ICE_SW_LKUP_MAC ||
1383 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1384 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1385 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1386 lkup_type == ICE_SW_LKUP_PROMISC ||
1387 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1388 lkup_type == ICE_SW_LKUP_LAST) {
1389 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1390 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1392 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1394 status = ICE_ERR_PARAM;
1395 goto ice_aq_alloc_free_vsi_list_exit;
1398 if (opc == ice_aqc_opc_free_res)
1399 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1401 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1403 goto ice_aq_alloc_free_vsi_list_exit;
1405 if (opc == ice_aqc_opc_alloc_res) {
1406 vsi_ele = &sw_buf->elem[0];
1407 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1410 ice_aq_alloc_free_vsi_list_exit:
1411 ice_free(hw, sw_buf);
1416 * ice_aq_set_storm_ctrl - Sets storm control configuration
1417 * @hw: pointer to the HW struct
1418 * @bcast_thresh: represents the upper threshold for broadcast storm control
1419 * @mcast_thresh: represents the upper threshold for multicast storm control
1420 * @ctl_bitmask: storm control control knobs
1422 * Sets the storm control configuration (0x0280)
1425 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1428 struct ice_aqc_storm_cfg *cmd;
1429 struct ice_aq_desc desc;
1431 cmd = &desc.params.storm_conf;
1433 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1435 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1436 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1437 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1439 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1443 * ice_aq_get_storm_ctrl - gets storm control configuration
1444 * @hw: pointer to the HW struct
1445 * @bcast_thresh: represents the upper threshold for broadcast storm control
1446 * @mcast_thresh: represents the upper threshold for multicast storm control
1447 * @ctl_bitmask: storm control control knobs
1449 * Gets the storm control configuration (0x0281)
1452 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1455 enum ice_status status;
1456 struct ice_aq_desc desc;
1458 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1460 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1462 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1465 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1468 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1471 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1478 * ice_aq_sw_rules - add/update/remove switch rules
1479 * @hw: pointer to the HW struct
1480 * @rule_list: pointer to switch rule population list
1481 * @rule_list_sz: total size of the rule list in bytes
1482 * @num_rules: number of switch rules in the rule_list
1483 * @opc: switch rules population command type - pass in the command opcode
1484 * @cd: pointer to command details structure or NULL
1486 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1488 static enum ice_status
1489 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1490 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1492 struct ice_aq_desc desc;
1494 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1496 if (opc != ice_aqc_opc_add_sw_rules &&
1497 opc != ice_aqc_opc_update_sw_rules &&
1498 opc != ice_aqc_opc_remove_sw_rules)
1499 return ICE_ERR_PARAM;
1501 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1503 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1504 desc.params.sw_rules.num_rules_fltr_entry_index =
1505 CPU_TO_LE16(num_rules);
1506 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1510 * ice_aq_add_recipe - add switch recipe
1511 * @hw: pointer to the HW struct
1512 * @s_recipe_list: pointer to switch rule population list
1513 * @num_recipes: number of switch recipes in the list
1514 * @cd: pointer to command details structure or NULL
1519 ice_aq_add_recipe(struct ice_hw *hw,
1520 struct ice_aqc_recipe_data_elem *s_recipe_list,
1521 u16 num_recipes, struct ice_sq_cd *cd)
1523 struct ice_aqc_add_get_recipe *cmd;
1524 struct ice_aq_desc desc;
1527 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1528 cmd = &desc.params.add_get_recipe;
1529 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1531 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1532 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1534 buf_size = num_recipes * sizeof(*s_recipe_list);
1536 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1540 * ice_aq_get_recipe - get switch recipe
1541 * @hw: pointer to the HW struct
1542 * @s_recipe_list: pointer to switch rule population list
1543 * @num_recipes: pointer to the number of recipes (input and output)
1544 * @recipe_root: root recipe number of recipe(s) to retrieve
1545 * @cd: pointer to command details structure or NULL
1549 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1550 * On output, *num_recipes will equal the number of entries returned in
1553 * The caller must supply enough space in s_recipe_list to hold all possible
1554 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1557 ice_aq_get_recipe(struct ice_hw *hw,
1558 struct ice_aqc_recipe_data_elem *s_recipe_list,
1559 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1561 struct ice_aqc_add_get_recipe *cmd;
1562 struct ice_aq_desc desc;
1563 enum ice_status status;
1566 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1567 return ICE_ERR_PARAM;
1569 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1570 cmd = &desc.params.add_get_recipe;
1571 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1573 cmd->return_index = CPU_TO_LE16(recipe_root);
1574 cmd->num_sub_recipes = 0;
1576 buf_size = *num_recipes * sizeof(*s_recipe_list);
1578 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1579 /* cppcheck-suppress constArgument */
1580 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1586 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1587 * @hw: pointer to the HW struct
1588 * @profile_id: package profile ID to associate the recipe with
1589 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1590 * @cd: pointer to command details structure or NULL
1591 * Recipe to profile association (0x0291)
1594 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1595 struct ice_sq_cd *cd)
1597 struct ice_aqc_recipe_to_profile *cmd;
1598 struct ice_aq_desc desc;
1600 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1601 cmd = &desc.params.recipe_to_profile;
1602 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1603 cmd->profile_id = CPU_TO_LE16(profile_id);
1604 /* Set the recipe ID bit in the bitmask to let the device know which
1605 * profile we are associating the recipe to
1607 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1608 ICE_NONDMA_TO_NONDMA);
1610 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1614 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1615 * @hw: pointer to the HW struct
1616 * @profile_id: package profile ID to associate the recipe with
1617 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1618 * @cd: pointer to command details structure or NULL
1619 * Associate profile ID with given recipe (0x0293)
1622 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1623 struct ice_sq_cd *cd)
1625 struct ice_aqc_recipe_to_profile *cmd;
1626 struct ice_aq_desc desc;
1627 enum ice_status status;
1629 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1630 cmd = &desc.params.recipe_to_profile;
1631 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1632 cmd->profile_id = CPU_TO_LE16(profile_id);
1634 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1636 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1637 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1643 * ice_alloc_recipe - add recipe resource
1644 * @hw: pointer to the hardware structure
1645 * @rid: recipe ID returned as response to AQ call
1647 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1649 struct ice_aqc_alloc_free_res_elem *sw_buf;
1650 enum ice_status status;
1653 buf_len = sizeof(*sw_buf);
1654 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1656 return ICE_ERR_NO_MEMORY;
1658 sw_buf->num_elems = CPU_TO_LE16(1);
1659 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1660 ICE_AQC_RES_TYPE_S) |
1661 ICE_AQC_RES_TYPE_FLAG_SHARED);
1662 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1663 ice_aqc_opc_alloc_res, NULL);
1665 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1666 ice_free(hw, sw_buf);
1671 /* ice_init_port_info - Initialize port_info with switch configuration data
1672 * @pi: pointer to port_info
1673 * @vsi_port_num: VSI number or port number
1674 * @type: Type of switch element (port or VSI)
1675 * @swid: switch ID of the switch the element is attached to
1676 * @pf_vf_num: PF or VF number
1677 * @is_vf: true if the element is a VF, false otherwise
1680 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1681 u16 swid, u16 pf_vf_num, bool is_vf)
1684 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1685 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1687 pi->pf_vf_num = pf_vf_num;
1689 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1690 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1693 ice_debug(pi->hw, ICE_DBG_SW,
1694 "incorrect VSI/port type received\n");
1699 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1700 * @hw: pointer to the hardware structure
1702 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1704 struct ice_aqc_get_sw_cfg_resp *rbuf;
1705 enum ice_status status;
1706 u16 num_total_ports;
1712 num_total_ports = 1;
1714 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1715 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1718 return ICE_ERR_NO_MEMORY;
1720 /* Multiple calls to ice_aq_get_sw_cfg may be required
1721 * to get all the switch configuration information. The need
1722 * for additional calls is indicated by ice_aq_get_sw_cfg
1723 * writing a non-zero value in req_desc
1726 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1727 &req_desc, &num_elems, NULL);
1732 for (i = 0; i < num_elems; i++) {
1733 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1734 u16 pf_vf_num, swid, vsi_port_num;
1738 ele = rbuf[i].elements;
1739 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1740 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1742 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1743 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1745 swid = LE16_TO_CPU(ele->swid);
1747 if (LE16_TO_CPU(ele->pf_vf_num) &
1748 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1751 type = LE16_TO_CPU(ele->vsi_port_num) >>
1752 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1755 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1756 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1757 if (j == num_total_ports) {
1758 ice_debug(hw, ICE_DBG_SW,
1759 "more ports than expected\n");
1760 status = ICE_ERR_CFG;
1763 ice_init_port_info(hw->port_info,
1764 vsi_port_num, type, swid,
1772 } while (req_desc && !status);
1775 ice_free(hw, (void *)rbuf);
1780 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1781 * @hw: pointer to the hardware structure
1782 * @fi: filter info structure to fill/update
1784 * This helper function populates the lb_en and lan_en elements of the provided
1785 * ice_fltr_info struct using the switch's type and characteristics of the
1786 * switch rule being configured.
1788 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1792 if ((fi->flag & ICE_FLTR_TX) &&
1793 (fi->fltr_act == ICE_FWD_TO_VSI ||
1794 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1795 fi->fltr_act == ICE_FWD_TO_Q ||
1796 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1797 /* Setting LB for prune actions will result in replicated
1798 * packets to the internal switch that will be dropped.
1800 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1803 /* Set lan_en to TRUE if
1804 * 1. The switch is a VEB AND
1806 * 2.1 The lookup is a directional lookup like ethertype,
1807 * promiscuous, ethertype-MAC, promiscuous-VLAN
1808 * and default-port OR
1809 * 2.2 The lookup is VLAN, OR
1810 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1811 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1815 * The switch is a VEPA.
1817 * In all other cases, the LAN enable has to be set to false.
1820 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1821 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1822 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1823 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1824 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1825 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1826 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1827 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1828 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1829 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1838 * ice_fill_sw_rule - Helper function to fill switch rule structure
1839 * @hw: pointer to the hardware structure
1840 * @f_info: entry containing packet forwarding information
1841 * @s_rule: switch rule structure to be filled in based on mac_entry
1842 * @opc: switch rules population command type - pass in the command opcode
1845 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1846 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1848 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1856 if (opc == ice_aqc_opc_remove_sw_rules) {
1857 s_rule->pdata.lkup_tx_rx.act = 0;
1858 s_rule->pdata.lkup_tx_rx.index =
1859 CPU_TO_LE16(f_info->fltr_rule_id);
1860 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1864 eth_hdr_sz = sizeof(dummy_eth_header);
1865 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1867 /* initialize the ether header with a dummy header */
1868 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1869 ice_fill_sw_info(hw, f_info);
1871 switch (f_info->fltr_act) {
1872 case ICE_FWD_TO_VSI:
1873 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1874 ICE_SINGLE_ACT_VSI_ID_M;
1875 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1876 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1877 ICE_SINGLE_ACT_VALID_BIT;
1879 case ICE_FWD_TO_VSI_LIST:
1880 act |= ICE_SINGLE_ACT_VSI_LIST;
1881 act |= (f_info->fwd_id.vsi_list_id <<
1882 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1883 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1884 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1885 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1886 ICE_SINGLE_ACT_VALID_BIT;
1889 act |= ICE_SINGLE_ACT_TO_Q;
1890 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1891 ICE_SINGLE_ACT_Q_INDEX_M;
1893 case ICE_DROP_PACKET:
1894 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1895 ICE_SINGLE_ACT_VALID_BIT;
1897 case ICE_FWD_TO_QGRP:
1898 q_rgn = f_info->qgrp_size > 0 ?
1899 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1900 act |= ICE_SINGLE_ACT_TO_Q;
1901 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1902 ICE_SINGLE_ACT_Q_INDEX_M;
1903 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1904 ICE_SINGLE_ACT_Q_REGION_M;
1911 act |= ICE_SINGLE_ACT_LB_ENABLE;
1913 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1915 switch (f_info->lkup_type) {
1916 case ICE_SW_LKUP_MAC:
1917 daddr = f_info->l_data.mac.mac_addr;
1919 case ICE_SW_LKUP_VLAN:
1920 vlan_id = f_info->l_data.vlan.vlan_id;
1921 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1922 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1923 act |= ICE_SINGLE_ACT_PRUNE;
1924 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1927 case ICE_SW_LKUP_ETHERTYPE_MAC:
1928 daddr = f_info->l_data.ethertype_mac.mac_addr;
1930 case ICE_SW_LKUP_ETHERTYPE:
1931 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1932 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1934 case ICE_SW_LKUP_MAC_VLAN:
1935 daddr = f_info->l_data.mac_vlan.mac_addr;
1936 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1938 case ICE_SW_LKUP_PROMISC_VLAN:
1939 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1941 case ICE_SW_LKUP_PROMISC:
1942 daddr = f_info->l_data.mac_vlan.mac_addr;
1948 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1949 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1950 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1952 /* Recipe set depending on lookup type */
1953 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1954 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1955 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1958 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1959 ICE_NONDMA_TO_NONDMA);
1961 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1962 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1963 *off = CPU_TO_BE16(vlan_id);
1966 /* Create the switch rule with the final dummy Ethernet header */
1967 if (opc != ice_aqc_opc_update_sw_rules)
1968 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1972 * ice_add_marker_act
1973 * @hw: pointer to the hardware structure
1974 * @m_ent: the management entry for which sw marker needs to be added
1975 * @sw_marker: sw marker to tag the Rx descriptor with
1976 * @l_id: large action resource ID
1978 * Create a large action to hold software marker and update the switch rule
1979 * entry pointed by m_ent with newly created large action
1981 static enum ice_status
1982 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1983 u16 sw_marker, u16 l_id)
1985 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1986 /* For software marker we need 3 large actions
1987 * 1. FWD action: FWD TO VSI or VSI LIST
1988 * 2. GENERIC VALUE action to hold the profile ID
1989 * 3. GENERIC VALUE action to hold the software marker ID
1991 const u16 num_lg_acts = 3;
1992 enum ice_status status;
1998 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1999 return ICE_ERR_PARAM;
2001 /* Create two back-to-back switch rules and submit them to the HW using
2002 * one memory buffer:
2006 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2007 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2008 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2010 return ICE_ERR_NO_MEMORY;
2012 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2014 /* Fill in the first switch rule i.e. large action */
2015 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2016 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2017 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2019 /* First action VSI forwarding or VSI list forwarding depending on how
2022 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2023 m_ent->fltr_info.fwd_id.hw_vsi_id;
2025 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2026 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2027 ICE_LG_ACT_VSI_LIST_ID_M;
2028 if (m_ent->vsi_count > 1)
2029 act |= ICE_LG_ACT_VSI_LIST;
2030 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2032 /* Second action descriptor type */
2033 act = ICE_LG_ACT_GENERIC;
2035 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2036 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2038 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2039 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2041 /* Third action Marker value */
2042 act |= ICE_LG_ACT_GENERIC;
2043 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2044 ICE_LG_ACT_GENERIC_VALUE_M;
2046 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2048 /* call the fill switch rule to fill the lookup Tx Rx structure */
2049 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2050 ice_aqc_opc_update_sw_rules);
2052 /* Update the action to point to the large action ID */
2053 rx_tx->pdata.lkup_tx_rx.act =
2054 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2055 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2056 ICE_SINGLE_ACT_PTR_VAL_M));
2058 /* Use the filter rule ID of the previously created rule with single
2059 * act. Once the update happens, hardware will treat this as large
2062 rx_tx->pdata.lkup_tx_rx.index =
2063 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2065 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2066 ice_aqc_opc_update_sw_rules, NULL);
2068 m_ent->lg_act_idx = l_id;
2069 m_ent->sw_marker_id = sw_marker;
2072 ice_free(hw, lg_act);
2077 * ice_add_counter_act - add/update filter rule with counter action
2078 * @hw: pointer to the hardware structure
2079 * @m_ent: the management entry for which counter needs to be added
2080 * @counter_id: VLAN counter ID returned as part of allocate resource
2081 * @l_id: large action resource ID
2083 static enum ice_status
2084 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2085 u16 counter_id, u16 l_id)
2087 struct ice_aqc_sw_rules_elem *lg_act;
2088 struct ice_aqc_sw_rules_elem *rx_tx;
2089 enum ice_status status;
2090 /* 2 actions will be added while adding a large action counter */
2091 const int num_acts = 2;
2098 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2099 return ICE_ERR_PARAM;
2101 /* Create two back-to-back switch rules and submit them to the HW using
2102 * one memory buffer:
2106 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2107 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2108 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2111 return ICE_ERR_NO_MEMORY;
2113 rx_tx = (struct ice_aqc_sw_rules_elem *)
2114 ((u8 *)lg_act + lg_act_size);
2116 /* Fill in the first switch rule i.e. large action */
2117 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2118 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2119 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2121 /* First action VSI forwarding or VSI list forwarding depending on how
2124 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2125 m_ent->fltr_info.fwd_id.hw_vsi_id;
2127 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2128 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2129 ICE_LG_ACT_VSI_LIST_ID_M;
2130 if (m_ent->vsi_count > 1)
2131 act |= ICE_LG_ACT_VSI_LIST;
2132 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2134 /* Second action counter ID */
2135 act = ICE_LG_ACT_STAT_COUNT;
2136 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2137 ICE_LG_ACT_STAT_COUNT_M;
2138 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2140 /* call the fill switch rule to fill the lookup Tx Rx structure */
2141 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2142 ice_aqc_opc_update_sw_rules);
2144 act = ICE_SINGLE_ACT_PTR;
2145 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2146 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2148 /* Use the filter rule ID of the previously created rule with single
2149 * act. Once the update happens, hardware will treat this as large
2152 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2153 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2155 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2156 ice_aqc_opc_update_sw_rules, NULL);
2158 m_ent->lg_act_idx = l_id;
2159 m_ent->counter_index = counter_id;
2162 ice_free(hw, lg_act);
2167 * ice_create_vsi_list_map
2168 * @hw: pointer to the hardware structure
2169 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2170 * @num_vsi: number of VSI handles in the array
2171 * @vsi_list_id: VSI list ID generated as part of allocate resource
2173 * Helper function to create a new entry of VSI list ID to VSI mapping
2174 * using the given VSI list ID
2176 static struct ice_vsi_list_map_info *
2177 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2180 struct ice_switch_info *sw = hw->switch_info;
2181 struct ice_vsi_list_map_info *v_map;
2184 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2189 v_map->vsi_list_id = vsi_list_id;
2191 for (i = 0; i < num_vsi; i++)
2192 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2194 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2199 * ice_update_vsi_list_rule
2200 * @hw: pointer to the hardware structure
2201 * @vsi_handle_arr: array of VSI handles to form a VSI list
2202 * @num_vsi: number of VSI handles in the array
2203 * @vsi_list_id: VSI list ID generated as part of allocate resource
2204 * @remove: Boolean value to indicate if this is a remove action
2205 * @opc: switch rules population command type - pass in the command opcode
2206 * @lkup_type: lookup type of the filter
2208 * Call AQ command to add a new switch rule or update existing switch rule
2209 * using the given VSI list ID
2211 static enum ice_status
2212 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2213 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2214 enum ice_sw_lkup_type lkup_type)
2216 struct ice_aqc_sw_rules_elem *s_rule;
2217 enum ice_status status;
2223 return ICE_ERR_PARAM;
2225 if (lkup_type == ICE_SW_LKUP_MAC ||
2226 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2227 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2228 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2229 lkup_type == ICE_SW_LKUP_PROMISC ||
2230 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2231 lkup_type == ICE_SW_LKUP_LAST)
2232 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2233 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2234 else if (lkup_type == ICE_SW_LKUP_VLAN)
2235 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2236 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2238 return ICE_ERR_PARAM;
2240 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2241 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2243 return ICE_ERR_NO_MEMORY;
2244 for (i = 0; i < num_vsi; i++) {
2245 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2246 status = ICE_ERR_PARAM;
2249 /* AQ call requires hw_vsi_id(s) */
2250 s_rule->pdata.vsi_list.vsi[i] =
2251 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2254 s_rule->type = CPU_TO_LE16(type);
2255 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2256 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2258 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2261 ice_free(hw, s_rule);
2266 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2267 * @hw: pointer to the HW struct
2268 * @vsi_handle_arr: array of VSI handles to form a VSI list
2269 * @num_vsi: number of VSI handles in the array
2270 * @vsi_list_id: stores the ID of the VSI list to be created
2271 * @lkup_type: switch rule filter's lookup type
/* Allocates a VSI list resource via the admin queue, then fills the new list
 * with the supplied VSI handles. On success, *vsi_list_id holds the ID of the
 * freshly allocated list.
 */
2273 static enum ice_status
2274 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2275 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2277 enum ice_status status;
2279 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2280 ice_aqc_opc_alloc_res);
/* NOTE(review): an early return on allocation failure is presumably between
 * the alloc call and the update below — not visible in this view; confirm.
 */
2284 /* Update the newly created VSI list to include the specified VSIs */
2285 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2286 *vsi_list_id, false,
2287 ice_aqc_opc_add_sw_rules, lkup_type);
2291 * ice_create_pkt_fwd_rule
2292 * @hw: pointer to the hardware structure
2293 * @f_entry: entry containing packet forwarding information
2295 * Create switch rule with given filter information and add an entry
2296 * to the corresponding filter management list to track this switch rule
2299 static enum ice_status
2300 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2301 struct ice_fltr_list_entry *f_entry)
2303 struct ice_fltr_mgmt_list_entry *fm_entry;
2304 struct ice_aqc_sw_rules_elem *s_rule;
2305 enum ice_sw_lkup_type l_type;
2306 struct ice_sw_recipe *recp;
2307 enum ice_status status;
/* Rule buffer is sized to carry the dummy Ethernet header required by the
 * switch rule AQ command (see ICE_SW_RULE_RX_TX_ETH_HDR_SIZE).
 */
2309 s_rule = (struct ice_aqc_sw_rules_elem *)
2310 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE)
2312 return ICE_ERR_NO_MEMORY;
2313 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2314 ice_malloc(hw, sizeof(*fm_entry));
2316 status = ICE_ERR_NO_MEMORY;
2317 goto ice_create_pkt_fwd_rule_exit;
2320 fm_entry->fltr_info = f_entry->fltr_info;
2322 /* Initialize all the fields for the management entry */
2323 fm_entry->vsi_count = 1;
2324 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2325 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2326 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2328 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2329 ice_aqc_opc_add_sw_rules);
2331 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2332 ice_aqc_opc_add_sw_rules, NULL);
2334 ice_free(hw, fm_entry);
2335 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the programmed rule index; record it in both the caller's entry
 * and the book-keeping entry so the rule can later be updated/removed.
 */
2338 f_entry->fltr_info.fltr_rule_id =
2339 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2340 fm_entry->fltr_info.fltr_rule_id =
2341 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2343 /* The book keeping entries will get removed when base driver
2344 * calls remove filter AQ command
2346 l_type = fm_entry->fltr_info.lkup_type;
2347 recp = &hw->switch_info->recp_list[l_type];
2348 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2350 ice_create_pkt_fwd_rule_exit:
2351 ice_free(hw, s_rule);
2356 * ice_update_pkt_fwd_rule
2357 * @hw: pointer to the hardware structure
2358 * @f_info: filter information for switch rule
2360 * Call AQ command to update a previously created switch rule with a
/* Reprograms an existing switch rule in place: the rule is re-filled from
 * f_info and resubmitted with the update opcode at its original rule index
 * (f_info->fltr_rule_id).
 */
2363 static enum ice_status
2364 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2366 struct ice_aqc_sw_rules_elem *s_rule;
2367 enum ice_status status;
2369 s_rule = (struct ice_aqc_sw_rules_elem *)
2370 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2372 return ICE_ERR_NO_MEMORY;
2374 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* The index identifies which existing rule the firmware must update */
2376 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2378 /* Update switch rule with new rule set to forward VSI list */
2379 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2380 ice_aqc_opc_update_sw_rules, NULL);
2382 ice_free(hw, s_rule);
2387 * ice_update_sw_rule_bridge_mode
2388 * @hw: pointer to the HW struct
2390 * Updates unicast switch filter rules based on VEB/VEPA mode
2392 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2394 struct ice_switch_info *sw = hw->switch_info;
2395 struct ice_fltr_mgmt_list_entry *fm_entry;
2396 enum ice_status status = ICE_SUCCESS;
2397 struct LIST_HEAD_TYPE *rule_head;
2398 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC-lookup recipe list is walked; other recipes are unaffected */
2400 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2401 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2403 ice_acquire_lock(rule_lock);
2404 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2406 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2407 u8 *addr = fi->l_data.mac.mac_addr;
2409 /* Update unicast Tx rules to reflect the selected
/* Only Tx-flagged unicast forwarding rules are resubmitted to firmware */
2412 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2413 (fi->fltr_act == ICE_FWD_TO_VSI ||
2414 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2415 fi->fltr_act == ICE_FWD_TO_Q ||
2416 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2417 status = ice_update_pkt_fwd_rule(hw, fi);
2423 ice_release_lock(rule_lock);
2429 * ice_add_update_vsi_list
2430 * @hw: pointer to the hardware structure
2431 * @m_entry: pointer to current filter management list entry
2432 * @cur_fltr: filter information from the book keeping entry
2433 * @new_fltr: filter information with the new VSI to be added
2435 * Call AQ command to add or update previously created VSI list with new VSI.
2437 * Helper function to do book keeping associated with adding filter information
2438 * The algorithm to do the book keeping is described below :
2439 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2440 * if only one VSI has been added till now
2441 * Allocate a new VSI list and add two VSIs
2442 * to this list using switch rule command
2443 * Update the previously created switch rule with the
2444 * newly created VSI list ID
2445 * if a VSI list was previously created
2446 * Add the new VSI to the previously created VSI list set
2447 * using the update switch rule command
2449 static enum ice_status
2450 ice_add_update_vsi_list(struct ice_hw *hw,
2451 struct ice_fltr_mgmt_list_entry *m_entry,
2452 struct ice_fltr_info *cur_fltr,
2453 struct ice_fltr_info *new_fltr)
2455 enum ice_status status = ICE_SUCCESS;
2456 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be expanded into a VSI list */
2458 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2459 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2460 return ICE_ERR_NOT_IMPL;
2462 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2463 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2464 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2465 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2466 return ICE_ERR_NOT_IMPL;
2468 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2469 /* Only one entry existed in the mapping and it was not already
2470 * a part of a VSI list. So, create a VSI list with the old and
2473 struct ice_fltr_info tmp_fltr;
2474 u16 vsi_handle_arr[2];
2476 /* A rule already exists with the new VSI being added */
2477 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2478 return ICE_ERR_ALREADY_EXISTS;
2480 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2481 vsi_handle_arr[1] = new_fltr->vsi_handle;
2482 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2484 new_fltr->lkup_type);
/* Re-point the existing single-VSI switch rule at the new VSI list */
2488 tmp_fltr = *new_fltr;
2489 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2490 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2491 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2492 /* Update the previous switch rule of "MAC forward to VSI" to
2493 * "MAC fwd to VSI list"
2495 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep book-keeping in sync with what was just programmed */
2499 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2500 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2501 m_entry->vsi_list_info =
2502 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2505 /* If this entry was large action then the large action needs
2506 * to be updated to point to FWD to VSI list
2508 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2510 ice_add_marker_act(hw, m_entry,
2511 m_entry->sw_marker_id,
2512 m_entry->lg_act_idx);
/* else branch: a VSI list already exists — just append the new VSI */
2514 u16 vsi_handle = new_fltr->vsi_handle;
2515 enum ice_adminq_opc opcode;
2517 if (!m_entry->vsi_list_info)
2520 /* A rule already exists with the new VSI being added */
2521 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2524 /* Update the previously created VSI list set with
2525 * the new VSI ID passed in
2527 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2528 opcode = ice_aqc_opc_update_sw_rules;
2530 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2531 vsi_list_id, false, opcode,
2532 new_fltr->lkup_type);
2533 /* update VSI list mapping info with new VSI ID */
2535 ice_set_bit(vsi_handle,
2536 m_entry->vsi_list_info->vsi_map);
2539 m_entry->vsi_count++;
2544 * ice_find_rule_entry - Search a rule entry
2545 * @hw: pointer to the hardware structure
2546 * @recp_id: lookup type for which the specified rule needs to be searched
2547 * @f_info: rule information
2549 * Helper function to search for a given rule entry
2550 * Returns pointer to entry storing the rule if found
/* NOTE(review): caller is expected to hold the recipe's filt_rule_lock —
 * this helper walks filt_rules without taking any lock itself.
 */
2552 static struct ice_fltr_mgmt_list_entry *
2553 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2555 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2556 struct ice_switch_info *sw = hw->switch_info;
2557 struct LIST_HEAD_TYPE *list_head;
2559 list_head = &sw->recp_list[recp_id].filt_rules;
2560 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match requires identical lookup data AND identical Rx/Tx flag */
2562 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2563 sizeof(f_info->l_data)) &&
2564 f_info->flag == list_itr->fltr_info.flag) {
2573 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2574 * @hw: pointer to the hardware structure
2575 * @recp_id: lookup type for which VSI lists needs to be searched
2576 * @vsi_handle: VSI handle to be found in VSI list
2577 * @vsi_list_id: VSI list ID found containing vsi_handle
2579 * Helper function to search a VSI list with single entry containing given VSI
2580 * handle element. This can be extended further to search VSI list with more
2581 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2583 static struct ice_vsi_list_map_info *
2584 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2587 struct ice_vsi_list_map_info *map_info = NULL;
2588 struct ice_switch_info *sw = hw->switch_info;
2589 struct LIST_HEAD_TYPE *list_head;
2591 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes store a different list-entry type, so the walk is
 * duplicated for the two entry layouts.
 */
2592 if (sw->recp_list[recp_id].adv_rule) {
2593 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2595 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2596 ice_adv_fltr_mgmt_list_entry,
2598 if (list_itr->vsi_list_info) {
2599 map_info = list_itr->vsi_list_info;
2600 if (ice_is_bit_set(map_info->vsi_map,
2602 *vsi_list_id = map_info->vsi_list_id;
/* Non-advanced (legacy) rules: additionally require vsi_count == 1 */
2608 struct ice_fltr_mgmt_list_entry *list_itr;
2610 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2611 ice_fltr_mgmt_list_entry,
2613 if (list_itr->vsi_count == 1 &&
2614 list_itr->vsi_list_info) {
2615 map_info = list_itr->vsi_list_info;
2616 if (ice_is_bit_set(map_info->vsi_map,
2618 *vsi_list_id = map_info->vsi_list_id;
2628 * ice_add_rule_internal - add rule for a given lookup type
2629 * @hw: pointer to the hardware structure
2630 * @recp_id: lookup type (recipe ID) for which rule has to be added
2631 * @f_entry: structure containing MAC forwarding information
2633 * Adds or updates the rule lists for a given recipe
2635 static enum ice_status
2636 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2637 struct ice_fltr_list_entry *f_entry)
2639 struct ice_switch_info *sw = hw->switch_info;
2640 struct ice_fltr_info *new_fltr, *cur_fltr;
2641 struct ice_fltr_mgmt_list_entry *m_entry;
2642 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2643 enum ice_status status = ICE_SUCCESS;
2645 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2646 return ICE_ERR_PARAM;
2648 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2649 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2650 f_entry->fltr_info.fwd_id.hw_vsi_id =
2651 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2653 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2655 ice_acquire_lock(rule_lock);
2656 new_fltr = &f_entry->fltr_info;
/* Source depends on direction: Rx rules use the port, Tx rules use the VSI */
2657 if (new_fltr->flag & ICE_FLTR_RX)
2658 new_fltr->src = hw->port_info->lport;
2659 else if (new_fltr->flag & ICE_FLTR_TX)
2661 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule -> create one; existing rule -> fold VSI into its list */
2663 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2665 status = ice_create_pkt_fwd_rule(hw, f_entry);
2666 goto exit_add_rule_internal;
2669 cur_fltr = &m_entry->fltr_info;
2670 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2672 exit_add_rule_internal:
2673 ice_release_lock(rule_lock);
2678 * ice_remove_vsi_list_rule
2679 * @hw: pointer to the hardware structure
2680 * @vsi_list_id: VSI list ID generated as part of allocate resource
2681 * @lkup_type: switch rule filter lookup type
2683 * The VSI list should be emptied before this function is called to remove the
2686 static enum ice_status
2687 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2688 enum ice_sw_lkup_type lkup_type)
2690 struct ice_aqc_sw_rules_elem *s_rule;
2691 enum ice_status status;
/* Zero-VSI rule size: only the list header is needed for a clear */
2694 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2695 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2697 return ICE_ERR_NO_MEMORY;
2699 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2700 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2702 /* Free the vsi_list resource that we allocated. It is assumed that the
2703 * list is empty at this point.
2705 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2706 ice_aqc_opc_free_res);
2708 ice_free(hw, s_rule);
2713 * ice_rem_update_vsi_list
2714 * @hw: pointer to the hardware structure
2715 * @vsi_handle: VSI handle of the VSI to remove
2716 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes one VSI from a rule's VSI list. If only one VSI remains afterwards
 * (non-VLAN), the rule is converted back to a plain FWD_TO_VSI rule and the
 * now-unneeded VSI list resource is freed.
 */
2719 static enum ice_status
2720 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2721 struct ice_fltr_mgmt_list_entry *fm_list)
2723 enum ice_sw_lkup_type lkup_type;
2724 enum ice_status status = ICE_SUCCESS;
2727 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2728 fm_list->vsi_count == 0)
2729 return ICE_ERR_PARAM;
2731 /* A rule with the VSI being removed does not exist */
2732 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2733 return ICE_ERR_DOES_NOT_EXIST;
2735 lkup_type = fm_list->fltr_info.lkup_type;
2736 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove=true: this AQ call clears the VSI from the HW list */
2737 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2738 ice_aqc_opc_update_sw_rules,
2743 fm_list->vsi_count--;
2744 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2746 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2747 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2748 struct ice_vsi_list_map_info *vsi_list_info =
2749 fm_list->vsi_list_info;
/* The single surviving VSI is the first bit still set in the map */
2752 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2754 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2755 return ICE_ERR_OUT_OF_RANGE;
2757 /* Make sure VSI list is empty before removing it below */
2758 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2760 ice_aqc_opc_update_sw_rules,
/* Rewrite the switch rule to forward directly to the remaining VSI */
2765 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2766 tmp_fltr_info.fwd_id.hw_vsi_id =
2767 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2768 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2769 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2771 ice_debug(hw, ICE_DBG_SW,
2772 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2773 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2777 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are kept until fully empty; others once a single VSI is left */
2780 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2781 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2782 struct ice_vsi_list_map_info *vsi_list_info =
2783 fm_list->vsi_list_info;
2785 /* Remove the VSI list since it is no longer used */
2786 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2788 ice_debug(hw, ICE_DBG_SW,
2789 "Failed to remove VSI list %d, error %d\n",
2790 vsi_list_id, status);
2794 LIST_DEL(&vsi_list_info->list_entry);
2795 ice_free(hw, vsi_list_info);
2796 fm_list->vsi_list_info = NULL;
2803 * ice_remove_rule_internal - Remove a filter rule of a given type
2805 * @hw: pointer to the hardware structure
2806 * @recp_id: recipe ID for which the rule needs to removed
2807 * @f_entry: rule entry containing filter information
2809 static enum ice_status
2810 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2811 struct ice_fltr_list_entry *f_entry)
2813 struct ice_switch_info *sw = hw->switch_info;
2814 struct ice_fltr_mgmt_list_entry *list_elem;
2815 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2816 enum ice_status status = ICE_SUCCESS;
2817 bool remove_rule = false;
2820 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2821 return ICE_ERR_PARAM;
2822 f_entry->fltr_info.fwd_id.hw_vsi_id =
2823 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2825 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2826 ice_acquire_lock(rule_lock);
2827 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2829 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, based on whether
 * the rule forwards to a single VSI or to a (possibly shared) VSI list.
 */
2833 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2835 } else if (!list_elem->vsi_list_info) {
2836 status = ICE_ERR_DOES_NOT_EXIST;
2838 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2839 /* a ref_cnt > 1 indicates that the vsi_list is being
2840 * shared by multiple rules. Decrement the ref_cnt and
2841 * remove this rule, but do not modify the list, as it
2842 * is in-use by other rules.
2844 list_elem->vsi_list_info->ref_cnt--;
2847 /* a ref_cnt of 1 indicates the vsi_list is only used
2848 * by one rule. However, the original removal request is only
2849 * for a single VSI. Update the vsi_list first, and only
2850 * remove the rule if there are no further VSIs in this list.
2852 vsi_handle = f_entry->fltr_info.vsi_handle;
2853 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2856 /* if VSI count goes to zero after updating the VSI list */
2857 if (list_elem->vsi_count == 0)
2862 /* Remove the lookup rule */
2863 struct ice_aqc_sw_rules_elem *s_rule;
/* No dummy packet header is needed for a remove, hence NO_HDR size */
2865 s_rule = (struct ice_aqc_sw_rules_elem *)
2866 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2868 status = ICE_ERR_NO_MEMORY;
2872 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2873 ice_aqc_opc_remove_sw_rules);
2875 status = ice_aq_sw_rules(hw, s_rule,
2876 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2877 ice_aqc_opc_remove_sw_rules, NULL);
2879 /* Remove a book keeping from the list */
2880 ice_free(hw, s_rule);
2885 LIST_DEL(&list_elem->list_entry);
2886 ice_free(hw, list_elem);
2889 ice_release_lock(rule_lock);
2894 * ice_aq_get_res_alloc - get allocated resources
2895 * @hw: pointer to the HW struct
2896 * @num_entries: pointer to u16 to store the number of resource entries returned
2897 * @buf: pointer to user-supplied buffer
2898 * @buf_size: size of buff
2899 * @cd: pointer to command details structure or NULL
2901 * The user-supplied buffer must be large enough to store the resource
2902 * information for all resource types. Each resource type is an
2903 * ice_aqc_get_res_resp_data_elem structure.
2906 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2907 u16 buf_size, struct ice_sq_cd *cd)
2909 struct ice_aqc_get_res_alloc *resp;
2910 enum ice_status status;
2911 struct ice_aq_desc desc;
2914 return ICE_ERR_BAD_PTR;
2916 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2917 return ICE_ERR_INVAL_SIZE;
2919 resp = &desc.params.get_res;
2921 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2922 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only written back on AQ success */
2924 if (!status && num_entries)
2925 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2931 * ice_aq_get_res_descs - get allocated resource descriptors
2932 * @hw: pointer to the hardware structure
2933 * @num_entries: number of resource entries in buffer
2934 * @buf: Indirect buffer to hold data parameters and response
2935 * @buf_size: size of buffer for indirect commands
2936 * @res_type: resource type
2937 * @res_shared: is resource shared
2938 * @desc_id: input - first desc ID to start; output - next desc ID
2939 * @cd: pointer to command details structure or NULL
2942 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2943 struct ice_aqc_get_allocd_res_desc_resp *buf,
2944 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2945 struct ice_sq_cd *cd)
2947 struct ice_aqc_get_allocd_res_desc *cmd;
2948 struct ice_aq_desc desc;
2949 enum ice_status status;
2951 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2953 cmd = &desc.params.get_res_desc;
2956 return ICE_ERR_PARAM;
/* Buffer must exactly hold num_entries response elements */
2958 if (buf_size != (num_entries * sizeof(*buf)))
2959 return ICE_ERR_PARAM;
2961 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared flag into the res field */
2963 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2964 ICE_AQC_RES_TYPE_M) | (res_shared ?
2965 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2966 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2968 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the continuation cursor for a follow-up call */
2970 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2976 * ice_add_mac - Add a MAC address based filter rule
2977 * @hw: pointer to the hardware structure
2978 * @m_list: list of MAC addresses and forwarding information
2980 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2981 * multiple unicast addresses, the function assumes that all the
2982 * addresses are unique in a given add_mac call. It doesn't
2983 * check for duplicates in this case, removing duplicates from a given
2984 * list should be taken care of in the caller of this function.
2987 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2989 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2990 struct ice_fltr_list_entry *m_list_itr;
2991 struct LIST_HEAD_TYPE *rule_head;
2992 u16 elem_sent, total_elem_left;
2993 struct ice_switch_info *sw;
2994 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2995 enum ice_status status = ICE_SUCCESS;
2996 u16 num_unicast = 0;
3000 return ICE_ERR_PARAM;
3002 sw = hw->switch_info;
3003 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry; multicast (and shared unicast) entries are
 * added individually here, exclusive unicast entries are counted for a bulk
 * AQ submission below.
 */
3004 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3006 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3010 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3011 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3012 if (!ice_is_vsi_valid(hw, vsi_handle))
3013 return ICE_ERR_PARAM;
3014 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3015 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3016 /* update the src in case it is VSI num */
3017 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3018 return ICE_ERR_PARAM;
3019 m_list_itr->fltr_info.src = hw_vsi_id;
3020 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3021 IS_ZERO_ETHER_ADDR(add))
3022 return ICE_ERR_PARAM;
3023 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3024 /* Don't overwrite the unicast address */
3025 ice_acquire_lock(rule_lock);
3026 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3027 &m_list_itr->fltr_info)) {
3028 ice_release_lock(rule_lock);
3029 return ICE_ERR_ALREADY_EXISTS;
3031 ice_release_lock(rule_lock);
3033 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3034 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3035 m_list_itr->status =
3036 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3038 if (m_list_itr->status)
3039 return m_list_itr->status;
3043 ice_acquire_lock(rule_lock);
3044 /* Exit if no suitable entries were found for adding bulk switch rule */
3046 status = ICE_SUCCESS;
3047 goto ice_add_mac_exit;
3050 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3052 /* Allocate switch rule buffer for the bulk update for unicast */
3053 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3054 s_rule = (struct ice_aqc_sw_rules_elem *)
3055 ice_calloc(hw, num_unicast, s_rule_size);
3057 status = ICE_ERR_NO_MEMORY;
3058 goto ice_add_mac_exit;
/* Pass 2: fill one rule element per unicast address into the bulk buffer */
3062 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3064 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3065 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3067 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3068 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3069 ice_aqc_opc_add_sw_rules);
3070 r_iter = (struct ice_aqc_sw_rules_elem *)
3071 ((u8 *)r_iter + s_rule_size);
3075 /* Call AQ bulk switch rule update for all unicast addresses */
3077 /* Call AQ switch rule in AQ_MAX chunk */
/* Submit in chunks bounded by the AQ maximum buffer length */
3078 for (total_elem_left = num_unicast; total_elem_left > 0;
3079 total_elem_left -= elem_sent) {
3080 struct ice_aqc_sw_rules_elem *entry = r_iter;
3082 elem_sent = min(total_elem_left,
3083 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3084 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3085 elem_sent, ice_aqc_opc_add_sw_rules,
3088 goto ice_add_mac_exit;
3089 r_iter = (struct ice_aqc_sw_rules_elem *)
3090 ((u8 *)r_iter + (elem_sent * s_rule_size));
3093 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: walk the list again in the same order to harvest FW-assigned rule
 * indices and create a book-keeping entry per unicast address.
 */
3095 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3097 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3098 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3099 struct ice_fltr_mgmt_list_entry *fm_entry;
3101 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3102 f_info->fltr_rule_id =
3103 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3104 f_info->fltr_act = ICE_FWD_TO_VSI;
3105 /* Create an entry to track this MAC address */
3106 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3107 ice_malloc(hw, sizeof(*fm_entry));
3109 status = ICE_ERR_NO_MEMORY;
3110 goto ice_add_mac_exit;
3112 fm_entry->fltr_info = *f_info;
3113 fm_entry->vsi_count = 1;
3114 /* The book keeping entries will get removed when
3115 * base driver calls remove filter AQ command
3118 LIST_ADD(&fm_entry->list_entry, rule_head);
3119 r_iter = (struct ice_aqc_sw_rules_elem *)
3120 ((u8 *)r_iter + s_rule_size);
3125 ice_release_lock(rule_lock);
3127 ice_free(hw, s_rule);
3132 * ice_add_vlan_internal - Add one VLAN based filter rule
3133 * @hw: pointer to the hardware structure
3134 * @f_entry: filter entry containing one VLAN information
3136 static enum ice_status
3137 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3139 struct ice_switch_info *sw = hw->switch_info;
3140 struct ice_fltr_mgmt_list_entry *v_list_itr;
3141 struct ice_fltr_info *new_fltr, *cur_fltr;
3142 enum ice_sw_lkup_type lkup_type;
3143 u16 vsi_list_id = 0, vsi_handle;
3144 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3145 enum ice_status status = ICE_SUCCESS;
3147 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3148 return ICE_ERR_PARAM;
3150 f_entry->fltr_info.fwd_id.hw_vsi_id =
3151 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3152 new_fltr = &f_entry->fltr_info;
3154 /* VLAN ID should only be 12 bits */
3155 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3156 return ICE_ERR_PARAM;
3158 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3159 return ICE_ERR_PARAM;
3161 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3162 lkup_type = new_fltr->lkup_type;
3163 vsi_handle = new_fltr->vsi_handle;
3164 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3165 ice_acquire_lock(rule_lock);
3166 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* Case 1: no rule for this VLAN yet — create one, reusing or creating a
 * VSI list as needed.
 */
3168 struct ice_vsi_list_map_info *map_info = NULL;
3170 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3171 /* All VLAN pruning rules use a VSI list. Check if
3172 * there is already a VSI list containing VSI that we
3173 * want to add. If found, use the same vsi_list_id for
3174 * this new VLAN rule or else create a new list.
3176 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3180 status = ice_create_vsi_list_rule(hw,
3188 /* Convert the action to forwarding to a VSI list. */
3189 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3190 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3193 status = ice_create_pkt_fwd_rule(hw, f_entry);
3195 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3198 status = ICE_ERR_DOES_NOT_EXIST;
3201 /* reuse VSI list for new rule and increment ref_cnt */
3203 v_list_itr->vsi_list_info = map_info;
3204 map_info->ref_cnt++;
3206 v_list_itr->vsi_list_info =
3207 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and exclusively owns its VSI list — extend it */
3211 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3212 /* Update existing VSI list to add new VSI ID only if it used
3215 cur_fltr = &v_list_itr->fltr_info;
3216 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3219 /* If VLAN rule exists and VSI list being used by this rule is
3220 * referenced by more than 1 VLAN rule. Then create a new VSI
3221 * list appending previous VSI with new VSI and update existing
3222 * VLAN rule to point to new VSI list ID
3224 struct ice_fltr_info tmp_fltr;
3225 u16 vsi_handle_arr[2];
3228 /* Current implementation only supports reusing VSI list with
3229 * one VSI count. We should never hit below condition
3231 if (v_list_itr->vsi_count > 1 &&
3232 v_list_itr->vsi_list_info->ref_cnt > 1) {
3233 ice_debug(hw, ICE_DBG_SW,
3234 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3235 status = ICE_ERR_CFG;
3240 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3243 /* A rule already exists with the new VSI being added */
3244 if (cur_handle == vsi_handle) {
3245 status = ICE_ERR_ALREADY_EXISTS;
3249 vsi_handle_arr[0] = cur_handle;
3250 vsi_handle_arr[1] = vsi_handle;
3251 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3252 &vsi_list_id, lkup_type);
3256 tmp_fltr = v_list_itr->fltr_info;
3257 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3258 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3259 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3260 /* Update the previous switch rule to a new VSI list which
3261 * includes current VSI that is requested
3263 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3267 /* before overriding VSI list map info. decrement ref_cnt of
3270 v_list_itr->vsi_list_info->ref_cnt--;
3272 /* now update to newly created list */
3273 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3274 v_list_itr->vsi_list_info =
3275 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3277 v_list_itr->vsi_count++;
3281 ice_release_lock(rule_lock);
3286 * ice_add_vlan - Add VLAN based filter rule
3287 * @hw: pointer to the hardware structure
3288 * @v_list: list of VLAN entries and forwarding information
/* Iterates v_list, adding each VLAN rule; stops and returns the first
 * per-entry failure (also recorded in that entry's status field).
 */
3291 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3293 struct ice_fltr_list_entry *v_list_itr;
3296 return ICE_ERR_PARAM;
3298 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3300 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3301 return ICE_ERR_PARAM;
/* VLAN filters are always programmed as Tx-direction rules */
3302 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3303 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3304 if (v_list_itr->status)
3305 return v_list_itr->status;
3311 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3312 * @hw: pointer to the hardware structure
3313 * @mv_list: list of MAC and VLAN filters
3315 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3316 * pruning bits enabled, then it is the responsibility of the caller to make
3317 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3318 * VLAN won't be received on that VSI otherwise.
3321 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3323 struct ice_fltr_list_entry *mv_list_itr;
3325 if (!mv_list || !hw)
3326 return ICE_ERR_PARAM;
3328 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3330 enum ice_sw_lkup_type l_type =
3331 mv_list_itr->fltr_info.lkup_type;
3333 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3334 return ICE_ERR_PARAM;
/* MAC-VLAN filters are always programmed as Tx-direction rules */
3335 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3336 mv_list_itr->status =
3337 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3339 if (mv_list_itr->status)
3340 return mv_list_itr->status;
3346 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3347 * @hw: pointer to the hardware structure
3348 * @em_list: list of ether type MAC filter, MAC is optional
3350 * This function requires the caller to populate the entries in
3351 * the filter list with the necessary fields (including flags to
3352 * indicate Tx or Rx rules).
3355 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3357 struct ice_fltr_list_entry *em_list_itr;
3359 if (!em_list || !hw)
3360 return ICE_ERR_PARAM;
3362 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3364 enum ice_sw_lkup_type l_type =
3365 em_list_itr->fltr_info.lkup_type;
/* Only the two ethertype recipes are valid for this API */
3367 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3368 l_type != ICE_SW_LKUP_ETHERTYPE)
3369 return ICE_ERR_PARAM;
3371 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3373 if (em_list_itr->status)
3374 return em_list_itr->status;
3380 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3381 * @hw: pointer to the hardware structure
3382 * @em_list: list of ethertype or ethertype MAC entries
3385 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3387 struct ice_fltr_list_entry *em_list_itr, *tmp;
3389 if (!em_list || !hw)
3390 return ICE_ERR_PARAM;
/* _SAFE iteration: removal may unlink entries while walking */
3392 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3394 enum ice_sw_lkup_type l_type =
3395 em_list_itr->fltr_info.lkup_type;
3397 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3398 l_type != ICE_SW_LKUP_ETHERTYPE)
3399 return ICE_ERR_PARAM;
3401 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3403 if (em_list_itr->status)
3404 return em_list_itr->status;
3410 * ice_rem_sw_rule_info
3411 * @hw: pointer to the hardware structure
3412 * @rule_head: pointer to the switch list structure that we want to delete
3415 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3417 if (!LIST_EMPTY(rule_head)) {
3418 struct ice_fltr_mgmt_list_entry *entry;
3419 struct ice_fltr_mgmt_list_entry *tmp;
3421 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3422 ice_fltr_mgmt_list_entry, list_entry) {
3423 LIST_DEL(&entry->list_entry);
3424 ice_free(hw, entry);
3430 * ice_rem_adv_rule_info
3431 * @hw: pointer to the hardware structure
3432 * @rule_head: pointer to the switch list structure that we want to delete
3435 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3437 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3438 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3440 if (LIST_EMPTY(rule_head))
3443 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3444 ice_adv_fltr_mgmt_list_entry, list_entry) {
3445 LIST_DEL(&lst_itr->list_entry);
3446 ice_free(hw, lst_itr->lkups);
3447 ice_free(hw, lst_itr);
3452 * ice_rem_all_sw_rules_info
3453 * @hw: pointer to the hardware structure
3455 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3457 struct ice_switch_info *sw = hw->switch_info;
3460 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3461 struct LIST_HEAD_TYPE *rule_head;
3463 rule_head = &sw->recp_list[i].filt_rules;
3464 if (!sw->recp_list[i].adv_rule)
3465 ice_rem_sw_rule_info(hw, rule_head);
3467 ice_rem_adv_rule_info(hw, rule_head);
3472 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3473 * @pi: pointer to the port_info structure
3474 * @vsi_handle: VSI handle to set as default
3475 * @set: true to add the above mentioned switch rule, false to remove it
3476 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3478 * add filter rule to set/unset given VSI as default VSI for the switch
3479 * (represented by swid)
3482 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3485 struct ice_aqc_sw_rules_elem *s_rule;
3486 struct ice_fltr_info f_info;
3487 struct ice_hw *hw = pi->hw;
3488 enum ice_adminq_opc opcode;
3489 enum ice_status status;
3493 if (!ice_is_vsi_valid(hw, vsi_handle))
3494 return ICE_ERR_PARAM;
3495 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3497 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3498 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3499 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3501 return ICE_ERR_NO_MEMORY;
3503 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3505 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3506 f_info.flag = direction;
3507 f_info.fltr_act = ICE_FWD_TO_VSI;
3508 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3510 if (f_info.flag & ICE_FLTR_RX) {
3511 f_info.src = pi->lport;
3512 f_info.src_id = ICE_SRC_ID_LPORT;
3514 f_info.fltr_rule_id =
3515 pi->dflt_rx_vsi_rule_id;
3516 } else if (f_info.flag & ICE_FLTR_TX) {
3517 f_info.src_id = ICE_SRC_ID_VSI;
3518 f_info.src = hw_vsi_id;
3520 f_info.fltr_rule_id =
3521 pi->dflt_tx_vsi_rule_id;
3525 opcode = ice_aqc_opc_add_sw_rules;
3527 opcode = ice_aqc_opc_remove_sw_rules;
3529 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3531 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3532 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3535 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3537 if (f_info.flag & ICE_FLTR_TX) {
3538 pi->dflt_tx_vsi_num = hw_vsi_id;
3539 pi->dflt_tx_vsi_rule_id = index;
3540 } else if (f_info.flag & ICE_FLTR_RX) {
3541 pi->dflt_rx_vsi_num = hw_vsi_id;
3542 pi->dflt_rx_vsi_rule_id = index;
3545 if (f_info.flag & ICE_FLTR_TX) {
3546 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3547 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3548 } else if (f_info.flag & ICE_FLTR_RX) {
3549 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3550 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3555 ice_free(hw, s_rule);
3560 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3561 * @hw: pointer to the hardware structure
3562 * @recp_id: lookup type for which the specified rule needs to be searched
3563 * @f_info: rule information
3565 * Helper function to search for a unicast rule entry - this is to be used
3566 * to remove unicast MAC filter that is not shared with other VSIs on the
3569 * Returns pointer to entry storing the rule if found
3571 static struct ice_fltr_mgmt_list_entry *
3572 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3573 struct ice_fltr_info *f_info)
3575 struct ice_switch_info *sw = hw->switch_info;
3576 struct ice_fltr_mgmt_list_entry *list_itr;
3577 struct LIST_HEAD_TYPE *list_head;
3579 list_head = &sw->recp_list[recp_id].filt_rules;
3580 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3582 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3583 sizeof(f_info->l_data)) &&
3584 f_info->fwd_id.hw_vsi_id ==
3585 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3586 f_info->flag == list_itr->fltr_info.flag)
3593 * ice_remove_mac - remove a MAC address based filter rule
3594 * @hw: pointer to the hardware structure
3595 * @m_list: list of MAC addresses and forwarding information
3597 * This function removes either a MAC filter rule or a specific VSI from a
3598 * VSI list for a multicast MAC address.
3600 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3601 * ice_add_mac. Caller should be aware that this call will only work if all
3602 * the entries passed into m_list were added previously. It will not attempt to
3603 * do a partial remove of entries that were found.
3606 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3608 struct ice_fltr_list_entry *list_itr, *tmp;
3609 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3612 return ICE_ERR_PARAM;
3614 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3615 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3617 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3618 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3621 if (l_type != ICE_SW_LKUP_MAC)
3622 return ICE_ERR_PARAM;
3624 vsi_handle = list_itr->fltr_info.vsi_handle;
3625 if (!ice_is_vsi_valid(hw, vsi_handle))
3626 return ICE_ERR_PARAM;
3628 list_itr->fltr_info.fwd_id.hw_vsi_id =
3629 ice_get_hw_vsi_num(hw, vsi_handle);
3630 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3631 /* Don't remove the unicast address that belongs to
3632 * another VSI on the switch, since it is not being
3635 ice_acquire_lock(rule_lock);
3636 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3637 &list_itr->fltr_info)) {
3638 ice_release_lock(rule_lock);
3639 return ICE_ERR_DOES_NOT_EXIST;
3641 ice_release_lock(rule_lock);
3643 list_itr->status = ice_remove_rule_internal(hw,
3646 if (list_itr->status)
3647 return list_itr->status;
3653 * ice_remove_vlan - Remove VLAN based filter rule
3654 * @hw: pointer to the hardware structure
3655 * @v_list: list of VLAN entries and forwarding information
3658 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3660 struct ice_fltr_list_entry *v_list_itr, *tmp;
3663 return ICE_ERR_PARAM;
3665 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3667 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3669 if (l_type != ICE_SW_LKUP_VLAN)
3670 return ICE_ERR_PARAM;
3671 v_list_itr->status = ice_remove_rule_internal(hw,
3674 if (v_list_itr->status)
3675 return v_list_itr->status;
3681 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3682 * @hw: pointer to the hardware structure
3683 * @v_list: list of MAC VLAN entries and forwarding information
3686 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3688 struct ice_fltr_list_entry *v_list_itr, *tmp;
3691 return ICE_ERR_PARAM;
3693 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3695 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3697 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3698 return ICE_ERR_PARAM;
3699 v_list_itr->status =
3700 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3702 if (v_list_itr->status)
3703 return v_list_itr->status;
3709 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3710 * @fm_entry: filter entry to inspect
3711 * @vsi_handle: VSI handle to compare with filter info
3714 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3716 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3717 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3718 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3719 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3724 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3725 * @hw: pointer to the hardware structure
3726 * @vsi_handle: VSI handle to remove filters from
3727 * @vsi_list_head: pointer to the list to add entry to
3728 * @fi: pointer to fltr_info of filter entry to copy & add
3730 * Helper function, used when creating a list of filters to remove from
3731 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3732 * original filter entry, with the exception of fltr_info.fltr_act and
3733 * fltr_info.fwd_id fields. These are set such that later logic can
3734 * extract which VSI to remove the fltr from, and pass on that information.
3736 static enum ice_status
3737 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3738 struct LIST_HEAD_TYPE *vsi_list_head,
3739 struct ice_fltr_info *fi)
3741 struct ice_fltr_list_entry *tmp;
3743 /* this memory is freed up in the caller function
3744 * once filters for this VSI are removed
3746 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3748 return ICE_ERR_NO_MEMORY;
3750 tmp->fltr_info = *fi;
3752 /* Overwrite these fields to indicate which VSI to remove filter from,
3753 * so find and remove logic can extract the information from the
3754 * list entries. Note that original entries will still have proper
3757 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3758 tmp->fltr_info.vsi_handle = vsi_handle;
3759 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3761 LIST_ADD(&tmp->list_entry, vsi_list_head);
3767 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3768 * @hw: pointer to the hardware structure
3769 * @vsi_handle: VSI handle to remove filters from
3770 * @lkup_list_head: pointer to the list that has certain lookup type filters
3771 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3773 * Locates all filters in lkup_list_head that are used by the given VSI,
3774 * and adds COPIES of those entries to vsi_list_head (intended to be used
3775 * to remove the listed filters).
3776 * Note that this means all entries in vsi_list_head must be explicitly
3777 * deallocated by the caller when done with list.
3779 static enum ice_status
3780 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3781 struct LIST_HEAD_TYPE *lkup_list_head,
3782 struct LIST_HEAD_TYPE *vsi_list_head)
3784 struct ice_fltr_mgmt_list_entry *fm_entry;
3785 enum ice_status status = ICE_SUCCESS;
3787 /* check to make sure VSI ID is valid and within boundary */
3788 if (!ice_is_vsi_valid(hw, vsi_handle))
3789 return ICE_ERR_PARAM;
3791 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3792 ice_fltr_mgmt_list_entry, list_entry) {
3793 struct ice_fltr_info *fi;
3795 fi = &fm_entry->fltr_info;
3796 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3799 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3808 * ice_determine_promisc_mask
3809 * @fi: filter info to parse
3811 * Helper function to determine which ICE_PROMISC_ mask corresponds
3812 * to given filter into.
3814 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3816 u16 vid = fi->l_data.mac_vlan.vlan_id;
3817 u8 *macaddr = fi->l_data.mac.mac_addr;
3818 bool is_tx_fltr = false;
3819 u8 promisc_mask = 0;
3821 if (fi->flag == ICE_FLTR_TX)
3824 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3825 promisc_mask |= is_tx_fltr ?
3826 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3827 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3828 promisc_mask |= is_tx_fltr ?
3829 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3830 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3831 promisc_mask |= is_tx_fltr ?
3832 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3834 promisc_mask |= is_tx_fltr ?
3835 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3837 return promisc_mask;
3841 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3842 * @hw: pointer to the hardware structure
3843 * @vsi_handle: VSI handle to retrieve info from
3844 * @promisc_mask: pointer to mask to be filled in
3845 * @vid: VLAN ID of promisc VLAN VSI
3848 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3851 struct ice_switch_info *sw = hw->switch_info;
3852 struct ice_fltr_mgmt_list_entry *itr;
3853 struct LIST_HEAD_TYPE *rule_head;
3854 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3856 if (!ice_is_vsi_valid(hw, vsi_handle))
3857 return ICE_ERR_PARAM;
3861 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3862 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3864 ice_acquire_lock(rule_lock);
3865 LIST_FOR_EACH_ENTRY(itr, rule_head,
3866 ice_fltr_mgmt_list_entry, list_entry) {
3867 /* Continue if this filter doesn't apply to this VSI or the
3868 * VSI ID is not in the VSI map for this filter
3870 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3873 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3875 ice_release_lock(rule_lock);
3881 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3882 * @hw: pointer to the hardware structure
3883 * @vsi_handle: VSI handle to retrieve info from
3884 * @promisc_mask: pointer to mask to be filled in
3885 * @vid: VLAN ID of promisc VLAN VSI
3888 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3891 struct ice_switch_info *sw = hw->switch_info;
3892 struct ice_fltr_mgmt_list_entry *itr;
3893 struct LIST_HEAD_TYPE *rule_head;
3894 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3896 if (!ice_is_vsi_valid(hw, vsi_handle))
3897 return ICE_ERR_PARAM;
3901 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3902 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3904 ice_acquire_lock(rule_lock);
3905 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3907 /* Continue if this filter doesn't apply to this VSI or the
3908 * VSI ID is not in the VSI map for this filter
3910 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3913 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3915 ice_release_lock(rule_lock);
3921 * ice_remove_promisc - Remove promisc based filter rules
3922 * @hw: pointer to the hardware structure
3923 * @recp_id: recipe ID for which the rule needs to removed
3924 * @v_list: list of promisc entries
3926 static enum ice_status
3927 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3928 struct LIST_HEAD_TYPE *v_list)
3930 struct ice_fltr_list_entry *v_list_itr, *tmp;
3932 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3934 v_list_itr->status =
3935 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3936 if (v_list_itr->status)
3937 return v_list_itr->status;
3943 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3944 * @hw: pointer to the hardware structure
3945 * @vsi_handle: VSI handle to clear mode
3946 * @promisc_mask: mask of promiscuous config bits to clear
3947 * @vid: VLAN ID to clear VLAN promiscuous
3950 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3953 struct ice_switch_info *sw = hw->switch_info;
3954 struct ice_fltr_list_entry *fm_entry, *tmp;
3955 struct LIST_HEAD_TYPE remove_list_head;
3956 struct ice_fltr_mgmt_list_entry *itr;
3957 struct LIST_HEAD_TYPE *rule_head;
3958 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3959 enum ice_status status = ICE_SUCCESS;
3962 if (!ice_is_vsi_valid(hw, vsi_handle))
3963 return ICE_ERR_PARAM;
3965 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3966 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3968 recipe_id = ICE_SW_LKUP_PROMISC;
3970 rule_head = &sw->recp_list[recipe_id].filt_rules;
3971 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3973 INIT_LIST_HEAD(&remove_list_head);
3975 ice_acquire_lock(rule_lock);
3976 LIST_FOR_EACH_ENTRY(itr, rule_head,
3977 ice_fltr_mgmt_list_entry, list_entry) {
3978 struct ice_fltr_info *fltr_info;
3979 u8 fltr_promisc_mask = 0;
3981 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3983 fltr_info = &itr->fltr_info;
3985 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
3986 vid != fltr_info->l_data.mac_vlan.vlan_id)
3989 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
3991 /* Skip if filter is not completely specified by given mask */
3992 if (fltr_promisc_mask & ~promisc_mask)
3995 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3999 ice_release_lock(rule_lock);
4000 goto free_fltr_list;
4003 ice_release_lock(rule_lock);
4005 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4008 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4009 ice_fltr_list_entry, list_entry) {
4010 LIST_DEL(&fm_entry->list_entry);
4011 ice_free(hw, fm_entry);
4018 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4019 * @hw: pointer to the hardware structure
4020 * @vsi_handle: VSI handle to configure
4021 * @promisc_mask: mask of promiscuous config bits
4022 * @vid: VLAN ID to set VLAN promiscuous
4025 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4027 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4028 struct ice_fltr_list_entry f_list_entry;
4029 struct ice_fltr_info new_fltr;
4030 enum ice_status status = ICE_SUCCESS;
4036 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4038 if (!ice_is_vsi_valid(hw, vsi_handle))
4039 return ICE_ERR_PARAM;
4040 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4042 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4044 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4045 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4046 new_fltr.l_data.mac_vlan.vlan_id = vid;
4047 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4049 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4050 recipe_id = ICE_SW_LKUP_PROMISC;
4053 /* Separate filters must be set for each direction/packet type
4054 * combination, so we will loop over the mask value, store the
4055 * individual type, and clear it out in the input mask as it
4058 while (promisc_mask) {
4064 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4065 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4066 pkt_type = UCAST_FLTR;
4067 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4068 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4069 pkt_type = UCAST_FLTR;
4071 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4072 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4073 pkt_type = MCAST_FLTR;
4074 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4075 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4076 pkt_type = MCAST_FLTR;
4078 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4079 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4080 pkt_type = BCAST_FLTR;
4081 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4082 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4083 pkt_type = BCAST_FLTR;
4087 /* Check for VLAN promiscuous flag */
4088 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4089 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4090 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4091 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4095 /* Set filter DA based on packet type */
4096 mac_addr = new_fltr.l_data.mac.mac_addr;
4097 if (pkt_type == BCAST_FLTR) {
4098 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4099 } else if (pkt_type == MCAST_FLTR ||
4100 pkt_type == UCAST_FLTR) {
4101 /* Use the dummy ether header DA */
4102 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4103 ICE_NONDMA_TO_NONDMA);
4104 if (pkt_type == MCAST_FLTR)
4105 mac_addr[0] |= 0x1; /* Set multicast bit */
4108 /* Need to reset this to zero for all iterations */
4111 new_fltr.flag |= ICE_FLTR_TX;
4112 new_fltr.src = hw_vsi_id;
4114 new_fltr.flag |= ICE_FLTR_RX;
4115 new_fltr.src = hw->port_info->lport;
4118 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4119 new_fltr.vsi_handle = vsi_handle;
4120 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4121 f_list_entry.fltr_info = new_fltr;
4123 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4124 if (status != ICE_SUCCESS)
4125 goto set_promisc_exit;
4133 * ice_set_vlan_vsi_promisc
4134 * @hw: pointer to the hardware structure
4135 * @vsi_handle: VSI handle to configure
4136 * @promisc_mask: mask of promiscuous config bits
4137 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4139 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4142 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4143 bool rm_vlan_promisc)
4145 struct ice_switch_info *sw = hw->switch_info;
4146 struct ice_fltr_list_entry *list_itr, *tmp;
4147 struct LIST_HEAD_TYPE vsi_list_head;
4148 struct LIST_HEAD_TYPE *vlan_head;
4149 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4150 enum ice_status status;
4153 INIT_LIST_HEAD(&vsi_list_head);
4154 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4155 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4156 ice_acquire_lock(vlan_lock);
4157 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4159 ice_release_lock(vlan_lock);
4161 goto free_fltr_list;
4163 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4165 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4166 if (rm_vlan_promisc)
4167 status = ice_clear_vsi_promisc(hw, vsi_handle,
4168 promisc_mask, vlan_id);
4170 status = ice_set_vsi_promisc(hw, vsi_handle,
4171 promisc_mask, vlan_id);
4177 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4178 ice_fltr_list_entry, list_entry) {
4179 LIST_DEL(&list_itr->list_entry);
4180 ice_free(hw, list_itr);
4186 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4187 * @hw: pointer to the hardware structure
4188 * @vsi_handle: VSI handle to remove filters from
4189 * @lkup: switch rule filter lookup type
4192 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4193 enum ice_sw_lkup_type lkup)
4195 struct ice_switch_info *sw = hw->switch_info;
4196 struct ice_fltr_list_entry *fm_entry;
4197 struct LIST_HEAD_TYPE remove_list_head;
4198 struct LIST_HEAD_TYPE *rule_head;
4199 struct ice_fltr_list_entry *tmp;
4200 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4201 enum ice_status status;
4203 INIT_LIST_HEAD(&remove_list_head);
4204 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4205 rule_head = &sw->recp_list[lkup].filt_rules;
4206 ice_acquire_lock(rule_lock);
4207 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4209 ice_release_lock(rule_lock);
4214 case ICE_SW_LKUP_MAC:
4215 ice_remove_mac(hw, &remove_list_head);
4217 case ICE_SW_LKUP_VLAN:
4218 ice_remove_vlan(hw, &remove_list_head);
4220 case ICE_SW_LKUP_PROMISC:
4221 case ICE_SW_LKUP_PROMISC_VLAN:
4222 ice_remove_promisc(hw, lkup, &remove_list_head);
4224 case ICE_SW_LKUP_MAC_VLAN:
4225 ice_remove_mac_vlan(hw, &remove_list_head);
4227 case ICE_SW_LKUP_ETHERTYPE:
4228 case ICE_SW_LKUP_ETHERTYPE_MAC:
4229 ice_remove_eth_mac(hw, &remove_list_head);
4231 case ICE_SW_LKUP_DFLT:
4232 ice_debug(hw, ICE_DBG_SW,
4233 "Remove filters for this lookup type hasn't been implemented yet\n");
4235 case ICE_SW_LKUP_LAST:
4236 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4240 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4241 ice_fltr_list_entry, list_entry) {
4242 LIST_DEL(&fm_entry->list_entry);
4243 ice_free(hw, fm_entry);
4248 * ice_remove_vsi_fltr - Remove all filters for a VSI
4249 * @hw: pointer to the hardware structure
4250 * @vsi_handle: VSI handle to remove filters from
4252 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4254 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4256 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4257 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4258 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4259 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4260 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4261 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4262 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4263 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4267 * ice_alloc_res_cntr - allocating resource counter
4268 * @hw: pointer to the hardware structure
4269 * @type: type of resource
4270 * @alloc_shared: if set it is shared else dedicated
4271 * @num_items: number of entries requested for FD resource type
4272 * @counter_id: counter index returned by AQ call
4275 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4278 struct ice_aqc_alloc_free_res_elem *buf;
4279 enum ice_status status;
4282 /* Allocate resource */
4283 buf_len = sizeof(*buf);
4284 buf = (struct ice_aqc_alloc_free_res_elem *)
4285 ice_malloc(hw, buf_len);
4287 return ICE_ERR_NO_MEMORY;
4289 buf->num_elems = CPU_TO_LE16(num_items);
4290 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4291 ICE_AQC_RES_TYPE_M) | alloc_shared);
4293 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4294 ice_aqc_opc_alloc_res, NULL);
4298 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4306 * ice_free_res_cntr - free resource counter
4307 * @hw: pointer to the hardware structure
4308 * @type: type of resource
4309 * @alloc_shared: if set it is shared else dedicated
4310 * @num_items: number of entries to be freed for FD resource type
4311 * @counter_id: counter ID resource which needs to be freed
4314 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4317 struct ice_aqc_alloc_free_res_elem *buf;
4318 enum ice_status status;
4322 buf_len = sizeof(*buf);
4323 buf = (struct ice_aqc_alloc_free_res_elem *)
4324 ice_malloc(hw, buf_len);
4326 return ICE_ERR_NO_MEMORY;
4328 buf->num_elems = CPU_TO_LE16(num_items);
4329 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4330 ICE_AQC_RES_TYPE_M) | alloc_shared);
4331 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4333 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4334 ice_aqc_opc_free_res, NULL);
4336 ice_debug(hw, ICE_DBG_SW,
4337 "counter resource could not be freed\n");
4344 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4345 * @hw: pointer to the hardware structure
4346 * @counter_id: returns counter index
4348 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4350 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4351 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4356 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4357 * @hw: pointer to the hardware structure
4358 * @counter_id: counter index to be freed
4360 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4362 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4363 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4368 * ice_alloc_res_lg_act - add large action resource
4369 * @hw: pointer to the hardware structure
4370 * @l_id: large action ID to fill it in
4371 * @num_acts: number of actions to hold with a large action entry
4373 static enum ice_status
4374 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4376 struct ice_aqc_alloc_free_res_elem *sw_buf;
4377 enum ice_status status;
4380 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4381 return ICE_ERR_PARAM;
4383 /* Allocate resource for large action */
4384 buf_len = sizeof(*sw_buf);
4385 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4386 ice_malloc(hw, buf_len);
4388 return ICE_ERR_NO_MEMORY;
4390 sw_buf->num_elems = CPU_TO_LE16(1);
4392 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4393 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4394 * If num_acts is greater than 2, then use
4395 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4396 * The num_acts cannot exceed 4. This was ensured at the
4397 * beginning of the function.
4400 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4401 else if (num_acts == 2)
4402 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4404 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4406 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4407 ice_aqc_opc_alloc_res, NULL);
4409 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4411 ice_free(hw, sw_buf);
4416 * ice_add_mac_with_sw_marker - add filter with sw marker
4417 * @hw: pointer to the hardware structure
4418 * @f_info: filter info structure containing the MAC filter information
4419 * @sw_marker: sw marker to tag the Rx descriptor with
4422 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4425 struct ice_switch_info *sw = hw->switch_info;
4426 struct ice_fltr_mgmt_list_entry *m_entry;
4427 struct ice_fltr_list_entry fl_info;
4428 struct LIST_HEAD_TYPE l_head;
4429 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4430 enum ice_status ret;
4434 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4435 return ICE_ERR_PARAM;
4437 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4438 return ICE_ERR_PARAM;
4440 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4441 return ICE_ERR_PARAM;
4443 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4444 return ICE_ERR_PARAM;
4445 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4447 /* Add filter if it doesn't exist so then the adding of large
4448 * action always results in update
4451 INIT_LIST_HEAD(&l_head);
4452 fl_info.fltr_info = *f_info;
4453 LIST_ADD(&fl_info.list_entry, &l_head);
4455 entry_exists = false;
4456 ret = ice_add_mac(hw, &l_head);
4457 if (ret == ICE_ERR_ALREADY_EXISTS)
4458 entry_exists = true;
4462 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4463 ice_acquire_lock(rule_lock);
4464 /* Get the book keeping entry for the filter */
4465 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4469 /* If counter action was enabled for this rule then don't enable
4470 * sw marker large action
4472 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4473 ret = ICE_ERR_PARAM;
4477 /* if same marker was added before */
4478 if (m_entry->sw_marker_id == sw_marker) {
4479 ret = ICE_ERR_ALREADY_EXISTS;
4483 /* Allocate a hardware table entry to hold large act. Three actions
4484 * for marker based large action
4486 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4490 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4493 /* Update the switch rule to add the marker action */
4494 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4496 ice_release_lock(rule_lock);
4501 ice_release_lock(rule_lock);
4502 /* only remove entry if it did not exist previously */
4504 ret = ice_remove_mac(hw, &l_head);
/* NOTE(review): this extraction dropped several short lines (local
 * declarations such as counter_id/lg_act_id/entry_exist, goto targets and
 * exit labels). Comments below describe only the flow that is visible here.
 */
4510 * ice_add_mac_with_counter - add filter with counter enabled
4511 * @hw: pointer to the hardware structure
4512 * @f_info: pointer to filter info structure containing the MAC filter
4516 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4518 struct ice_switch_info *sw = hw->switch_info;
4519 struct ice_fltr_mgmt_list_entry *m_entry;
4520 struct ice_fltr_list_entry fl_info;
4521 struct LIST_HEAD_TYPE l_head;
4522 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4523 enum ice_status ret;
/* Only a forward-to-VSI MAC filter with a valid VSI handle is accepted */
4528 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4529 return ICE_ERR_PARAM;
4531 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4532 return ICE_ERR_PARAM;
4534 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4535 return ICE_ERR_PARAM;
4536 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4538 entry_exist = false;
4540 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4542 /* Add filter if it doesn't exist so then the adding of large
4543 * action always results in update
4545 INIT_LIST_HEAD(&l_head);
4547 fl_info.fltr_info = *f_info;
4548 LIST_ADD(&fl_info.list_entry, &l_head);
4550 ret = ice_add_mac(hw, &l_head);
/* ICE_ERR_ALREADY_EXISTS is tolerated here: the rule will be updated in
 * place with the counter action instead of being newly created.
 */
4551 if (ret == ICE_ERR_ALREADY_EXISTS)
4556 ice_acquire_lock(rule_lock);
/* Look up the SW book-keeping entry for this filter under the rule lock */
4557 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4559 ret = ICE_ERR_BAD_PTR;
4563 /* Don't enable counter for a filter for which sw marker was enabled */
4564 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4565 ret = ICE_ERR_PARAM;
4569 /* If a counter was already enabled then don't need to add again */
4570 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4571 ret = ICE_ERR_ALREADY_EXISTS;
4575 /* Allocate a hardware table entry to VLAN counter */
4576 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4580 /* Allocate a hardware table entry to hold large act. Two actions for
4581 * counter based large action
4583 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4587 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4590 /* Update the switch rule to add the counter action */
4591 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4593 ice_release_lock(rule_lock);
/* Failure path below: the filter added above is rolled back, but only
 * when it did not exist before this call.
 */
4598 ice_release_lock(rule_lock);
4599 /* only remove entry if it did not exist previously */
4601 ret = ice_remove_mac(hw, &l_head);
4606 /* This is mapping table entry that maps every word within a given protocol
4607 * structure to the real byte offset as per the specification of that
4609 * for example dst address is 3 words in ethertype header and corresponding
4610 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4611 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4612 * matching entry describing its field. This needs to be updated if new
4613 * structure is added to that union.
4615 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4616 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4617 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4618 { ICE_ETYPE_OL, { 0 } },
4619 { ICE_VLAN_OFOS, { 0, 2 } },
4620 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4621 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4622 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4623 26, 28, 30, 32, 34, 36, 38 } },
4624 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4625 26, 28, 30, 32, 34, 36, 38 } },
4626 { ICE_TCP_IL, { 0, 2 } },
4627 { ICE_UDP_OF, { 0, 2 } },
4628 { ICE_UDP_ILOS, { 0, 2 } },
4629 { ICE_SCTP_IL, { 0, 2 } },
/* The UDP-based tunnel entries start at byte 8 — presumably the first 8
 * bytes of their header structs hold the preceding UDP header; confirm
 * against the ice_prot_hdr union definitions.
 */
4630 { ICE_VXLAN, { 8, 10, 12, 14 } },
4631 { ICE_GENEVE, { 8, 10, 12, 14 } },
4632 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4633 { ICE_NVGRE, { 0, 2, 4, 6 } },
4634 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4635 { ICE_PPPOE, { 0, 2, 4, 6 } },
4638 /* The following table describes preferred grouping of recipes.
4639 * If a recipe that needs to be programmed is a superset or matches one of the
4640 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used in
 * field vectors. Note the mapping is many-to-one: all UDP-based tunnels
 * (VXLAN, GENEVE, VXLAN-GPE, GTP) share ICE_UDP_OF_HW.
 */
4644 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4645 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4646 { ICE_MAC_IL, ICE_MAC_IL_HW },
4647 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4648 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4649 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4650 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4651 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4652 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4653 { ICE_TCP_IL, ICE_TCP_IL_HW },
4654 { ICE_UDP_OF, ICE_UDP_OF_HW },
4655 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4656 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4657 { ICE_VXLAN, ICE_UDP_OF_HW },
4658 { ICE_GENEVE, ICE_UDP_OF_HW },
4659 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4660 { ICE_NVGRE, ICE_GRE_OF_HW },
4661 { ICE_GTP, ICE_UDP_OF_HW },
4662 { ICE_PPPOE, ICE_PPPOE_HW },
4666 * ice_find_recp - find a recipe
4667 * @hw: pointer to the hardware structure
4668 * @lkup_exts: extension sequence to match
4670 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4672 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4674 bool refresh_required = true;
4675 struct ice_sw_recipe *recp;
4678 /* Walk through existing recipes to find a match */
4679 recp = hw->switch_info->recp_list;
4680 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4681 /* If recipe was not created for this ID, in SW bookkeeping,
4682 * check if FW has an entry for this recipe. If the FW has an
4683 * entry update it in our SW bookkeeping and continue with the
4686 if (!recp[i].recp_created)
4687 if (ice_get_recp_frm_fw(hw,
4688 hw->switch_info->recp_list, i,
4692 /* Skip inverse action recipes */
4693 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4694 ICE_AQ_RECIPE_ACT_INV_ACT)
4697 /* if number of words we are looking for match */
4698 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4699 struct ice_fv_word *a = lkup_exts->fv_words;
4700 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: for every requested word "p",
 * scan all of recipe i's words "q" for a (prot_id, off) match.
 */
4704 for (p = 0; p < lkup_exts->n_val_words; p++) {
4705 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4707 if (a[p].off == b[q].off &&
4708 a[p].prot_id == b[q].prot_id)
4709 /* Found the "p"th word in the
4714 /* After walking through all the words in the
4715 * "i"th recipe if "p"th word was not found then
4716 * this recipe is not what we are looking for.
4717 * So break out from this loop and try the next
4720 if (q >= recp[i].lkup_exts.n_val_words) {
4725 /* If for "i"th recipe the found was never set to false
4726 * then it means we found our match
4729 return i; /* Return the recipe ID */
/* Exhausted all recipes without a match */
4732 return ICE_MAX_NUM_RECIPES;
4736 * ice_prot_type_to_id - get protocol ID from protocol type
4737 * @type: protocol type
4738 * @id: pointer to variable that will receive the ID
4740 * Returns true if found, false otherwise
4742 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of ice_prot_id_tbl, terminated by the ICE_PROTOCOL_LAST
 * sentinel entry; *id is written only when a match is found.
 */
4746 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4747 if (ice_prot_id_tbl[i].type == type) {
4748 *id = ice_prot_id_tbl[i].protocol_id;
4755 * ice_fill_valid_words - count valid words
4756 * @rule: advanced rule with lookup information
4757 * @lkup_exts: byte offset extractions of the words that are valid
4759 * calculate valid words in a lookup rule using mask value
 *
 * Appends one fv_word entry per non-zero 16-bit mask word in @rule and
 * returns the number of words added (word - previous n_val_words).
4762 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4763 struct ice_prot_lkup_ext *lkup_exts)
/* Unknown protocol types are silently treated as contributing no words */
4769 if (!ice_prot_type_to_id(rule->type, &prot_id))
4772 word = lkup_exts->n_val_words;
/* Walk the mask union 16 bits at a time; only words with a non-zero
 * mask are extracted into lkup_exts.
 */
4774 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4775 if (((u16 *)&rule->m_u)[j] &&
4776 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4777 /* No more space to accommodate */
4778 if (word >= ICE_MAX_CHAIN_WORDS)
4780 lkup_exts->fv_words[word].off =
4781 ice_prot_ext[rule->type].offs[j];
4782 lkup_exts->fv_words[word].prot_id =
4783 ice_prot_id_tbl[rule->type].protocol_id;
4784 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4788 ret_val = word - lkup_exts->n_val_words;
4789 lkup_exts->n_val_words = word;
4795 * ice_create_first_fit_recp_def - Create a recipe grouping
4796 * @hw: pointer to the hardware structure
4797 * @lkup_exts: an array of protocol header extractions
4798 * @rg_list: pointer to a list that stores new recipe groups
4799 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4801 * Using first fit algorithm, take all the words that are still not done
4802 * and start grouping them in 4-word groups. Each group makes up one
4805 static enum ice_status
4806 ice_create_first_fit_recp_def(struct ice_hw *hw,
4807 struct ice_prot_lkup_ext *lkup_exts,
4808 struct LIST_HEAD_TYPE *rg_list,
4811 struct ice_pref_recipe_group *grp = NULL;
4816 /* Walk through every word in the rule to check if it is not done. If so
4817 * then this word needs to be part of a new recipe.
4819 for (j = 0; j < lkup_exts->n_val_words; j++)
4820 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists yet or the current one
 * is full (ICE_NUM_WORDS_RECIPE pairs).
 */
4822 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4823 struct ice_recp_grp_entry *entry;
4825 entry = (struct ice_recp_grp_entry *)
4826 ice_malloc(hw, sizeof(*entry));
4828 return ICE_ERR_NO_MEMORY;
/* NOTE(review): entries already added to rg_list are not freed
 * on the allocation-failure path above — presumably the caller
 * owns rg_list cleanup; confirm at the call sites.
 */
4829 LIST_ADD(&entry->l_entry, rg_list);
4830 grp = &entry->r_group;
4834 grp->pairs[grp->n_val_pairs].prot_id =
4835 lkup_exts->fv_words[j].prot_id;
4836 grp->pairs[grp->n_val_pairs].off =
4837 lkup_exts->fv_words[j].off;
4838 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4846 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4847 * @hw: pointer to the hardware structure
4848 * @fv_list: field vector with the extraction sequence information
4849 * @rg_list: recipe groupings with protocol-offset pairs
4851 * Helper function to fill in the field vector indices for protocol-offset
4852 * pairs. These indexes are then ultimately programmed into a recipe.
4854 static enum ice_status
4855 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4856 struct LIST_HEAD_TYPE *rg_list)
4858 struct ice_sw_fv_list_entry *fv;
4859 struct ice_recp_grp_entry *rg;
4860 struct ice_fv_word *fv_ext;
4862 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted; all groups are
 * resolved against its extraction words.
 */
4865 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4866 fv_ext = fv->fv_ptr->ew;
4868 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4871 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4872 struct ice_fv_word *pr;
4877 pr = &rg->r_group.pairs[i];
4878 mask = rg->r_group.mask[i];
/* Scan the FV extraction words for a matching
 * (prot_id, off) pair; fvw is the FV width.
 */
4880 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4881 if (fv_ext[j].prot_id == pr->prot_id &&
4882 fv_ext[j].off == pr->off) {
4885 /* Store index of field vector */
4887 /* Mask is given by caller as big
4888 * endian, but sent to FW as little
4891 rg->fv_mask[i] = mask << 8 | mask >> 8;
4895 /* Protocol/offset could not be found, caller gave an
4899 return ICE_ERR_PARAM;
4907 * ice_find_free_recp_res_idx - find free result indexes for recipe
4908 * @hw: pointer to hardware structure
4909 * @profiles: bitmap of profiles that will be associated with the new recipe
4910 * @free_idx: pointer to variable to receive the free index bitmap
4912 * The algorithm used here is:
4913 * 1. When creating a new recipe, create a set P which contains all
4914 * Profiles that will be associated with our new recipe
4916 * 2. For each Profile p in set P:
4917 * a. Add all recipes associated with Profile p into set R
4918 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4919 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4920 * i. Or just assume they all have the same possible indexes:
4922 * i.e., PossibleIndexes = 0x0000F00000000000
4924 * 3. For each Recipe r in set R:
4925 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4926 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4928 * FreeIndexes will contain the bits indicating the indexes free for use,
4929 * then the code needs to update the recipe[r].used_result_idx_bits to
4930 * indicate which indexes were selected for use by this recipe.
4933 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4934 ice_bitmap_t *free_idx)
4936 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4937 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4938 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4942 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4943 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4944 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4945 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start with every result index considered possible */
4947 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
4948 ice_set_bit(count, possible_idx);
4950 /* For each profile we are going to associate the recipe with, add the
4951 * recipes that are associated with that profile. This will give us
4952 * the set of recipes that our recipe may collide with. Also, determine
4953 * what possible result indexes are usable given this set of profiles.
4956 while (ICE_MAX_NUM_PROFILES >
4957 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4958 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4959 ICE_MAX_NUM_RECIPES);
4960 ice_and_bitmap(possible_idx, possible_idx,
4961 hw->switch_info->prof_res_bm[bit],
4966 /* For each recipe that our new recipe may collide with, determine
4967 * which indexes have been used.
4969 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4970 if (ice_is_bit_set(recipes, bit)) {
4971 ice_or_bitmap(used_idx, used_idx,
4972 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used; since used is a subset of possible this
 * leaves exactly the possible-but-unused indexes set.
 */
4976 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4978 /* return number of free indexes */
4981 while (ICE_MAX_FV_WORDS >
4982 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
4991 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4992 * @hw: pointer to hardware structure
4993 * @rm: recipe management list entry
4994 * @match_tun: if field vector index for tunnel needs to be programmed
4995 * @profiles: bitmap of profiles that will be associated.
4997 static enum ice_status
4998 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4999 bool match_tun, ice_bitmap_t *profiles)
5001 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5002 struct ice_aqc_recipe_data_elem *tmp;
5003 struct ice_aqc_recipe_data_elem *buf;
5004 struct ice_recp_grp_entry *entry;
5005 enum ice_status status;
5011 /* When more than one recipe are required, another recipe is needed to
5012 * chain them together. Matching a tunnel metadata ID takes up one of
5013 * the match fields in the chaining recipe reducing the number of
5014 * chained recipes by one.
5016 /* check number of free result indices */
5017 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5018 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5020 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5021 free_res_idx, rm->n_grp_count);
5023 if (rm->n_grp_count > 1) {
5024 if (rm->n_grp_count > free_res_idx)
5025 return ICE_ERR_MAX_LIMIT;
/* tmp holds the current recipes read back from FW, buf holds the
 * recipes being built for submission.
 */
5030 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5031 ICE_MAX_NUM_RECIPES,
5034 return ICE_ERR_NO_MEMORY;
5036 buf = (struct ice_aqc_recipe_data_elem *)
5037 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5039 status = ICE_ERR_NO_MEMORY;
5043 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5044 recipe_count = ICE_MAX_NUM_RECIPES;
5045 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5047 if (status || recipe_count == 0)
5050 /* Allocate the recipe resources, and configure them according to the
5051 * match fields from protocol headers and extracted field vectors.
5053 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5054 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5057 status = ice_alloc_recipe(hw, &entry->rid);
5061 /* Clear the result index of the located recipe, as this will be
5062 * updated, if needed, later in the recipe creation process.
5064 tmp[0].content.result_indx = 0;
/* Use tmp[0] as a template for each newly built recipe element */
5066 buf[recps] = tmp[0];
5067 buf[recps].recipe_indx = (u8)entry->rid;
5068 /* if the recipe is a non-root recipe RID should be programmed
5069 * as 0 for the rules to be applied correctly.
5071 buf[recps].content.rid = 0;
5072 ice_memset(&buf[recps].content.lkup_indx, 0,
5073 sizeof(buf[recps].content.lkup_indx),
5076 /* All recipes use look-up index 0 to match switch ID. */
5077 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5078 buf[recps].content.mask[0] =
5079 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5080 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5083 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5084 buf[recps].content.lkup_indx[i] = 0x80;
5085 buf[recps].content.mask[i] = 0;
/* Program the group's FV indices/masks into lookup slots 1..n */
5088 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5089 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5090 buf[recps].content.mask[i + 1] =
5091 CPU_TO_LE16(entry->fv_mask[i]);
5094 if (rm->n_grp_count > 1) {
5095 /* Checks to see if there really is a valid result index
5098 if (chain_idx >= ICE_MAX_FV_WORDS) {
5099 ice_debug(hw, ICE_DBG_SW,
5100 "No chain index available\n");
5101 status = ICE_ERR_MAX_LIMIT;
/* Each chained sub-recipe consumes one result index; take the
 * current one and advance to the next free bit.
 */
5105 entry->chain_idx = chain_idx;
5106 buf[recps].content.result_indx =
5107 ICE_AQ_RECIPE_RESULT_EN |
5108 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5109 ICE_AQ_RECIPE_RESULT_DATA_M);
5110 ice_clear_bit(chain_idx, result_idx_bm);
5111 chain_idx = ice_find_first_bit(result_idx_bm,
5115 /* fill recipe dependencies */
5116 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5117 ICE_MAX_NUM_RECIPES);
5118 ice_set_bit(buf[recps].recipe_indx,
5119 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5120 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is also the root recipe */
5124 if (rm->n_grp_count == 1) {
5125 rm->root_rid = buf[0].recipe_indx;
5126 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5127 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5128 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5129 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5130 sizeof(buf[0].recipe_bitmap),
5131 ICE_NONDMA_TO_NONDMA);
5133 status = ICE_ERR_BAD_PTR;
5136 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5137 * the recipe which is getting created if specified
5138 * by user. Usually any advanced switch filter, which results
5139 * into new extraction sequence, ended up creating a new recipe
5140 * of type ROOT and usually recipes are associated with profiles
5141 * Switch rule referring newly created recipe, needs to have
5142 * either/or 'fwd' or 'join' priority, otherwise switch rule
5143 * evaluation will not happen correctly. In other words, if
5144 * switch rule to be evaluated on priority basis, then recipe
5145 * needs to have priority, otherwise it will be evaluated last.
5147 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: allocate one extra root recipe that chains the
 * result indexes of all sub-recipes together.
 */
5149 struct ice_recp_grp_entry *last_chain_entry;
5152 /* Allocate the last recipe that will chain the outcomes of the
5153 * other recipes together
5155 status = ice_alloc_recipe(hw, &rid);
5159 buf[recps].recipe_indx = (u8)rid;
5160 buf[recps].content.rid = (u8)rid;
5161 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5162 /* the new entry created should also be part of rg_list to
5163 * make sure we have complete recipe
5165 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5166 sizeof(*last_chain_entry));
5167 if (!last_chain_entry) {
5168 status = ICE_ERR_NO_MEMORY;
5171 last_chain_entry->rid = rid;
5172 ice_memset(&buf[recps].content.lkup_indx, 0,
5173 sizeof(buf[recps].content.lkup_indx),
5175 /* All recipes use look-up index 0 to match switch ID. */
5176 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5177 buf[recps].content.mask[0] =
5178 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5179 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5180 buf[recps].content.lkup_indx[i] =
5181 ICE_AQ_RECIPE_LKUP_IGNORE;
5182 buf[recps].content.mask[i] = 0;
5186 /* update r_bitmap with the recp that is used for chaining */
5187 ice_set_bit(rid, rm->r_bitmap);
5188 /* this is the recipe that chains all the other recipes so it
5189 * should not have a chaining ID to indicate the same
5191 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Point the chaining recipe's lookup slots at each sub-recipe's
 * result index with a full 0xFFFF match mask.
 */
5192 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5194 last_chain_entry->fv_idx[i] = entry->chain_idx;
5195 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5196 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5197 ice_set_bit(entry->rid, rm->r_bitmap);
5199 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5200 if (sizeof(buf[recps].recipe_bitmap) >=
5201 sizeof(rm->r_bitmap)) {
5202 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5203 sizeof(buf[recps].recipe_bitmap),
5204 ICE_NONDMA_TO_NONDMA);
5206 status = ICE_ERR_BAD_PTR;
5209 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5211 /* To differentiate among different UDP tunnels, a meta data ID
5215 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5216 buf[recps].content.mask[i] =
5217 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5221 rm->root_rid = (u8)rid;
/* Submit all assembled recipes to FW under the change lock */
5223 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5227 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5228 ice_release_change_lock(hw);
5232 /* Every recipe that just got created add it to the recipe
5235 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5236 struct ice_switch_info *sw = hw->switch_info;
5237 bool is_root, idx_found = false;
5238 struct ice_sw_recipe *recp;
5239 u16 idx, buf_idx = 0;
5241 /* find buffer index for copying some data */
5242 for (idx = 0; idx < rm->n_grp_count; idx++)
5243 if (buf[idx].recipe_indx == entry->rid) {
5249 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the created recipe into the SW book-keeping list */
5253 recp = &sw->recp_list[entry->rid];
5254 is_root = (rm->root_rid == entry->rid);
5255 recp->is_root = is_root;
5257 recp->root_rid = entry->rid;
5258 recp->big_recp = (is_root && rm->n_grp_count > 1);
5260 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5261 entry->r_group.n_val_pairs *
5262 sizeof(struct ice_fv_word),
5263 ICE_NONDMA_TO_NONDMA);
5265 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5266 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5268 /* Copy non-result fv index values and masks to recipe. This
5269 * call will also update the result recipe bitmask.
5271 ice_collect_result_idx(&buf[buf_idx], recp);
5273 /* for non-root recipes, also copy to the root, this allows
5274 * easier matching of a complete chained recipe
5277 ice_collect_result_idx(&buf[buf_idx],
5278 &sw->recp_list[rm->root_rid]);
5280 recp->n_ext_words = entry->r_group.n_val_pairs;
5281 recp->chain_idx = entry->chain_idx;
5282 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5283 recp->n_grp_count = rm->n_grp_count;
5284 recp->tun_type = rm->tun_type;
5285 recp->recp_created = true;
5300 * ice_create_recipe_group - creates recipe group
5301 * @hw: pointer to hardware structure
5302 * @rm: recipe management list entry
5303 * @lkup_exts: lookup elements
 *
 * Packs the not-yet-done lookup words into recipe groups on rm->rg_list and
 * copies the extraction words and field masks into @rm.
5305 static enum ice_status
5306 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5307 struct ice_prot_lkup_ext *lkup_exts)
5309 enum ice_status status;
5312 rm->n_grp_count = 0;
5314 /* Create recipes for words that are marked not done by packing them
5317 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5318 &rm->rg_list, &recp_count);
5320 rm->n_grp_count += recp_count;
5321 rm->n_ext_words = lkup_exts->n_val_words;
/* Copy full-sized arrays; only the first n_val_words entries are valid */
5322 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5323 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5324 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5325 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5332 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5333 * @hw: pointer to hardware structure
5334 * @lkups: lookup elements or match criteria for the advanced recipe, one
5335 * structure per protocol header
5336 * @lkups_cnt: number of protocols
5337 * @bm: bitmap of field vectors to consider
5338 * @fv_list: pointer to a list that holds the returned field vectors
5340 static enum ice_status
5341 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5342 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5344 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element */
5348 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5350 return ICE_ERR_NO_MEMORY;
/* Any lookup type without a HW protocol mapping is a config error */
5352 for (i = 0; i < lkups_cnt; i++)
5353 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5354 status = ICE_ERR_CFG;
5358 /* Find field vectors that include all specified protocol types */
5359 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is freed on both success and error paths */
5362 ice_free(hw, prot_ids);
5367 * ice_add_special_words - Add words that are not protocols, such as metadata
5368 * @rinfo: other information regarding the rule e.g. priority and action info
5369 * @lkup_exts: lookup word structure
 *
 * For tunneled rules, appends one metadata lookup word that matches the
 * tunnel flag bit; returns ICE_ERR_MAX_LIMIT if no word slot remains.
5371 static enum ice_status
5372 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5373 struct ice_prot_lkup_ext *lkup_exts)
5375 /* If this is a tunneled packet, then add recipe index to match the
5376 * tunnel bit in the packet metadata flags.
5378 if (rinfo->tun_type != ICE_NON_TUN) {
5379 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5380 u8 word = lkup_exts->n_val_words++;
5382 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5383 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5385 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5387 return ICE_ERR_MAX_LIMIT;
5394 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5395 * @hw: pointer to hardware structure
5396 * @rinfo: other information regarding the rule e.g. priority and action info
5397 * @bm: pointer to memory for returning the bitmap of field vectors
5400 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5403 enum ice_prof_type type;
/* Map the rule's tunnel type to a profile class, then let
 * ice_get_sw_fv_bitmap() fill @bm with the matching field vectors.
 */
5405 switch (rinfo->tun_type) {
5407 type = ICE_PROF_NON_TUN;
5409 case ICE_ALL_TUNNELS:
5410 type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnels share the same UDP tunnel profile */
5412 case ICE_SW_TUN_VXLAN_GPE:
5413 case ICE_SW_TUN_GENEVE:
5414 case ICE_SW_TUN_VXLAN:
5415 case ICE_SW_TUN_UDP:
5416 case ICE_SW_TUN_GTP:
5417 type = ICE_PROF_TUN_UDP;
5419 case ICE_SW_TUN_NVGRE:
5420 type = ICE_PROF_TUN_GRE;
5422 case ICE_SW_TUN_PPPOE:
5423 type = ICE_PROF_TUN_PPPOE;
5425 case ICE_SW_TUN_AND_NON_TUN:
5427 type = ICE_PROF_ALL;
5431 ice_get_sw_fv_bitmap(hw, type, bm);
5435 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5436 * @hw: pointer to hardware structure
5437 * @lkups: lookup elements or match criteria for the advanced recipe, one
5438 * structure per protocol header
5439 * @lkups_cnt: number of protocols
5440 * @rinfo: other information regarding the rule e.g. priority and action info
5441 * @rid: return the recipe ID of the recipe created
5443 static enum ice_status
5444 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5445 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5447 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5448 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5449 struct ice_prot_lkup_ext *lkup_exts;
5450 struct ice_recp_grp_entry *r_entry;
5451 struct ice_sw_fv_list_entry *fvit;
5452 struct ice_recp_grp_entry *r_tmp;
5453 struct ice_sw_fv_list_entry *tmp;
5454 enum ice_status status = ICE_SUCCESS;
5455 struct ice_sw_recipe *rm;
5456 bool match_tun = false;
5460 return ICE_ERR_PARAM;
5462 lkup_exts = (struct ice_prot_lkup_ext *)
5463 ice_malloc(hw, sizeof(*lkup_exts))
5465 return ICE_ERR_NO_MEMORY;
5467 /* Determine the number of words to be matched and if it exceeds a
5468 * recipe's restrictions
5470 for (i = 0; i < lkups_cnt; i++) {
5473 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5474 status = ICE_ERR_CFG;
5475 goto err_free_lkup_exts;
5478 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5480 status = ICE_ERR_CFG;
5481 goto err_free_lkup_exts;
5485 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5487 status = ICE_ERR_NO_MEMORY;
5488 goto err_free_lkup_exts;
5491 /* Get field vectors that contain fields extracted from all the protocol
5492 * headers being programmed.
5494 INIT_LIST_HEAD(&rm->fv_list);
5495 INIT_LIST_HEAD(&rm->rg_list);
5497 /* Get bitmap of field vectors (profiles) that are compatible with the
5498 * rule request; only these will be searched in the subsequent call to
5501 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5503 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5507 /* Group match words into recipes using preferred recipe grouping
5510 status = ice_create_recipe_group(hw, rm, lkup_exts);
5514 /* There is only profile for UDP tunnels. So, it is necessary to use a
5515 * metadata ID flag to differentiate different tunnel types. A separate
5516 * recipe needs to be used for the metadata.
5518 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5519 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5520 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5523 /* set the recipe priority if specified */
5524 rm->priority = rinfo->priority ? rinfo->priority : 0;
5526 /* Find offsets from the field vector. Pick the first one for all the
5529 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5533 /* get bitmap of all profiles the recipe will be associated with */
5534 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5535 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5537 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5538 ice_set_bit((u16)fvit->profile_id, profiles);
5541 /* Create any special protocol/offset pairs, such as looking at tunnel
5542 * bits by extracting metadata
5544 status = ice_add_special_words(rinfo, lkup_exts);
5546 goto err_free_lkup_exts;
5548 /* Look for a recipe which matches our requested fv / mask list */
5549 *rid = ice_find_recp(hw, lkup_exts);
5550 if (*rid < ICE_MAX_NUM_RECIPES)
5551 /* Success if found a recipe that match the existing criteria */
5554 /* Recipe we need does not exist, add a recipe */
5555 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5559 /* Associate all the recipes created with all the profiles in the
5560 * common field vector.
5562 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5564 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read FW's current recipe-to-profile association, OR in the new
 * recipes, and write the merged bitmap back under the change lock.
 */
5567 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5568 (u8 *)r_bitmap, NULL);
5572 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5573 ICE_MAX_NUM_RECIPES);
5574 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5578 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5581 ice_release_change_lock(hw);
5586 /* Update profile to recipe bitmap array */
5587 ice_memcpy(profile_to_recipe[fvit->profile_id], r_bitmap,
5588 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
5590 /* Update recipe to profile bitmap array */
5591 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5592 if (ice_is_bit_set(r_bitmap, j))
5593 ice_set_bit((u16)fvit->profile_id,
5594 recipe_to_profile[j]);
5597 *rid = rm->root_rid;
5598 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5599 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release the recipe group entries and field vector list
 * accumulated on rm, then the lookup extension scratch buffer.
 */
5601 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5602 ice_recp_grp_entry, l_entry) {
5603 LIST_DEL(&r_entry->l_entry);
5604 ice_free(hw, r_entry);
5607 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5609 LIST_DEL(&fvit->list_entry);
5614 ice_free(hw, rm->root_buf);
5619 ice_free(hw, lkup_exts);
5625 * ice_find_dummy_packet - find dummy packet by tunnel type
5627 * @lkups: lookup elements or match criteria for the advanced recipe, one
5628 * structure per protocol header
5629 * @lkups_cnt: number of protocols
5630 * @tun_type: tunnel type from the match criteria
5631 * @pkt: dummy packet to fill according to filter match criteria
5632 * @pkt_len: packet length of dummy packet
5633 * @offsets: pointer to receive the pointer to the offsets for the packet
5636 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5637 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5639 const struct ice_dummy_pkt_offsets **offsets)
5641 bool tcp = false, udp = false, ipv6 = false;
/* GTP and PPPoE have dedicated templates regardless of L4/L3 lookups */
5644 if (tun_type == ICE_SW_TUN_GTP) {
5645 *pkt = dummy_udp_gtp_packet;
5646 *pkt_len = sizeof(dummy_udp_gtp_packet);
5647 *offsets = dummy_udp_gtp_packet_offsets;
5650 if (tun_type == ICE_SW_TUN_PPPOE) {
5651 *pkt = dummy_pppoe_packet;
5652 *pkt_len = sizeof(dummy_pppoe_packet);
5653 *offsets = dummy_pppoe_packet_offsets;
/* Inspect the lookup list to learn which L4/L3 headers are matched */
5656 for (i = 0; i < lkups_cnt; i++) {
5657 if (lkups[i].type == ICE_UDP_ILOS)
5659 else if (lkups[i].type == ICE_TCP_IL)
5661 else if (lkups[i].type == ICE_IPV6_OFOS)
5665 if (tun_type == ICE_ALL_TUNNELS) {
5666 *pkt = dummy_gre_udp_packet;
5667 *pkt_len = sizeof(dummy_gre_udp_packet);
5668 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: choose TCP or UDP inner payload template */
5672 if (tun_type == ICE_SW_TUN_NVGRE) {
5674 *pkt = dummy_gre_tcp_packet;
5675 *pkt_len = sizeof(dummy_gre_tcp_packet);
5676 *offsets = dummy_gre_tcp_packet_offsets;
5680 *pkt = dummy_gre_udp_packet;
5681 *pkt_len = sizeof(dummy_gre_udp_packet);
5682 *offsets = dummy_gre_udp_packet_offsets;
5686 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5687 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5689 *pkt = dummy_udp_tun_tcp_packet;
5690 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5691 *offsets = dummy_udp_tun_tcp_packet_offsets;
5695 *pkt = dummy_udp_tun_udp_packet;
5696 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5697 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled fallbacks chosen from the L4/L3 flags gathered above */
5702 *pkt = dummy_udp_packet;
5703 *pkt_len = sizeof(dummy_udp_packet);
5704 *offsets = dummy_udp_packet_offsets;
5706 } else if (udp && ipv6) {
5707 *pkt = dummy_udp_ipv6_packet;
5708 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5709 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" reduces to just "ipv6" — the
 * tcp term is redundant here; behavior is unchanged but the condition
 * could be simplified.
 */
5711 } else if ((tcp && ipv6) || ipv6) {
5712 *pkt = dummy_tcp_ipv6_packet;
5713 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5714 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Default: plain IPv4 TCP template */
5718 *pkt = dummy_tcp_packet;
5719 *pkt_len = sizeof(dummy_tcp_packet);
5720 *offsets = dummy_tcp_packet_offsets;
5724 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5726 * @lkups: lookup elements or match criteria for the advanced recipe, one
5727 * structure per protocol header
5728 * @lkups_cnt: number of protocols
5729 * @s_rule: stores rule information from the match criteria
5730 * @dummy_pkt: dummy packet to fill according to filter match criteria
5731 * @pkt_len: packet length of dummy packet
5732 * @offsets: offset info for the dummy packet
static enum ice_status
5735 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5736 struct ice_aqc_sw_rules_elem *s_rule,
5737 const u8 *dummy_pkt, u16 pkt_len,
5738 const struct ice_dummy_pkt_offsets *offsets)
/* NOTE(review): several lines (opening brace, local declarations, breaks,
 * final return) appear dropped from this extraction; code left byte-identical.
 */
5743 /* Start with a packet with a pre-defined/dummy content. Then, fill
5744 * in the header values to be looked up or matched.
5746 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5748 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5750 for (i = 0; i < lkups_cnt; i++) {
5751 enum ice_protocol_type type;
5752 u16 offset = 0, len = 0, j;
5755 /* find the start of this layer; it should be found since this
5756 * was already checked when search for the dummy packet
5758 type = lkups[i].type;
5759 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5760 if (type == offsets[j].type) {
5761 offset = offsets[j].offset;
5766 /* this should never happen in a correct calling sequence */
5768 return ICE_ERR_PARAM;
/* Map the protocol type to its header length so we know how many
 * words of the caller's header/mask to splice into the packet.
 * NOTE(review): the case labels and breaks are missing from this
 * extraction; only the length assignments survived.
 */
5770 switch (lkups[i].type) {
5773 len = sizeof(struct ice_ether_hdr);
5776 len = sizeof(struct ice_ethtype_hdr);
5779 len = sizeof(struct ice_vlan_hdr);
5783 len = sizeof(struct ice_ipv4_hdr);
5787 len = sizeof(struct ice_ipv6_hdr);
5792 len = sizeof(struct ice_l4_hdr);
5795 len = sizeof(struct ice_sctp_hdr);
5798 len = sizeof(struct ice_nvgre);
5803 len = sizeof(struct ice_udp_tnl_hdr);
5807 len = sizeof(struct ice_udp_gtp_hdr);
5810 len = sizeof(struct ice_pppoe_hdr);
5813 return ICE_ERR_PARAM;
5816 /* the length should be a word multiple */
5817 if (len % ICE_BYTES_PER_WORD)
5820 /* We have the offset to the header start, the length, the
5821 * caller's header values and mask. Use this information to
5822 * copy the data into the dummy packet appropriately based on
5823 * the mask. Note that we need to only write the bits as
5824 * indicated by the mask to make sure we don't improperly write
5825 * over any significant packet data.
/* Word-by-word merge: keep packet bits where mask is 0, take the
 * caller's header bits where mask is 1.
 */
5827 for (j = 0; j < len / sizeof(u16); j++)
5828 if (((u16 *)&lkups[i].m_u)[j])
5829 ((u16 *)(pkt + offset))[j] =
5830 (((u16 *)(pkt + offset))[j] &
5831 ~((u16 *)&lkups[i].m_u)[j]) |
5832 (((u16 *)&lkups[i].h_u)[j] &
5833 ((u16 *)&lkups[i].m_u)[j]);
/* Record the finished header length in the switch rule element */
5836 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5842 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5843 * @hw: pointer to the hardware structure
5844 * @tun_type: tunnel type
5845 * @pkt: dummy packet to fill in
5846 * @offsets: offset info for the dummy packet
static enum ice_status
5849 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5850 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* NOTE(review): opening brace, local declarations, switch keyword, breaks
 * and returns appear dropped from this extraction; code left byte-identical.
 */
5855 case ICE_SW_TUN_AND_NON_TUN:
5856 case ICE_SW_TUN_VXLAN_GPE:
5857 case ICE_SW_TUN_VXLAN:
5858 case ICE_SW_TUN_UDP:
/* Look up the UDP port an open VXLAN tunnel is bound to */
5859 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5863 case ICE_SW_TUN_GENEVE:
5864 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5869 /* Nothing needs to be done for this tunnel type */
5873 /* Find the outer UDP protocol header and insert the port number */
5874 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5875 if (offsets[i].type == ICE_UDP_OF) {
5876 struct ice_l4_hdr *hdr;
5879 offset = offsets[i].offset;
5880 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Manual 16-bit byte swap: store the port in network byte order */
5881 hdr->dst_port = open_port << 8 | open_port >> 8;
5891 * ice_find_adv_rule_entry - Search a rule entry
5892 * @hw: pointer to the hardware structure
5893 * @lkups: lookup elements or match criteria for the advanced recipe, one
5894 * structure per protocol header
5895 * @lkups_cnt: number of protocols
5896 * @recp_id: recipe ID for which we are finding the rule
5897 * @rinfo: other information regarding the rule e.g. priority and action info
5899 * Helper function to search for a given advance rule entry
5900 * Returns pointer to entry storing the rule if found
static struct ice_adv_fltr_mgmt_list_entry *
5903 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5904 u16 lkups_cnt, u8 recp_id,
5905 struct ice_adv_rule_info *rinfo)
/* NOTE(review): opening brace, loop body details and the trailing
 * return statements appear dropped; code left byte-identical.
 */
5907 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5908 struct ice_switch_info *sw = hw->switch_info;
/* Walk every rule already installed on this recipe */
5911 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5912 ice_adv_fltr_mgmt_list_entry, list_entry) {
5913 bool lkups_matched = true;
/* Quick reject: a match must have the same number of lookups */
5915 if (lkups_cnt != list_itr->lkups_cnt)
/* Deep compare each lookup element against the candidate's */
5917 for (i = 0; i < list_itr->lkups_cnt; i++)
5918 if (memcmp(&list_itr->lkups[i], &lkups[i],
5920 lkups_matched = false;
/* Rule also matches only when flag and tunnel type agree */
5923 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5924 rinfo->tun_type == list_itr->rule_info.tun_type &&
5932 * ice_adv_add_update_vsi_list
5933 * @hw: pointer to the hardware structure
5934 * @m_entry: pointer to current adv filter management list entry
5935 * @cur_fltr: filter information from the book keeping entry
5936 * @new_fltr: filter information with the new VSI to be added
5938 * Call AQ command to add or update previously created VSI list with new VSI.
5940 * Helper function to do book keeping associated with adding filter information
5941 * The algorithm to do the booking keeping is described below :
5942 * When a VSI needs to subscribe to a given advanced filter
5943 * if only one VSI has been added till now
5944 * Allocate a new VSI list and add two VSIs
5945 * to this list using switch rule command
5946 * Update the previously created switch rule with the
5947 * newly created VSI list ID
5948 * if a VSI list was previously created
5949 * Add the new VSI to the previously created VSI list set
5950 * using the update switch rule command
static enum ice_status
5953 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5954 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5955 struct ice_adv_rule_info *cur_fltr,
5956 struct ice_adv_rule_info *new_fltr)
/* NOTE(review): braces and several statements (status checks, returns)
 * appear dropped from this extraction; code left byte-identical.
 */
5958 enum ice_status status;
5959 u16 vsi_list_id = 0;
/* Queue-directed filters cannot be converted to VSI lists */
5961 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5962 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5963 return ICE_ERR_NOT_IMPL;
/* Two drop rules for the same match are redundant */
5965 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5966 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5967 return ICE_ERR_ALREADY_EXISTS;
/* Cannot mix queue forwarding with an existing VSI/VSI-list rule */
5969 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5970 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5971 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5972 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5973 return ICE_ERR_NOT_IMPL;
5975 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5976 /* Only one entry existed in the mapping and it was not already
5977 * a part of a VSI list. So, create a VSI list with the old and
5980 struct ice_fltr_info tmp_fltr;
5981 u16 vsi_handle_arr[2];
5983 /* A rule already exists with the new VSI being added */
5984 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5985 new_fltr->sw_act.fwd_id.hw_vsi_id)
5986 return ICE_ERR_ALREADY_EXISTS;
/* Build a two-entry VSI list from the old and the new VSI */
5988 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5989 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5990 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5996 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5997 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5998 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5999 /* Update the previous switch rule of "forward to VSI" to
6002 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Switch the book-keeping entry over to the new VSI list */
6006 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6007 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6008 m_entry->vsi_list_info =
6009 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6012 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6014 if (!m_entry->vsi_list_info)
6017 /* A rule already exists with the new VSI being added */
6018 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6021 /* Update the previously created VSI list set with
6022 * the new VSI ID passed in
6024 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6026 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6028 ice_aqc_opc_update_sw_rules,
6030 /* update VSI list mapping info with new VSI ID */
6032 ice_set_bit(vsi_handle,
6033 m_entry->vsi_list_info->vsi_map);
/* Presumably incremented only on success — TODO confirm; the
 * surrounding status check lines are missing from the extraction.
 */
6036 m_entry->vsi_count++;
6041 * ice_add_adv_rule - helper function to create an advanced switch rule
6042 * @hw: pointer to the hardware structure
6043 * @lkups: information on the words that needs to be looked up. All words
6044 * together makes one recipe
6045 * @lkups_cnt: num of entries in the lkups array
6046 * @rinfo: other information related to the rule that needs to be programmed
6047 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6048 * ignored is case of error.
6050 * This function can program only 1 rule at a time. The lkups is used to
6051 * describe the all the words that forms the "lookup" portion of the recipe.
6052 * These words can span multiple protocols. Callers to this function need to
6053 * pass in a list of protocol headers with lookup information along and mask
6054 * that determines which words are valid from the given protocol header.
6055 * rinfo describes other information related to this rule such as forwarding
6056 * IDs, priority of this rule, etc.
/* NOTE(review): this function's extraction dropped many lines (return type,
 * braces, declarations of act/q_rgn/word_cnt, status checks, break
 * statements); the surviving code is left byte-identical below.
 */
6059 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6060 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6061 struct ice_rule_query_data *added_entry)
6063 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6064 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6065 const struct ice_dummy_pkt_offsets *pkt_offsets;
6066 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6067 struct LIST_HEAD_TYPE *rule_head;
6068 struct ice_switch_info *sw;
6069 enum ice_status status;
6070 const u8 *pkt = NULL;
6075 /* Initialize profile to result index bitmap */
6076 if (!hw->switch_info->prof_res_bm_init) {
6077 hw->switch_info->prof_res_bm_init = 1;
6078 ice_init_prof_result_bm(hw);
6082 return ICE_ERR_PARAM;
6084 /* get # of words we need to match */
6086 for (i = 0; i < lkups_cnt; i++) {
6089 ptr = (u16 *)&lkups[i].m_u;
6090 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* Reject rules with no valid mask words or too many to chain */
6094 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6095 return ICE_ERR_PARAM;
6097 /* make sure that we can locate a dummy packet */
6098 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6101 status = ICE_ERR_PARAM;
6102 goto err_ice_add_adv_rule;
/* Only these four filter actions are supported for advanced rules */
6105 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6106 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6107 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6108 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6111 vsi_handle = rinfo->sw_act.vsi_handle;
6112 if (!ice_is_vsi_valid(hw, vsi_handle))
6113 return ICE_ERR_PARAM;
/* Resolve the driver VSI handle into the HW VSI number */
6115 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6116 rinfo->sw_act.fwd_id.hw_vsi_id =
6117 ice_get_hw_vsi_num(hw, vsi_handle);
6118 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6119 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or reuse) a recipe that extracts the requested words */
6121 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6124 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6126 /* we have to add VSI to VSI_LIST and increment vsi_count.
6127 * Also Update VSI list so that we can change forwarding rule
6128 * if the rule already exists, we will check if it exists with
6129 * same vsi_id, if not then add it to the VSI list if it already
6130 * exists if not then create a VSI list and add the existing VSI
6131 * ID and the new VSI ID to the list
6132 * We will add that VSI to the list
6134 status = ice_adv_add_update_vsi_list(hw, m_entry,
6135 &m_entry->rule_info,
6138 added_entry->rid = rid;
6139 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6140 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule: allocate an AQ buffer large enough for the
 * rule element plus the dummy packet header.
 */
6144 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6145 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6147 return ICE_ERR_NO_MEMORY;
/* Build the single-action word for the chosen filter action */
6148 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6149 switch (rinfo->sw_act.fltr_act) {
6150 case ICE_FWD_TO_VSI:
6151 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6152 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6153 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6156 act |= ICE_SINGLE_ACT_TO_Q;
6157 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6158 ICE_SINGLE_ACT_Q_INDEX_M;
6160 case ICE_FWD_TO_QGRP:
/* Queue-group region size is encoded as log2 of the group size */
6161 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6162 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6163 act |= ICE_SINGLE_ACT_TO_Q;
6164 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6165 ICE_SINGLE_ACT_Q_INDEX_M;
6166 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6167 ICE_SINGLE_ACT_Q_REGION_M;
6169 case ICE_DROP_PACKET:
6170 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6171 ICE_SINGLE_ACT_VALID_BIT;
6174 status = ICE_ERR_CFG;
6175 goto err_ice_add_adv_rule;
6178 /* set the rule LOOKUP type based on caller specified 'RX'
6179 * instead of hardcoding it to be either LOOKUP_TX/RX
6181 * for 'RX' set the source to be the port number
6182 * for 'TX' set the source to be the source HW VSI number (determined
6186 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6187 s_rule->pdata.lkup_tx_rx.src =
6188 CPU_TO_LE16(hw->port_info->lport);
6190 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6191 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6194 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6195 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Splice the caller's match values into the dummy packet header */
6197 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6198 pkt_len, pkt_offsets);
6200 goto err_ice_add_adv_rule;
/* For tunneled rules, patch in the currently open tunnel UDP port */
6202 if (rinfo->tun_type != ICE_NON_TUN) {
6203 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6204 s_rule->pdata.lkup_tx_rx.hdr,
6207 goto err_ice_add_adv_rule;
/* Program the rule into hardware via the admin queue */
6210 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6211 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6214 goto err_ice_add_adv_rule;
/* Build the book-keeping entry mirroring what HW now holds */
6215 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6216 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6218 status = ICE_ERR_NO_MEMORY;
6219 goto err_ice_add_adv_rule;
6222 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6223 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6224 ICE_NONDMA_TO_NONDMA);
6225 if (!adv_fltr->lkups) {
6226 status = ICE_ERR_NO_MEMORY;
6227 goto err_ice_add_adv_rule;
6230 adv_fltr->lkups_cnt = lkups_cnt;
6231 adv_fltr->rule_info = *rinfo;
/* HW returns the assigned rule index in the response buffer */
6232 adv_fltr->rule_info.fltr_rule_id =
6233 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6234 sw = hw->switch_info;
6235 sw->recp_list[rid].adv_rule = true;
6236 rule_head = &sw->recp_list[rid].filt_rules;
6238 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6239 struct ice_fltr_info tmp_fltr;
6241 tmp_fltr.fltr_rule_id =
6242 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6243 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6244 tmp_fltr.fwd_id.hw_vsi_id =
6245 ice_get_hw_vsi_num(hw, vsi_handle);
6246 tmp_fltr.vsi_handle = vsi_handle;
6247 /* Update the previous switch rule of "forward to VSI" to
6250 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6252 goto err_ice_add_adv_rule;
6253 adv_fltr->vsi_count = 1;
6256 /* Add rule entry to book keeping list */
6257 LIST_ADD(&adv_fltr->list_entry, rule_head);
6259 added_entry->rid = rid;
6260 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6261 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Common exit: release partially-built entry on failure, then the
 * AQ buffer unconditionally.
 */
6263 err_ice_add_adv_rule:
6264 if (status && adv_fltr) {
6265 ice_free(hw, adv_fltr->lkups);
6266 ice_free(hw, adv_fltr);
6269 ice_free(hw, s_rule);
6275 * ice_adv_rem_update_vsi_list
6276 * @hw: pointer to the hardware structure
6277 * @vsi_handle: VSI handle of the VSI to remove
6278 * @fm_list: filter management entry for which the VSI list management needs to
static enum ice_status
6282 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6283 struct ice_adv_fltr_mgmt_list_entry *fm_list)
/* NOTE(review): braces and several status-check lines appear dropped from
 * this extraction; code left byte-identical.
 */
6285 struct ice_vsi_list_map_info *vsi_list_info;
6286 enum ice_sw_lkup_type lkup_type;
6287 enum ice_status status;
6290 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6291 fm_list->vsi_count == 0)
6292 return ICE_ERR_PARAM;
6294 /* A rule with the VSI being removed does not exist */
6295 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6296 return ICE_ERR_DOES_NOT_EXIST;
/* Remove this VSI from the HW VSI list attached to the rule */
6298 lkup_type = ICE_SW_LKUP_LAST;
6299 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6300 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6301 ice_aqc_opc_update_sw_rules,
/* Mirror the removal in the book-keeping bitmap and count */
6306 fm_list->vsi_count--;
6307 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6308 vsi_list_info = fm_list->vsi_list_info;
6309 if (fm_list->vsi_count == 1) {
/* Only one VSI remains: collapse the VSI list back into a
 * plain forward-to-VSI rule and free the list.
 */
6310 struct ice_fltr_info tmp_fltr;
6313 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6315 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6316 return ICE_ERR_OUT_OF_RANGE;
6318 /* Make sure VSI list is empty before removing it below */
6319 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6321 ice_aqc_opc_update_sw_rules,
6325 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6326 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6327 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6328 tmp_fltr.fwd_id.hw_vsi_id =
6329 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6330 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6331 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6333 /* Update the previous switch rule of "MAC forward to VSI" to
6334 * "MAC fwd to VSI list"
6336 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6338 ice_debug(hw, ICE_DBG_SW,
6339 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6340 tmp_fltr.fwd_id.hw_vsi_id, status);
6344 /* Remove the VSI list since it is no longer used */
6345 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6347 ice_debug(hw, ICE_DBG_SW,
6348 "Failed to remove VSI list %d, error %d\n",
6349 vsi_list_id, status);
/* Drop the software mapping for the now-removed VSI list */
6353 LIST_DEL(&vsi_list_info->list_entry);
6354 ice_free(hw, vsi_list_info);
6355 fm_list->vsi_list_info = NULL;
6362 * ice_rem_adv_rule - removes existing advanced switch rule
6363 * @hw: pointer to the hardware structure
6364 * @lkups: information on the words that needs to be looked up. All words
6365 * together makes one recipe
6366 * @lkups_cnt: num of entries in the lkups array
6367 * @rinfo: Its the pointer to the rule information for the rule
6369 * This function can be used to remove 1 rule at a time. The lkups is
6370 * used to describe all the words that forms the "lookup" portion of the
6371 * rule. These words can span multiple protocols. Callers to this function
6372 * need to pass in a list of protocol headers with lookup information along
6373 * and mask that determines which words are valid from the given protocol
6374 * header. rinfo describes other information related to this rule such as
6375 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): return-type line, braces and some status checks appear
 * dropped from this extraction; code left byte-identical.
 */
6378 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6379 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6381 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6382 struct ice_prot_lkup_ext lkup_exts;
6383 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6384 enum ice_status status = ICE_SUCCESS;
6385 bool remove_rule = false;
6386 u16 i, rid, vsi_handle;
/* Re-derive the protocol/offset words so the recipe can be located */
6388 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6389 for (i = 0; i < lkups_cnt; i++) {
6392 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6395 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6400 /* Create any special protocol/offset pairs, such as looking at tunnel
6401 * bits by extracting metadata
6403 status = ice_add_special_words(rinfo, &lkup_exts);
6407 rid = ice_find_recp(hw, &lkup_exts);
6408 /* If did not find a recipe that match the existing criteria */
6409 if (rid == ICE_MAX_NUM_RECIPES)
6410 return ICE_ERR_PARAM;
6412 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6413 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6414 /* the rule is already removed */
6417 ice_acquire_lock(rule_lock);
/* Decide under the lock whether the HW rule itself must be removed,
 * or only a VSI detached from its VSI list.
 */
6418 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6420 } else if (list_elem->vsi_count > 1) {
6421 list_elem->vsi_list_info->ref_cnt--;
6422 remove_rule = false;
6423 vsi_handle = rinfo->sw_act.vsi_handle;
6424 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6426 vsi_handle = rinfo->sw_act.vsi_handle;
6427 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6429 ice_release_lock(rule_lock);
6432 if (list_elem->vsi_count == 0)
6435 ice_release_lock(rule_lock);
6437 struct ice_aqc_sw_rules_elem *s_rule;
/* Issue a remove-switch-rule AQ command for this rule index */
6440 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6442 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6445 return ICE_ERR_NO_MEMORY;
6446 s_rule->pdata.lkup_tx_rx.act = 0;
6447 s_rule->pdata.lkup_tx_rx.index =
6448 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6449 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6450 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6452 ice_aqc_opc_remove_sw_rules, NULL);
/* On success, drop the book-keeping entry under the lock */
6453 if (status == ICE_SUCCESS) {
6454 ice_acquire_lock(rule_lock);
6455 LIST_DEL(&list_elem->list_entry);
6456 ice_free(hw, list_elem->lkups);
6457 ice_free(hw, list_elem);
6458 ice_release_lock(rule_lock);
6460 ice_free(hw, s_rule);
6466 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6467 * @hw: pointer to the hardware structure
6468 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6470 * This function is used to remove 1 rule at a time. The removal is based on
6471 * the remove_entry parameter. This function will remove rule for a given
6472 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* NOTE(review): return-type line and braces appear dropped from this
 * extraction; code left byte-identical.
 */
6475 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6476 struct ice_rule_query_data *remove_entry)
6478 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6479 struct LIST_HEAD_TYPE *list_head;
6480 struct ice_adv_rule_info rinfo;
6481 struct ice_switch_info *sw;
6483 sw = hw->switch_info;
/* The recipe must exist before its rule list can be searched */
6484 if (!sw->recp_list[remove_entry->rid].recp_created)
6485 return ICE_ERR_PARAM;
6486 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6487 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6489 if (list_itr->rule_info.fltr_rule_id ==
6490 remove_entry->rule_id) {
/* Found it: delegate to ice_rem_adv_rule with the stored
 * lookups and the caller's VSI handle.
 */
6491 rinfo = list_itr->rule_info;
6492 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6493 return ice_rem_adv_rule(hw, list_itr->lkups,
6494 list_itr->lkups_cnt, &rinfo);
/* No rule with the given ID was found on this recipe */
6497 return ICE_ERR_PARAM;
6501 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6503 * @hw: pointer to the hardware structure
6504 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6506 * This function is used to remove all the rules for a given VSI and as soon
6507 * as removing a rule fails, it will return immediately with the error code,
6508 * else it will return ICE_SUCCESS
/* NOTE(review): return-type line, braces, `continue` statements and the
 * final return appear dropped from this extraction; code left byte-identical.
 */
6511 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6513 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6514 struct ice_vsi_list_map_info *map_info;
6515 struct LIST_HEAD_TYPE *list_head;
6516 struct ice_adv_rule_info rinfo;
6517 struct ice_switch_info *sw;
6518 enum ice_status status;
6519 u16 vsi_list_id = 0;
6522 sw = hw->switch_info;
/* Scan every created recipe that carries advanced rules */
6523 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6524 if (!sw->recp_list[rid].recp_created)
6526 if (!sw->recp_list[rid].adv_rule)
6528 list_head = &sw->recp_list[rid].filt_rules;
6530 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6531 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Only remove rules whose VSI list contains this VSI */
6532 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6536 rinfo = list_itr->rule_info;
6537 rinfo.sw_act.vsi_handle = vsi_handle;
6538 status = ice_rem_adv_rule(hw, list_itr->lkups,
6539 list_itr->lkups_cnt, &rinfo);
6549 * ice_replay_fltr - Replay all the filters stored by a specific list head
6550 * @hw: pointer to the hardware structure
6551 * @list_head: list for which filters needs to be replayed
6552 * @recp_id: Recipe ID for which rules need to be replayed
static enum ice_status
6555 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
/* NOTE(review): braces, an early return, the vsi_count loop header and the
 * final return appear dropped from this extraction; code left byte-identical.
 */
6557 struct ice_fltr_mgmt_list_entry *itr;
6558 struct LIST_HEAD_TYPE l_head;
6559 enum ice_status status = ICE_SUCCESS;
6561 if (LIST_EMPTY(list_head))
6564 /* Move entries from the given list_head to a temporary l_head so that
6565 * they can be replayed. Otherwise when trying to re-add the same
6566 * filter, the function will return already exists
6568 LIST_REPLACE_INIT(list_head, &l_head);
6570 /* Mark the given list_head empty by reinitializing it so filters
6571 * could be added again by *handler
6573 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6575 struct ice_fltr_list_entry f_entry;
6577 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI non-VLAN filters replay directly as one rule */
6578 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6579 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6580 if (status != ICE_SUCCESS)
6585 /* Add a filter per VSI separately */
/* Walk the VSI map, clearing each bit as its filter is re-added */
6590 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6592 if (!ice_is_vsi_valid(hw, vsi_handle))
6595 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6596 f_entry.fltr_info.vsi_handle = vsi_handle;
6597 f_entry.fltr_info.fwd_id.hw_vsi_id =
6598 ice_get_hw_vsi_num(hw, vsi_handle);
6599 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters take a dedicated add path */
6600 if (recp_id == ICE_SW_LKUP_VLAN)
6601 status = ice_add_vlan_internal(hw, &f_entry);
6603 status = ice_add_rule_internal(hw, recp_id,
6605 if (status != ICE_SUCCESS)
6610 /* Clear the filter management list */
6611 ice_rem_sw_rule_info(hw, &l_head);
6616 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6617 * @hw: pointer to the hardware structure
6619 * NOTE: This function does not clean up partially added filters on error.
6620 * It is up to caller of the function to issue a reset or fail early.
enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
/* NOTE(review): braces and the final return appear dropped from this
 * extraction; code left byte-identical.
 */
6624 struct ice_switch_info *sw = hw->switch_info;
6625 enum ice_status status = ICE_SUCCESS;
/* Replay each recipe's filter list; stop at the first failure */
6628 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6629 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6631 status = ice_replay_fltr(hw, i, head);
6632 if (status != ICE_SUCCESS)
6639 * ice_replay_vsi_fltr - Replay filters for requested VSI
6640 * @hw: pointer to the hardware structure
6641 * @vsi_handle: driver VSI handle
6642 * @recp_id: Recipe ID for which rules need to be replayed
6643 * @list_head: list for which filters need to be replayed
6645 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6646 * It is required to pass valid VSI handle.
static enum ice_status
6649 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6650 struct LIST_HEAD_TYPE *list_head)
/* NOTE(review): braces, `continue` statements and the final return appear
 * dropped from this extraction; code left byte-identical.
 */
6652 struct ice_fltr_mgmt_list_entry *itr;
6653 enum ice_status status = ICE_SUCCESS;
6656 if (LIST_EMPTY(list_head))
6658 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6660 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6662 struct ice_fltr_list_entry f_entry;
6664 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI non-VLAN filter owned by this VSI: replay directly */
6665 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6666 itr->fltr_info.vsi_handle == vsi_handle) {
6667 /* update the src in case it is VSI num */
6668 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6669 f_entry.fltr_info.src = hw_vsi_id;
6670 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6671 if (status != ICE_SUCCESS)
/* Skip list entries that don't reference this VSI at all */
6675 if (!itr->vsi_list_info ||
6676 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6678 /* Clearing it so that the logic can add it back */
6679 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6680 f_entry.fltr_info.vsi_handle = vsi_handle;
6681 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6682 /* update the src in case it is VSI num */
6683 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6684 f_entry.fltr_info.src = hw_vsi_id;
6685 if (recp_id == ICE_SW_LKUP_VLAN)
6686 status = ice_add_vlan_internal(hw, &f_entry);
6688 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6689 if (status != ICE_SUCCESS)
6697 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6698 * @hw: pointer to the hardware structure
6699 * @vsi_handle: driver VSI handle
6700 * @list_head: list for which filters need to be replayed
6702 * Replay the advanced rule for the given VSI.
static enum ice_status
6705 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6706 struct LIST_HEAD_TYPE *list_head)
/* NOTE(review): braces, `continue`, and the final return appear dropped
 * from this extraction; code left byte-identical.
 */
6708 struct ice_rule_query_data added_entry = { 0 };
6709 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6710 enum ice_status status = ICE_SUCCESS;
6712 if (LIST_EMPTY(list_head))
6714 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6716 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6717 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Replay only rules that target the requested VSI */
6719 if (vsi_handle != rinfo->sw_act.vsi_handle)
6721 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6730 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6731 * @hw: pointer to the hardware structure
6732 * @vsi_handle: driver VSI handle
6734 * Replays filters for requested VSI via vsi_handle.
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
/* NOTE(review): braces and the final return appear dropped from this
 * extraction; code left byte-identical.
 */
6738 struct ice_switch_info *sw = hw->switch_info;
6739 enum ice_status status;
6742 /* Update the recipes that were created */
6743 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6744 struct LIST_HEAD_TYPE *head;
6746 head = &sw->recp_list[i].filt_replay_rules;
/* Advanced-rule recipes replay via the adv path, others via
 * the legacy per-recipe filter replay.
 */
6747 if (!sw->recp_list[i].adv_rule)
6748 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6750 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6751 if (status != ICE_SUCCESS)
6759 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6760 * @hw: pointer to the HW struct
6762 * Deletes the filter replay rules.
6764 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6766 struct ice_switch_info *sw = hw->switch_info;
6772 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6773 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6774 struct LIST_HEAD_TYPE *l_head;
6776 l_head = &sw->recp_list[i].filt_replay_rules;
6777 if (!sw->recp_list[i].adv_rule)
6778 ice_rem_sw_rule_info(hw, l_head);
6780 ice_rem_adv_rule_info(hw, l_head);