1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the 16-byte dummy Ethernet header below */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN IDs are 12 bits wide, so 0xFFF is the largest valid ID */
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
/* DA(6) + SA(6) + ethertype/VLAN tag(4) = 16 bytes */
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size of an RX/TX lookup rule element whose payload is the 16-byte dummy
 * Ethernet header (the trailing "- 1" accounts for the one-byte flexible
 * payload member already counted inside ice_sw_rule_lkup_rx_tx).
 */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Size of an RX/TX lookup rule element that carries no header payload */
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule element holding (n) action entries */
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule element holding (n) VSI entries */
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Header-offset table for the dummy MAC/IPv4/NVGRE/MAC/IPv4/TCP packet below */
61 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: outer MAC + IPv4 (protocol 0x2F = GRE) + NVGRE
 * (protocol type 0x6558 = transparent Ethernet bridging) + inner
 * MAC/IPv4/TCP. All addresses/ports are zero placeholders.
 */
73 u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00,
104 0x00, 0x00, 0x00, 0x00
/* Header-offset table for the dummy MAC/IPv4/NVGRE/MAC/IPv4/UDP packet below */
108 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
/* Same layout as dummy_gre_tcp_packet but the inner L4 header is UDP */
120 u8 dummy_gre_udp_packet[] = {
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_OL 12 */
127 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x2F, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
141 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
148 0x00, 0x08, 0x00, 0x00,
/* Header-offset table for the dummy VXLAN-encapsulated TCP packet below */
152 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
154 { ICE_ETYPE_OL, 12 },
155 { ICE_IPV4_OFOS, 14 },
159 { ICE_VXLAN_GPE, 42 },
163 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet: outer MAC/IPv4/UDP (dest port 0x12b5 = 4789, the VXLAN
 * port) + VXLAN + inner MAC/IPv4/TCP.
 */
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00,
201 0x00, 0x00, 0x00, 0x00
/* Header-offset table for the dummy VXLAN-encapsulated UDP packet below */
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
212 { ICE_VXLAN_GPE, 42 },
215 { ICE_UDP_ILOS, 84 },
216 { ICE_PROTOCOL_LAST, 0 },
/* Same layout as dummy_udp_tun_tcp_packet but the inner L4 header is UDP */
220 u8 dummy_udp_tun_udp_packet[] = {
221 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x08, 0x00, /* ICE_ETYPE_OL 12 */
227 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
228 0x00, 0x01, 0x00, 0x00,
229 0x00, 0x11, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
234 0x00, 0x3a, 0x00, 0x00,
236 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
237 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
251 0x00, 0x08, 0x00, 0x00,
/* Header-offset table for the plain (non-tunneled) IPv4/UDP packet below */
255 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
257 { ICE_ETYPE_OL, 12 },
258 { ICE_IPV4_OFOS, 14 },
259 { ICE_UDP_ILOS, 34 },
260 { ICE_PROTOCOL_LAST, 0 },
/* Dummy plain MAC/IPv4/UDP packet (IP protocol 0x11 = UDP) */
264 dummy_udp_packet[] = {
265 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x08, 0x00, /* ICE_ETYPE_OL 12 */
271 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
272 0x00, 0x01, 0x00, 0x00,
273 0x00, 0x11, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
278 0x00, 0x08, 0x00, 0x00,
280 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header-offset table for the plain (non-tunneled) IPv4/TCP packet below */
284 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
286 { ICE_ETYPE_OL, 12 },
287 { ICE_IPV4_OFOS, 14 },
289 { ICE_PROTOCOL_LAST, 0 },
/* Dummy plain MAC/IPv4/TCP packet (IP protocol 0x06 = TCP) */
293 dummy_tcp_packet[] = {
294 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
295 0x00, 0x00, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00,
298 0x08, 0x00, /* ICE_ETYPE_OL 12 */
300 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
301 0x00, 0x01, 0x00, 0x00,
302 0x00, 0x06, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
307 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00,
309 0x50, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header-offset table for the plain IPv6/TCP packet below */
316 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
318 { ICE_ETYPE_OL, 12 },
319 { ICE_IPV6_OFOS, 14 },
321 { ICE_PROTOCOL_LAST, 0 },
/* Dummy MAC/IPv6/TCP packet (ethertype 0x86DD = IPv6) */
325 dummy_tcp_ipv6_packet[] = {
326 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
332 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
/* NOTE(review): inline tag above says 40, but the IPv6 header starts at
 * byte 14 per the offsets table; the "40" looks like a copy of the IPv6
 * header length — confirm against upstream.
 */
333 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
346 0x50, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header-offset table for the plain IPv6/UDP packet below */
353 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
355 { ICE_ETYPE_OL, 12 },
356 { ICE_IPV6_OFOS, 14 },
357 { ICE_UDP_ILOS, 54 },
358 { ICE_PROTOCOL_LAST, 0 },
/* Dummy MAC/IPv6/UDP packet (next header 0x11 = UDP) */
362 dummy_udp_ipv6_packet[] = {
363 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
364 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00,
367 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
369 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
370 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
381 0x00, 0x08, 0x00, 0x00,
383 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Header-offset table for the dummy GTP-U packet below */
387 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
389 { ICE_IPV4_OFOS, 14 },
392 { ICE_PROTOCOL_LAST, 0 },
/* Dummy MAC/IPv4/UDP/GTP-U packet; UDP dest port 0x0868 = 2152, the
 * GTP-U port. The GTP header carries extension-header byte 0x85
 * (PDU Session Container), followed by the PDU session extension.
 */
396 dummy_udp_gtp_packet[] = {
397 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
402 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x11, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
409 0x00, 0x1c, 0x00, 0x00,
411 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
412 0x00, 0x00, 0x00, 0x00,
413 0x00, 0x00, 0x00, 0x85,
415 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
416 0x00, 0x00, 0x00, 0x00,
/* Header-offset table for the dummy PPPoE packet below */
420 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
422 { ICE_VLAN_OFOS, 14},
424 { ICE_PROTOCOL_LAST, 0 },
/* Dummy PPPoE session packet: ethertype 0x8864 (PPPoE session),
 * PPP protocol 0x0021 (IPv4), followed by an IPv4 PDU.
 */
428 dummy_pppoe_packet[] = {
429 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
430 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
436 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 18 */
437 0x00, 0x4e, 0x00, 0x21,
439 0x45, 0x00, 0x00, 0x30, /* PDU */
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x11, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, /* 2 bytes for 4 byte alignment */
448 /* this is a recipe to profile association bitmap */
449 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
450 ICE_MAX_NUM_PROFILES);
452 /* this is a profile to recipe association bitmap */
453 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
454 ICE_MAX_NUM_RECIPES);
/* Forward declaration: defined later in this file, used while populating
 * recipe bookkeeping from FW (see ice_get_recp_frm_fw()).
 */
456 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
459 * ice_collect_result_idx - copy result index values
460 * @buf: buffer that contains the result index
461 * @recp: the recipe struct to copy data into
463 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
464 struct ice_sw_recipe *recp)
466 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
467 ice_set_bit(buf->content.result_indx &
468 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
472 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
473 * @hw: pointer to hardware structure
474 * @recps: struct that we need to populate
475 * @rid: recipe ID that we are populating
476 * @refresh_required: true if we should get recipe to profile mapping from FW
478 * This function is used to populate all the necessary entries into our
479 * bookkeeping so that we have a current list of all the recipes that are
480 * programmed in the firmware.
482 static enum ice_status
483 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
484 bool *refresh_required)
486 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
487 struct ice_aqc_recipe_data_elem *tmp;
488 u16 num_recps = ICE_MAX_NUM_RECIPES;
489 struct ice_prot_lkup_ext *lkup_exts;
490 u16 i, sub_recps, fv_word_idx = 0;
491 enum ice_status status;
493 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
495 /* we need a buffer big enough to accommodate all the recipes */
496 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
497 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
499 return ICE_ERR_NO_MEMORY;
/* Fetch the recipe chain rooted at rid from firmware; num_recps is
 * updated to the number of elements actually returned.
 */
501 tmp[0].recipe_indx = rid;
502 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
503 /* non-zero status meaning recipe doesn't exist */
507 /* Get recipe to profile map so that we can get the fv from lkups that
508 * we read for a recipe from FW. Since we want to minimize the number of
509 * times we make this FW call, just make one call and cache the copy
510 * until a new recipe is added. This operation is only required the
511 * first time to get the changes from FW. Then to search existing
512 * entries we don't need to update the cache again until another recipe
515 if (*refresh_required) {
516 ice_get_recp_to_prof_map(hw);
517 *refresh_required = false;
520 /* Start populating all the entries for recps[rid] based on lkups from
521 * firmware. Note that we are only creating the root recipe in our
524 lkup_exts = &recps[rid].lkup_exts;
/* Walk every sub-recipe returned by FW; a chained ("big") recipe spans
 * several elements (see big_recp below).
 */
526 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
527 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
528 struct ice_recp_grp_entry *rg_entry;
529 u8 prof, idx, prot = 0;
533 rg_entry = (struct ice_recp_grp_entry *)
534 ice_malloc(hw, sizeof(*rg_entry));
536 status = ICE_ERR_NO_MEMORY;
540 idx = root_bufs.recipe_indx;
541 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
543 /* Mark all result indices in this chain */
544 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
545 ice_set_bit(root_bufs.content.result_indx &
546 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
550 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is skipped: only entries 1..ICE_NUM_WORDS_RECIPE carry
 * field-vector indices for this group.
 */
551 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
552 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
554 rg_entry->fv_idx[i] = lkup_indx;
555 rg_entry->fv_mask[i] =
556 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
558 /* If the recipe is a chained recipe then all its
559 * child recipe's result will have a result index.
560 * To fill fv_words we should not use those result
561 * index, we only need the protocol ids and offsets.
562 * We will skip all the fv_idx which stores result
563 * index in them. We also need to skip any fv_idx which
564 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
565 * valid offset value.
567 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
568 rg_entry->fv_idx[i]) ||
569 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
570 rg_entry->fv_idx[i] == 0)
573 ice_find_prot_off(hw, ICE_BLK_SW, prof,
574 rg_entry->fv_idx[i], &prot, &off);
575 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
576 lkup_exts->fv_words[fv_word_idx].off = off;
579 /* populate rg_list with the data from the child entry of this
582 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
584 /* Propagate some data to the recipe database */
585 recps[idx].is_root = is_root;
586 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
587 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
588 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
589 recps[idx].chain_idx = root_bufs.content.result_indx &
590 ~ICE_AQ_RECIPE_RESULT_EN;
591 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
593 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
599 /* Only do the following for root recipes entries */
600 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
601 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
602 recps[idx].root_rid = root_bufs.content.rid &
603 ~ICE_AQ_RECIPE_ID_IS_ROOT;
604 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
607 /* Complete initialization of the root recipe entry */
608 lkup_exts->n_val_words = fv_word_idx;
609 recps[rid].big_recp = (num_recps > 1);
610 recps[rid].n_grp_count = num_recps;
/* Keep a private copy of the raw FW buffer for later re-programming */
611 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
612 ice_memdup(hw, tmp, recps[rid].n_grp_count *
613 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
614 if (!recps[rid].root_buf)
617 /* Copy result indexes */
618 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
619 ICE_NONDMA_TO_NONDMA);
620 recps[rid].recp_created = true;
628 * ice_get_recp_to_prof_map - updates recipe to profile mapping
629 * @hw: pointer to hardware structure
631 * This function is used to populate recipe_to_profile matrix where index to
632 * this array is the recipe ID and the element is the mapping of which profiles
633 * is this recipe mapped to.
636 ice_get_recp_to_prof_map(struct ice_hw *hw)
638 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile and build both directions of the
 * profile<->recipe association (profile_to_recipe and the transposed
 * recipe_to_profile bitmaps).
 */
641 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
644 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
645 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
646 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
648 ice_memcpy(profile_to_recipe[i], r_bitmap,
649 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
/* transpose: for each recipe bit set, mark this profile */
650 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
651 if (ice_is_bit_set(r_bitmap, j))
652 ice_set_bit(i, recipe_to_profile[j]);
657 * ice_init_def_sw_recp - initialize the recipe book keeping tables
658 * @hw: pointer to the HW struct
660 * Allocate memory for the entire recipe table and initialize the structures/
661 * entries corresponding to basic recipes.
663 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
665 struct ice_sw_recipe *recps;
668 recps = (struct ice_sw_recipe *)
669 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
671 return ICE_ERR_NO_MEMORY;
673 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
674 recps[i].root_rid = i;
675 INIT_LIST_HEAD(&recps[i].filt_rules);
676 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
677 INIT_LIST_HEAD(&recps[i].rg_list);
678 ice_init_lock(&recps[i].filt_rule_lock);
681 hw->switch_info->recp_list = recps;
687 * ice_aq_get_sw_cfg - get switch configuration
688 * @hw: pointer to the hardware structure
689 * @buf: pointer to the result buffer
690 * @buf_size: length of the buffer available for response
691 * @req_desc: pointer to requested descriptor
692 * @num_elems: pointer to number of elements
693 * @cd: pointer to command details structure or NULL
695 * Get switch configuration (0x0200) to be placed in 'buff'.
696 * This admin command returns information such as initial VSI/port number
697 * and switch ID it belongs to.
699 * NOTE: *req_desc is both an input/output parameter.
700 * The caller of this function first calls this function with *request_desc set
701 * to 0. If the response from f/w has *req_desc set to 0, all the switch
702 * configuration information has been returned; if non-zero (meaning not all
703 * the information was returned), the caller should call this function again
704 * with *req_desc set to the previous value returned by f/w to get the
705 * next block of switch configuration information.
707 * *num_elems is output only parameter. This reflects the number of elements
708 * in response buffer. The caller of this function to use *num_elems while
709 * parsing the response buffer.
711 static enum ice_status
712 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
713 u16 buf_size, u16 *req_desc, u16 *num_elems,
714 struct ice_sq_cd *cd)
716 struct ice_aqc_get_sw_cfg *cmd;
717 enum ice_status status;
718 struct ice_aq_desc desc;
720 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
721 cmd = &desc.params.get_sw_conf;
722 cmd->element = CPU_TO_LE16(*req_desc);
724 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
726 *req_desc = LE16_TO_CPU(cmd->element);
727 *num_elems = LE16_TO_CPU(cmd->num_elems);
734 * ice_alloc_sw - allocate resources specific to switch
735 * @hw: pointer to the HW struct
736 * @ena_stats: true to turn on VEB stats
737 * @shared_res: true for shared resource, false for dedicated resource
738 * @sw_id: switch ID returned
739 * @counter_id: VEB counter ID returned
741 * allocates switch resources (SWID and VEB counter) (0x0208)
744 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
747 struct ice_aqc_alloc_free_res_elem *sw_buf;
748 struct ice_aqc_res_elem *sw_ele;
749 enum ice_status status;
752 buf_len = sizeof(*sw_buf);
753 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
754 ice_malloc(hw, buf_len);
756 return ICE_ERR_NO_MEMORY;
758 /* Prepare buffer for switch ID.
759 * The number of resource entries in buffer is passed as 1 since only a
760 * single switch/VEB instance is allocated, and hence a single sw_id
763 sw_buf->num_elems = CPU_TO_LE16(1);
765 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
766 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
767 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
769 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
770 ice_aqc_opc_alloc_res, NULL);
773 goto ice_alloc_sw_exit;
/* SWID granted by FW is returned in the first response element */
775 sw_ele = &sw_buf->elem[0];
776 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
779 /* Prepare buffer for VEB Counter */
780 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
781 struct ice_aqc_alloc_free_res_elem *counter_buf;
782 struct ice_aqc_res_elem *counter_ele;
784 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
785 ice_malloc(hw, buf_len);
787 status = ICE_ERR_NO_MEMORY;
788 goto ice_alloc_sw_exit;
791 /* The number of resource entries in buffer is passed as 1 since
792 * only a single switch/VEB instance is allocated, and hence a
793 * single VEB counter is requested.
795 counter_buf->num_elems = CPU_TO_LE16(1);
796 counter_buf->res_type =
797 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
798 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
799 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
803 ice_free(hw, counter_buf);
804 goto ice_alloc_sw_exit;
/* VEB counter ID granted by FW, also in the first response element */
806 counter_ele = &counter_buf->elem[0];
807 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
808 ice_free(hw, counter_buf);
812 ice_free(hw, sw_buf);
817 * ice_free_sw - free resources specific to switch
818 * @hw: pointer to the HW struct
819 * @sw_id: switch ID returned
820 * @counter_id: VEB counter ID returned
822 * free switch resources (SWID and VEB counter) (0x0209)
824 * NOTE: This function frees multiple resources. It continues
825 * releasing other resources even after it encounters error.
826 * The error code returned is the last error it encountered.
828 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
830 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
831 enum ice_status status, ret_status;
834 buf_len = sizeof(*sw_buf);
835 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
836 ice_malloc(hw, buf_len);
838 return ICE_ERR_NO_MEMORY;
840 /* Prepare buffer to free for switch ID res.
841 * The number of resource entries in buffer is passed as 1 since only a
842 * single switch/VEB instance is freed, and hence a single sw_id
845 sw_buf->num_elems = CPU_TO_LE16(1);
846 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
847 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
849 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
850 ice_aqc_opc_free_res, NULL);
853 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
855 /* Prepare buffer to free for VEB Counter resource */
856 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
857 ice_malloc(hw, buf_len);
859 ice_free(hw, sw_buf);
860 return ICE_ERR_NO_MEMORY;
863 /* The number of resource entries in buffer is passed as 1 since only a
864 * single switch/VEB instance is freed, and hence a single VEB counter
867 counter_buf->num_elems = CPU_TO_LE16(1);
868 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
869 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
871 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
872 ice_aqc_opc_free_res, NULL);
874 ice_debug(hw, ICE_DBG_SW,
875 "VEB counter resource could not be freed\n");
879 ice_free(hw, counter_buf);
880 ice_free(hw, sw_buf);
886 * @hw: pointer to the HW struct
887 * @vsi_ctx: pointer to a VSI context struct
888 * @cd: pointer to command details structure or NULL
890 * Add a VSI context to the hardware (0x0210)
893 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
894 struct ice_sq_cd *cd)
896 struct ice_aqc_add_update_free_vsi_resp *res;
897 struct ice_aqc_add_get_update_free_vsi *cmd;
898 struct ice_aq_desc desc;
899 enum ice_status status;
901 cmd = &desc.params.vsi_cmd;
902 res = &desc.params.add_update_free_vsi_res;
904 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
906 if (!vsi_ctx->alloc_from_pool)
907 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
908 ICE_AQ_VSI_IS_VALID);
910 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
912 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
914 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
915 sizeof(vsi_ctx->info), cd);
918 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
919 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
920 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
928 * @hw: pointer to the HW struct
929 * @vsi_ctx: pointer to a VSI context struct
930 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
931 * @cd: pointer to command details structure or NULL
933 * Free VSI context info from hardware (0x0213)
936 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
937 bool keep_vsi_alloc, struct ice_sq_cd *cd)
939 struct ice_aqc_add_update_free_vsi_resp *resp;
940 struct ice_aqc_add_get_update_free_vsi *cmd;
941 struct ice_aq_desc desc;
942 enum ice_status status;
944 cmd = &desc.params.vsi_cmd;
945 resp = &desc.params.add_update_free_vsi_res;
947 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
949 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
951 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
953 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
955 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
956 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
964 * @hw: pointer to the HW struct
965 * @vsi_ctx: pointer to a VSI context struct
966 * @cd: pointer to command details structure or NULL
968 * Update VSI context in the hardware (0x0211)
971 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
972 struct ice_sq_cd *cd)
974 struct ice_aqc_add_update_free_vsi_resp *resp;
975 struct ice_aqc_add_get_update_free_vsi *cmd;
976 struct ice_aq_desc desc;
977 enum ice_status status;
979 cmd = &desc.params.vsi_cmd;
980 resp = &desc.params.add_update_free_vsi_res;
982 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
984 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
986 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
988 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
989 sizeof(vsi_ctx->info), cd);
992 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
993 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1000 * ice_is_vsi_valid - check whether the VSI is valid or not
1001 * @hw: pointer to the HW struct
1002 * @vsi_handle: VSI handle
1004 * check whether the VSI is valid or not
1006 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1008 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1012 * ice_get_hw_vsi_num - return the HW VSI number
1013 * @hw: pointer to the HW struct
1014 * @vsi_handle: VSI handle
1016 * return the HW VSI number
1017 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1019 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1021 return hw->vsi_ctx[vsi_handle]->vsi_num;
1025 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1026 * @hw: pointer to the HW struct
1027 * @vsi_handle: VSI handle
1029 * return the VSI context entry for a given VSI handle
1031 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1033 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1037 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1038 * @hw: pointer to the HW struct
1039 * @vsi_handle: VSI handle
1040 * @vsi: VSI context pointer
1042 * save the VSI context entry for a given VSI handle
1045 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1047 hw->vsi_ctx[vsi_handle] = vsi;
1051 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1052 * @hw: pointer to the HW struct
1053 * @vsi_handle: VSI handle
1055 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1057 struct ice_vsi_ctx *vsi;
1060 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1063 ice_for_each_traffic_class(i) {
1064 if (vsi->lan_q_ctx[i]) {
1065 ice_free(hw, vsi->lan_q_ctx[i]);
1066 vsi->lan_q_ctx[i] = NULL;
1072 * ice_clear_vsi_ctx - clear the VSI context entry
1073 * @hw: pointer to the HW struct
1074 * @vsi_handle: VSI handle
1076 * clear the VSI context entry
1078 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1080 struct ice_vsi_ctx *vsi;
1082 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1084 ice_clear_vsi_q_ctx(hw, vsi_handle);
1086 hw->vsi_ctx[vsi_handle] = NULL;
1091 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1092 * @hw: pointer to the HW struct
1094 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1098 for (i = 0; i < ICE_MAX_VSI; i++)
1099 ice_clear_vsi_ctx(hw, i);
1103 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1104 * @hw: pointer to the HW struct
1105 * @vsi_handle: unique VSI handle provided by drivers
1106 * @vsi_ctx: pointer to a VSI context struct
1107 * @cd: pointer to command details structure or NULL
1109 * Add a VSI context to the hardware also add it into the VSI handle list.
1110 * If this function gets called after reset for existing VSIs then update
1111 * with the new HW VSI number in the corresponding VSI handle list entry.
1114 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1115 struct ice_sq_cd *cd)
1117 struct ice_vsi_ctx *tmp_vsi_ctx;
1118 enum ice_status status;
1120 if (vsi_handle >= ICE_MAX_VSI)
1121 return ICE_ERR_PARAM;
1122 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1125 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1127 /* Create a new VSI context */
1128 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1129 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1131 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1132 return ICE_ERR_NO_MEMORY;
1134 *tmp_vsi_ctx = *vsi_ctx;
1136 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1138 /* update with new HW VSI num */
1139 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1146 * ice_free_vsi- free VSI context from hardware and VSI handle list
1147 * @hw: pointer to the HW struct
1148 * @vsi_handle: unique VSI handle
1149 * @vsi_ctx: pointer to a VSI context struct
1150 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1151 * @cd: pointer to command details structure or NULL
1153 * Free VSI context info from hardware as well as from VSI handle list
1156 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1157 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1159 enum ice_status status;
1161 if (!ice_is_vsi_valid(hw, vsi_handle))
1162 return ICE_ERR_PARAM;
1163 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1164 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1166 ice_clear_vsi_ctx(hw, vsi_handle);
1172 * @hw: pointer to the HW struct
1173 * @vsi_handle: unique VSI handle
1174 * @vsi_ctx: pointer to a VSI context struct
1175 * @cd: pointer to command details structure or NULL
1177 * Update VSI context in the hardware
1180 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1181 struct ice_sq_cd *cd)
1183 if (!ice_is_vsi_valid(hw, vsi_handle))
1184 return ICE_ERR_PARAM;
1185 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1186 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1190 * ice_aq_get_vsi_params
1191 * @hw: pointer to the HW struct
1192 * @vsi_ctx: pointer to a VSI context struct
1193 * @cd: pointer to command details structure or NULL
1195 * Get VSI context info from hardware (0x0212)
1198 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1199 struct ice_sq_cd *cd)
1201 struct ice_aqc_add_get_update_free_vsi *cmd;
1202 struct ice_aqc_get_vsi_resp *resp;
1203 struct ice_aq_desc desc;
1204 enum ice_status status;
/* cmd and resp alias the same descriptor parameter area (request vs reply) */
1206 cmd = &desc.params.vsi_cmd;
1207 resp = &desc.params.get_vsi_resp;
1209 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1211 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* the VSI properties are read back directly into vsi_ctx->info */
1213 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1214 sizeof(vsi_ctx->info), cd);
/* NOTE(review): numbering gaps (1215, 1217) suggest a dropped
 * "if (!status) {" guard and the mask continuation of the vsi_num
 * expression (presumably "ICE_AQ_VSI_NUM_M;") -- confirm vs. upstream.
 */
1216 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1218 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1219 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1226 * ice_aq_add_update_mir_rule - add/update a mirror rule
1227 * @hw: pointer to the HW struct
1228 * @rule_type: Rule Type
1229 * @dest_vsi: VSI number to which packets will be mirrored
1230 * @count: length of the list
1231 * @mr_buf: buffer for list of mirrored VSI numbers
1232 * @cd: pointer to command details structure or NULL
1235 * Add/Update Mirror Rule (0x260).
1238 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1239 u16 count, struct ice_mir_rule_buf *mr_buf,
1240 struct ice_sq_cd *cd, u16 *rule_id)
1242 struct ice_aqc_add_update_mir_rule *cmd;
1243 struct ice_aq_desc desc;
1244 enum ice_status status;
1245 __le16 *mr_list = NULL;
/* virtual-port rules carry a VSI list payload; physical-port rules do not */
1248 switch (rule_type) {
1249 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1250 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1251 /* Make sure count and mr_buf are set for these rule_types */
1252 if (!(count && mr_buf))
1253 return ICE_ERR_PARAM;
1255 buf_size = count * sizeof(__le16);
1256 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
/* NOTE(review): numbering gap (1257) suggests a dropped "if (!mr_list)"
 * guard before this NO_MEMORY return -- confirm vs. upstream.
 */
1258 return ICE_ERR_NO_MEMORY;
1260 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1261 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1262 /* Make sure count and mr_buf are not set for these
1265 if (count || mr_buf)
1266 return ICE_ERR_PARAM;
/* unrecognized rule types are rejected (presumably the default: label,
 * dropped at numbering gap 1268)
 */
1269 ice_debug(hw, ICE_DBG_SW,
1270 "Error due to unsupported rule_type %u\n", rule_type);
1271 return ICE_ERR_OUT_OF_RANGE;
1274 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1276 /* Pre-process 'mr_buf' items for add/update of virtual port
1277 * ingress/egress mirroring (but not physical port ingress/egress
1283 for (i = 0; i < count; i++) {
/* mask the caller-provided index down to the mirrored-VSI field width */
1286 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1288 /* Validate specified VSI number, make sure it is less
1289 * than ICE_MAX_VSI, if not return with error.
1291 if (id >= ICE_MAX_VSI) {
1292 ice_debug(hw, ICE_DBG_SW,
1293 "Error VSI index (%u) out-of-range\n",
1295 ice_free(hw, mr_list);
1296 return ICE_ERR_OUT_OF_RANGE;
1299 /* add VSI to mirror rule */
/* NOTE(review): numbering gap (1300-1301) suggests a dropped
 * "if (mr_buf[i].add)" test plus the "mr_list[i] =" target of this
 * continuation line -- confirm vs. upstream.
 */
1302 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1303 else /* remove VSI from mirror rule */
1304 mr_list[i] = CPU_TO_LE16(id);
1308 cmd = &desc.params.add_update_rule;
/* a valid incoming *rule_id means "update this rule" rather than "add" */
1309 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1310 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1311 ICE_AQC_RULE_ID_VALID_M);
1312 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1313 cmd->num_entries = CPU_TO_LE16(count);
1314 cmd->dest = CPU_TO_LE16(dest_vsi);
1316 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* NOTE(review): numbering gap (1317) suggests a dropped "if (!status)"
 * before the rule ID read-back -- confirm vs. upstream.
 */
1318 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1320 ice_free(hw, mr_list);
1326 * ice_aq_delete_mir_rule - delete a mirror rule
1327 * @hw: pointer to the HW struct
1328 * @rule_id: Mirror rule ID (to be deleted)
1329 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1330 * otherwise it is returned to the shared pool
1331 * @cd: pointer to command details structure or NULL
1333 * Delete Mirror Rule (0x261).
1336 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1337 struct ice_sq_cd *cd)
1339 struct ice_aqc_delete_mir_rule *cmd;
1340 struct ice_aq_desc desc;
1342 /* rule_id should be in the range 0...63 */
1343 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1344 return ICE_ERR_OUT_OF_RANGE;
1346 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1348 cmd = &desc.params.del_rule;
/* the valid bit tells firmware the rule_id field carries a real ID */
1349 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1350 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): numbering gap (1351-1352) suggests a dropped
 * "if (keep_allocd)" guard -- this flag should presumably be set only
 * when the caller asked to keep the allocation. Confirm vs. upstream.
 */
1353 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1355 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1359 * ice_aq_alloc_free_vsi_list
1360 * @hw: pointer to the HW struct
1361 * @vsi_list_id: VSI list ID returned or used for lookup
1362 * @lkup_type: switch rule filter lookup type
1363 * @opc: switch rules population command type - pass in the command opcode
1365 * allocates or free a VSI list resource
1367 static enum ice_status
1368 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1369 enum ice_sw_lkup_type lkup_type,
1370 enum ice_adminq_opc opc)
1372 struct ice_aqc_alloc_free_res_elem *sw_buf;
1373 struct ice_aqc_res_elem *vsi_ele;
1374 enum ice_status status;
1377 buf_len = sizeof(*sw_buf);
1378 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1379 ice_malloc(hw, buf_len)
/* NOTE(review): numbering gap (1380) suggests a dropped "if (!sw_buf)"
 * guard before this NO_MEMORY return -- confirm vs. upstream.
 */
1381 return ICE_ERR_NO_MEMORY;
1382 sw_buf->num_elems = CPU_TO_LE16(1);
/* pick the resource type: replication list for forwarding lookups,
 * prune list for VLAN lookups; anything else is a parameter error
 */
1384 if (lkup_type == ICE_SW_LKUP_MAC ||
1385 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1386 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1387 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1388 lkup_type == ICE_SW_LKUP_PROMISC ||
1389 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1390 lkup_type == ICE_SW_LKUP_LAST) {
1391 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1392 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
/* NOTE(review): numbering gap (1393) -- the "sw_buf->res_type ="
 * left-hand side of this continuation appears dropped; gap at 1395
 * likewise hides the "} else {" introducing the error branch.
 */
1394 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1396 status = ICE_ERR_PARAM;
1397 goto ice_aq_alloc_free_vsi_list_exit;
/* on free, firmware needs to be told which list ID to release */
1400 if (opc == ice_aqc_opc_free_res)
1401 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1403 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
/* NOTE(review): numbering gap (1404) suggests a dropped "if (status)"
 * before this goto -- confirm vs. upstream.
 */
1405 goto ice_aq_alloc_free_vsi_list_exit;
/* on allocate, read back the list ID firmware assigned */
1407 if (opc == ice_aqc_opc_alloc_res) {
1408 vsi_ele = &sw_buf->elem[0];
1409 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1412 ice_aq_alloc_free_vsi_list_exit:
1413 ice_free(hw, sw_buf);
1418 * ice_aq_set_storm_ctrl - Sets storm control configuration
1419 * @hw: pointer to the HW struct
1420 * @bcast_thresh: represents the upper threshold for broadcast storm control
1421 * @mcast_thresh: represents the upper threshold for multicast storm control
1422 * @ctl_bitmask: storm control control knobs
1424 * Sets the storm control configuration (0x0280)
1427 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
/* NOTE(review): numbering gap (1428) -- the trailing parameter of the
 * signature (presumably "u32 ctl_bitmask)") appears dropped here.
 */
1430 struct ice_aqc_storm_cfg *cmd;
1431 struct ice_aq_desc desc;
1433 cmd = &desc.params.storm_conf;
1435 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* thresholds are masked to the field width before being programmed */
1437 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1438 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1439 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1441 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1445 * ice_aq_get_storm_ctrl - gets storm control configuration
1446 * @hw: pointer to the HW struct
1447 * @bcast_thresh: represents the upper threshold for broadcast storm control
1448 * @mcast_thresh: represents the upper threshold for multicast storm control
1449 * @ctl_bitmask: storm control control knobs
1451 * Gets the storm control configuration (0x0281)
1454 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
/* NOTE(review): numbering gap (1455) -- the trailing parameter of the
 * signature (presumably "u32 *ctl_bitmask)") appears dropped here.
 */
1457 enum ice_status status;
1458 struct ice_aq_desc desc;
1460 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1462 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* NOTE(review): numbering gaps (1463, 1468, 1471) suggest a dropped
 * "if (!status) {" guard and the "ICE_AQ_THRESHOLD_M;" continuations of
 * the two masked reads below -- confirm vs. upstream.
 */
1464 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1467 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1470 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1473 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1480 * ice_aq_sw_rules - add/update/remove switch rules
1481 * @hw: pointer to the HW struct
1482 * @rule_list: pointer to switch rule population list
1483 * @rule_list_sz: total size of the rule list in bytes
1484 * @num_rules: number of switch rules in the rule_list
1485 * @opc: switch rules population command type - pass in the command opcode
1486 * @cd: pointer to command details structure or NULL
1488 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1490 static enum ice_status
1491 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1492 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1494 struct ice_aq_desc desc;
1496 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* only the three switch-rule opcodes are legal through this helper */
1498 if (opc != ice_aqc_opc_add_sw_rules &&
1499 opc != ice_aqc_opc_update_sw_rules &&
1500 opc != ice_aqc_opc_remove_sw_rules)
1501 return ICE_ERR_PARAM;
1503 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag marks the buffer as host-to-device (firmware reads rule_list) */
1505 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1506 desc.params.sw_rules.num_rules_fltr_entry_index =
1507 CPU_TO_LE16(num_rules);
1508 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1512 * ice_aq_add_recipe - add switch recipe
1513 * @hw: pointer to the HW struct
1514 * @s_recipe_list: pointer to switch rule population list
1515 * @num_recipes: number of switch recipes in the list
1516 * @cd: pointer to command details structure or NULL
1521 ice_aq_add_recipe(struct ice_hw *hw,
1522 struct ice_aqc_recipe_data_elem *s_recipe_list,
1523 u16 num_recipes, struct ice_sq_cd *cd)
1525 struct ice_aqc_add_get_recipe *cmd;
1526 struct ice_aq_desc desc;
1529 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1530 cmd = &desc.params.add_get_recipe;
1531 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1533 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: firmware reads the recipe list buffer from host memory */
1534 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1536 buf_size = num_recipes * sizeof(*s_recipe_list);
1538 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1542 * ice_aq_get_recipe - get switch recipe
1543 * @hw: pointer to the HW struct
1544 * @s_recipe_list: pointer to switch rule population list
1545 * @num_recipes: pointer to the number of recipes (input and output)
1546 * @recipe_root: root recipe number of recipe(s) to retrieve
1547 * @cd: pointer to command details structure or NULL
1551 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1552 * On output, *num_recipes will equal the number of entries returned in
1555 * The caller must supply enough space in s_recipe_list to hold all possible
1556 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1559 ice_aq_get_recipe(struct ice_hw *hw,
1560 struct ice_aqc_recipe_data_elem *s_recipe_list,
1561 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1563 struct ice_aqc_add_get_recipe *cmd;
1564 struct ice_aq_desc desc;
1565 enum ice_status status;
/* enforce the documented contract: caller supplies a max-size buffer */
1568 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1569 return ICE_ERR_PARAM;
1571 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1572 cmd = &desc.params.add_get_recipe;
1573 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1575 cmd->return_index = CPU_TO_LE16(recipe_root);
1576 cmd->num_sub_recipes = 0;
1578 buf_size = *num_recipes * sizeof(*s_recipe_list);
1580 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1581 /* cppcheck-suppress constArgument */
/* firmware writes the actual count back into the descriptor field */
1582 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1588 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1589 * @hw: pointer to the HW struct
1590 * @profile_id: package profile ID to associate the recipe with
1591 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1592 * @cd: pointer to command details structure or NULL
1593 * Recipe to profile association (0x0291)
1596 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1597 struct ice_sq_cd *cd)
1599 struct ice_aqc_recipe_to_profile *cmd;
1600 struct ice_aq_desc desc;
1602 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1603 cmd = &desc.params.recipe_to_profile;
1604 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1605 cmd->profile_id = CPU_TO_LE16(profile_id);
1606 /* Set the recipe ID bit in the bitmask to let the device know which
1607 * profile we are associating the recipe to
1609 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1610 ICE_NONDMA_TO_NONDMA);
1612 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1616 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1617 * @hw: pointer to the HW struct
1618 * @profile_id: package profile ID to associate the recipe with
1619 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1620 * @cd: pointer to command details structure or NULL
1621 * Associate profile ID with given recipe (0x0293)
1624 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1625 struct ice_sq_cd *cd)
1627 struct ice_aqc_recipe_to_profile *cmd;
1628 struct ice_aq_desc desc;
1629 enum ice_status status;
1631 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1632 cmd = &desc.params.recipe_to_profile;
1633 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1634 cmd->profile_id = CPU_TO_LE16(profile_id);
1636 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* NOTE(review): numbering gap (1637) suggests a dropped "if (!status)"
 * guard before copying the firmware-filled bitmap out to the caller --
 * confirm vs. upstream.
 */
1638 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1639 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1645 * ice_alloc_recipe - add recipe resource
1646 * @hw: pointer to the hardware structure
1647 * @rid: recipe ID returned as response to AQ call
1649 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1651 struct ice_aqc_alloc_free_res_elem *sw_buf;
1652 enum ice_status status;
1655 buf_len = sizeof(*sw_buf);
1656 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
/* NOTE(review): numbering gap (1657) suggests a dropped "if (!sw_buf)"
 * guard before this NO_MEMORY return -- confirm vs. upstream.
 */
1658 return ICE_ERR_NO_MEMORY;
1660 sw_buf->num_elems = CPU_TO_LE16(1);
/* request one shared recipe resource from the global pool */
1661 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1662 ICE_AQC_RES_TYPE_S) |
1663 ICE_AQC_RES_TYPE_FLAG_SHARED);
1664 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1665 ice_aqc_opc_alloc_res, NULL);
/* NOTE(review): numbering gap (1666) suggests a dropped "if (!status)"
 * before reading back the allocated recipe ID -- confirm vs. upstream.
 */
1667 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1668 ice_free(hw, sw_buf);
1673 /* ice_init_port_info - Initialize port_info with switch configuration data
1674 * @pi: pointer to port_info
1675 * @vsi_port_num: VSI number or port number
1676 * @type: Type of switch element (port or VSI)
1677 * @swid: switch ID of the switch the element is attached to
1678 * @pf_vf_num: PF or VF number
1679 * @is_vf: true if the element is a VF, false otherwise
1682 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1683 u16 swid, u16 pf_vf_num, bool is_vf)
/* NOTE(review): numbering gap (1684-1685) -- the opening brace and the
 * "switch (type) {" statement introducing this case label appear
 * dropped; gaps at 1688/1690 and 1693-1694 likewise hide assignments
 * (presumably pi->sw_id / pi->is_vf) and the break/default lines.
 * Confirm vs. upstream before relying on this body.
 */
1686 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1687 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1689 pi->pf_vf_num = pf_vf_num;
/* default Tx/Rx VSIs start out invalid until explicitly configured */
1691 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1692 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1695 ice_debug(pi->hw, ICE_DBG_SW,
1696 "incorrect VSI/port type received\n");
1701 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1702 * @hw: pointer to the hardware structure
1704 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1706 struct ice_aqc_get_sw_cfg_resp *rbuf;
1707 enum ice_status status;
1708 u16 num_total_ports;
/* NOTE(review): numbering gap (1709-1713) hides several local
 * declarations (presumably req_desc, num_elems, loop counters, j) --
 * confirm vs. upstream.
 */
1714 num_total_ports = 1;
1716 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1717 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
/* NOTE(review): gap (1718-1719) suggests a dropped "if (!rbuf)" guard */
1720 return ICE_ERR_NO_MEMORY;
1722 /* Multiple calls to ice_aq_get_sw_cfg may be required
1723 * to get all the switch configuration information. The need
1724 * for additional calls is indicated by ice_aq_get_sw_cfg
1725 * writing a non-zero value in req_desc
/* the do/while opener (gap 1727) and the early "if (status) break;"
 * (gap 1730-1733) appear dropped from this extraction
 */
1728 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1729 &req_desc, &num_elems, NULL);
1734 for (i = 0; i < num_elems; i++) {
1735 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1736 u16 pf_vf_num, swid, vsi_port_num;
1740 ele = rbuf[i].elements;
/* mask out the port/VSI number and owning function from the element */
1741 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1742 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1744 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1745 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1747 swid = LE16_TO_CPU(ele->swid);
/* NOTE(review): gap (1751-1752) after this test hides its body
 * (presumably "is_vf = true;") -- confirm vs. upstream
 */
1749 if (LE16_TO_CPU(ele->pf_vf_num) &
1750 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1753 type = LE16_TO_CPU(ele->vsi_port_num) >>
1754 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
/* gap (1755-1756) hides the "switch (type) {" opener for the case below */
1757 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1758 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1759 if (j == num_total_ports) {
1760 ice_debug(hw, ICE_DBG_SW,
1761 "more ports than expected\n");
1762 status = ICE_ERR_CFG;
/* gap (1763-1764) hides the error-exit (presumably "goto out; }") */
1765 ice_init_port_info(hw->port_info,
1766 vsi_port_num, type, swid,
/* remaining args, port counting, default case and loop closers (gaps
 * 1767-1773) are not visible in this extraction
 */
1774 } while (req_desc && !status);
1777 ice_free(hw, (void *)rbuf);
1782 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1783 * @hw: pointer to the hardware structure
1784 * @fi: filter info structure to fill/update
1786 * This helper function populates the lb_en and lan_en elements of the provided
1787 * ice_fltr_info struct using the switch's type and characteristics of the
1788 * switch rule being configured.
1790 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* NOTE(review): numbering gap (1791-1793) hides the opening brace and,
 * presumably, the default initialization of fi->lb_en / fi->lan_en --
 * confirm vs. upstream.
 */
1794 if ((fi->flag & ICE_FLTR_TX) &&
1795 (fi->fltr_act == ICE_FWD_TO_VSI ||
1796 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1797 fi->fltr_act == ICE_FWD_TO_Q ||
1798 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1799 /* Setting LB for prune actions will result in replicated
1800 * packets to the internal switch that will be dropped.
1802 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
/* gap (1803) hides this test's body (presumably "fi->lb_en = true;") */
1805 /* Set lan_en to TRUE if
1806 * 1. The switch is a VEB AND
1808 * 2.1 The lookup is a directional lookup like ethertype,
1809 * promiscuous, ethertype-MAC, promiscuous-VLAN
1810 * and default-port OR
1811 * 2.2 The lookup is VLAN, OR
1812 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1813 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1817 * The switch is a VEPA.
1819 * In all other cases, the LAN enable has to be set to false.
/* gap (1820-1821) hides the comment close and, per the text above, a
 * surrounding "if (hw->evb_veb) {" VEB test -- confirm vs. upstream
 */
1822 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1823 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1824 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1825 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1826 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1827 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1828 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1829 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1830 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1831 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
/* the body of this condition and the VEPA else-branch (gap after 1831)
 * are not visible in this extraction
 */
1840 * ice_fill_sw_rule - Helper function to fill switch rule structure
1841 * @hw: pointer to the hardware structure
1842 * @f_info: entry containing packet forwarding information
1843 * @s_rule: switch rule structure to be filled in based on mac_entry
1844 * @opc: switch rules population command type - pass in the command opcode
1847 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1848 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* vlan_id defaults to an out-of-range sentinel (> ICE_MAX_VLAN_ID) so the
 * VLAN TCI is only programmed when a lookup type below sets a real ID
 */
1850 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* NOTE(review): numbering gap (1851-1857) hides several local
 * declarations (presumably daddr, off, eth_hdr, eth_hdr_sz, act, q_rgn)
 * -- confirm vs. upstream.
 */
1858 if (opc == ice_aqc_opc_remove_sw_rules) {
1859 s_rule->pdata.lkup_tx_rx.act = 0;
1860 s_rule->pdata.lkup_tx_rx.index =
1861 CPU_TO_LE16(f_info->fltr_rule_id);
1862 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
/* gap (1863-1865) hides the early "return;" closing the remove path */
1866 eth_hdr_sz = sizeof(dummy_eth_header);
1867 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1869 /* initialize the ether header with a dummy header */
1870 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1871 ice_fill_sw_info(hw, f_info);
1873 switch (f_info->fltr_act) {
1874 case ICE_FWD_TO_VSI:
1875 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1876 ICE_SINGLE_ACT_VSI_ID_M;
1877 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1878 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1879 ICE_SINGLE_ACT_VALID_BIT;
1881 case ICE_FWD_TO_VSI_LIST:
1882 act |= ICE_SINGLE_ACT_VSI_LIST;
1883 act |= (f_info->fwd_id.vsi_list_id <<
1884 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1885 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1886 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1887 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1888 ICE_SINGLE_ACT_VALID_BIT;
/* gap (1889-1890) hides the break and the "case ICE_FWD_TO_Q:" label
 * for the queue-forwarding lines below -- confirm vs. upstream
 */
1891 act |= ICE_SINGLE_ACT_TO_Q;
1892 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1893 ICE_SINGLE_ACT_Q_INDEX_M;
1895 case ICE_DROP_PACKET:
1896 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1897 ICE_SINGLE_ACT_VALID_BIT;
1899 case ICE_FWD_TO_QGRP:
/* queue-group region is log2 of the group size (0 when size is 0) */
1900 q_rgn = f_info->qgrp_size > 0 ?
1901 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1902 act |= ICE_SINGLE_ACT_TO_Q;
1903 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1904 ICE_SINGLE_ACT_Q_INDEX_M;
1905 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1906 ICE_SINGLE_ACT_Q_REGION_M;
/* gap (1907-1912) hides the default case and the "if (f_info->lb_en)"
 * / "if (f_info->lan_en)" guards for the two enables below
 */
1913 act |= ICE_SINGLE_ACT_LB_ENABLE;
1915 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1917 switch (f_info->lkup_type) {
1918 case ICE_SW_LKUP_MAC:
1919 daddr = f_info->l_data.mac.mac_addr;
1921 case ICE_SW_LKUP_VLAN:
1922 vlan_id = f_info->l_data.vlan.vlan_id;
1923 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1924 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1925 act |= ICE_SINGLE_ACT_PRUNE;
1926 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1929 case ICE_SW_LKUP_ETHERTYPE_MAC:
1930 daddr = f_info->l_data.ethertype_mac.mac_addr;
/* ETHERTYPE_MAC appears to fall through to also program the ethertype */
1932 case ICE_SW_LKUP_ETHERTYPE:
1933 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1934 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1936 case ICE_SW_LKUP_MAC_VLAN:
1937 daddr = f_info->l_data.mac_vlan.mac_addr;
1938 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1940 case ICE_SW_LKUP_PROMISC_VLAN:
1941 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1943 case ICE_SW_LKUP_PROMISC:
1944 daddr = f_info->l_data.mac_vlan.mac_addr;
/* gap (1945-1949) hides the break/default lines closing this switch */
1950 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1951 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1952 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1954 /* Recipe set depending on lookup type */
1955 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1956 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1957 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* gap (1958-1959) hides the "if (daddr)" guard for this DA copy */
1960 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1961 ICE_NONDMA_TO_NONDMA);
/* program the VLAN TCI only when a real (in-range) VLAN ID was set */
1963 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1964 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1965 *off = CPU_TO_BE16(vlan_id);
1968 /* Create the switch rule with the final dummy Ethernet header */
1969 if (opc != ice_aqc_opc_update_sw_rules)
1970 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1974 * ice_add_marker_act
1975 * @hw: pointer to the hardware structure
1976 * @m_ent: the management entry for which sw marker needs to be added
1977 * @sw_marker: sw marker to tag the Rx descriptor with
1978 * @l_id: large action resource ID
1980 * Create a large action to hold software marker and update the switch rule
1981 * entry pointed by m_ent with newly created large action
1983 static enum ice_status
1984 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1985 u16 sw_marker, u16 l_id)
1987 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1988 /* For software marker we need 3 large actions
1989 * 1. FWD action: FWD TO VSI or VSI LIST
1990 * 2. GENERIC VALUE action to hold the profile ID
1991 * 3. GENERIC VALUE action to hold the software marker ID
1993 const u16 num_lg_acts = 3;
1994 enum ice_status status;
/* NOTE(review): numbering gap (1995-1999) hides local declarations
 * (presumably lg_act_size, rules_size, act, id) -- confirm vs. upstream.
 */
2000 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2001 return ICE_ERR_PARAM;
2003 /* Create two back-to-back switch rules and submit them to the HW using
2004 * one memory buffer:
2008 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2009 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2010 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
/* gap (2011) suggests a dropped "if (!lg_act)" guard before this return */
2012 return ICE_ERR_NO_MEMORY;
/* the lookup rule lives immediately after the large action in the buffer */
2014 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2016 /* Fill in the first switch rule i.e. large action */
2017 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2018 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2019 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2021 /* First action VSI forwarding or VSI list forwarding depending on how
2024 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2025 m_ent->fltr_info.fwd_id.hw_vsi_id;
2027 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2028 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2029 ICE_LG_ACT_VSI_LIST_ID_M;
2030 if (m_ent->vsi_count > 1)
2031 act |= ICE_LG_ACT_VSI_LIST;
2032 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2034 /* Second action descriptor type */
2035 act = ICE_LG_ACT_GENERIC;
2037 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2038 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2040 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2041 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2043 /* Third action Marker value */
2044 act |= ICE_LG_ACT_GENERIC;
2045 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2046 ICE_LG_ACT_GENERIC_VALUE_M;
2048 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2050 /* call the fill switch rule to fill the lookup Tx Rx structure */
2051 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2052 ice_aqc_opc_update_sw_rules);
2054 /* Update the action to point to the large action ID */
2055 rx_tx->pdata.lkup_tx_rx.act =
2056 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2057 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2058 ICE_SINGLE_ACT_PTR_VAL_M));
2060 /* Use the filter rule ID of the previously created rule with single
2061 * act. Once the update happens, hardware will treat this as large
2064 rx_tx->pdata.lkup_tx_rx.index =
2065 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2067 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2068 ice_aqc_opc_update_sw_rules, NULL);
/* gap (2069) suggests a dropped "if (!status) {" guard before recording
 * the marker bookkeeping on success -- confirm vs. upstream
 */
2070 m_ent->lg_act_idx = l_id;
2071 m_ent->sw_marker_id = sw_marker;
2074 ice_free(hw, lg_act);
2079 * ice_add_counter_act - add/update filter rule with counter action
2080 * @hw: pointer to the hardware structure
2081 * @m_ent: the management entry for which counter needs to be added
2082 * @counter_id: VLAN counter ID returned as part of allocate resource
2083 * @l_id: large action resource ID
2085 static enum ice_status
2086 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2087 u16 counter_id, u16 l_id)
2089 struct ice_aqc_sw_rules_elem *lg_act;
2090 struct ice_aqc_sw_rules_elem *rx_tx;
2091 enum ice_status status;
2092 /* 2 actions will be added while adding a large action counter */
2093 const int num_acts = 2;
/* NOTE(review): numbering gap (2094-2099) hides local declarations
 * (presumably lg_act_size, rules_size, f_rule_id, act, id) -- confirm
 * vs. upstream.
 */
2100 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2101 return ICE_ERR_PARAM;
2103 /* Create two back-to-back switch rules and submit them to the HW using
2104 * one memory buffer:
2108 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2109 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2110 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
/* gap (2111-2112) hides the malloc's "rules_size);" continuation and a
 * dropped "if (!lg_act)" guard before this return -- confirm vs. upstream
 */
2113 return ICE_ERR_NO_MEMORY;
/* the lookup rule lives immediately after the large action in the buffer */
2115 rx_tx = (struct ice_aqc_sw_rules_elem *)
2116 ((u8 *)lg_act + lg_act_size);
2118 /* Fill in the first switch rule i.e. large action */
2119 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2120 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2121 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2123 /* First action VSI forwarding or VSI list forwarding depending on how
2126 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2127 m_ent->fltr_info.fwd_id.hw_vsi_id;
2129 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2130 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2131 ICE_LG_ACT_VSI_LIST_ID_M;
2132 if (m_ent->vsi_count > 1)
2133 act |= ICE_LG_ACT_VSI_LIST;
2134 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2136 /* Second action counter ID */
2137 act = ICE_LG_ACT_STAT_COUNT;
2138 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2139 ICE_LG_ACT_STAT_COUNT_M;
2140 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2142 /* call the fill switch rule to fill the lookup Tx Rx structure */
2143 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2144 ice_aqc_opc_update_sw_rules);
/* re-point the single rule at the large action just built */
2146 act = ICE_SINGLE_ACT_PTR;
2147 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2148 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2150 /* Use the filter rule ID of the previously created rule with single
2151 * act. Once the update happens, hardware will treat this as large
2154 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2155 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2157 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2158 ice_aqc_opc_update_sw_rules, NULL);
/* gap (2159) suggests a dropped "if (!status) {" guard before recording
 * the counter bookkeeping on success -- confirm vs. upstream
 */
2160 m_ent->lg_act_idx = l_id;
2161 m_ent->counter_index = counter_id;
2164 ice_free(hw, lg_act);
2169 * ice_create_vsi_list_map
2170 * @hw: pointer to the hardware structure
2171 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2172 * @num_vsi: number of VSI handles in the array
2173 * @vsi_list_id: VSI list ID generated as part of allocate resource
2175 * Helper function to create a new entry of VSI list ID to VSI mapping
2176 * using the given VSI list ID
2178 static struct ice_vsi_list_map_info *
2179 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
/* NOTE(review): numbering gap (2180-2181) hides the trailing parameter
 * (presumably "u16 vsi_list_id)") and the opening brace.
 */
2182 struct ice_switch_info *sw = hw->switch_info;
2183 struct ice_vsi_list_map_info *v_map;
2186 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
/* gap (2187-2190) hides the calloc's "sizeof(*v_map));" continuation
 * and, presumably, a NULL check returning early -- confirm vs. upstream
 */
2191 v_map->vsi_list_id = vsi_list_id;
/* record each handle in the map's bitmap of member VSIs */
2193 for (i = 0; i < num_vsi; i++)
2194 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2196 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2201 * ice_update_vsi_list_rule
2202 * @hw: pointer to the hardware structure
2203 * @vsi_handle_arr: array of VSI handles to form a VSI list
2204 * @num_vsi: number of VSI handles in the array
2205 * @vsi_list_id: VSI list ID generated as part of allocate resource
2206 * @remove: Boolean value to indicate if this is a remove action
2207 * @opc: switch rules population command type - pass in the command opcode
2208 * @lkup_type: lookup type of the filter
2210 * Call AQ command to add a new switch rule or update existing switch rule
2211 * using the given VSI list ID
2213 static enum ice_status
2214 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2215 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2216 enum ice_sw_lkup_type lkup_type)
2218 struct ice_aqc_sw_rules_elem *s_rule;
2219 enum ice_status status;
/* NOTE(review): numbering gap (2220-2224) hides local declarations
 * (presumably s_rule_size, type, i) and the guard (presumably
 * "if (!num_vsi)") for this early return -- confirm vs. upstream.
 */
2225 return ICE_ERR_PARAM;
/* forwarding lookups use the set/clear VSI-list rule types; VLAN lookups
 * use the prune-list variants; anything else is a parameter error
 */
2227 if (lkup_type == ICE_SW_LKUP_MAC ||
2228 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2229 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2230 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2231 lkup_type == ICE_SW_LKUP_PROMISC ||
2232 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2233 lkup_type == ICE_SW_LKUP_LAST)
2234 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2235 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2236 else if (lkup_type == ICE_SW_LKUP_VLAN)
2237 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2238 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
/* gap (2239) hides the final "else" introducing this error return */
2240 return ICE_ERR_PARAM;
2242 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2243 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
/* gap (2244) suggests a dropped "if (!s_rule)" guard before this return */
2245 return ICE_ERR_NO_MEMORY;
2246 for (i = 0; i < num_vsi; i++) {
2247 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2248 status = ICE_ERR_PARAM;
/* gap (2249-2250) hides the error exit (presumably "goto exit;") */
2251 /* AQ call requires hw_vsi_id(s) */
2252 s_rule->pdata.vsi_list.vsi[i] =
2253 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2256 s_rule->type = CPU_TO_LE16(type);
2257 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2258 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2260 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
/* gap (2261-2262) hides the exit label preceding this free */
2263 ice_free(hw, s_rule);
2268 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2269 * @hw: pointer to the HW struct
2270 * @vsi_handle_arr: array of VSI handles to form a VSI list
2271 * @num_vsi: number of VSI handles in the array
2272 * @vsi_list_id: stores the ID of the VSI list to be created
2273 * @lkup_type: switch rule filter's lookup type
2275 static enum ice_status
2276 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2277 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2279 enum ice_status status;
/* first allocate a VSI list resource; *vsi_list_id receives the new ID */
2281 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2282 ice_aqc_opc_alloc_res);
/* NOTE(review): numbering gap (2283-2285) suggests a dropped
 * "if (status) return status;" between allocation and population --
 * confirm vs. upstream.
 */
2286 /* Update the newly created VSI list to include the specified VSIs */
2287 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2288 *vsi_list_id, false,
2289 ice_aqc_opc_add_sw_rules, lkup_type);
2293 * ice_create_pkt_fwd_rule
2294 * @hw: pointer to the hardware structure
2295 * @f_entry: entry containing packet forwarding information
2297 * Create switch rule with given filter information and add an entry
2298 * to the corresponding filter management list to track this switch rule
2301 static enum ice_status
2302 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2303 struct ice_fltr_list_entry *f_entry)
2305 struct ice_fltr_mgmt_list_entry *fm_entry;
2306 struct ice_aqc_sw_rules_elem *s_rule;
2307 enum ice_sw_lkup_type l_type;
2308 struct ice_sw_recipe *recp;
2309 enum ice_status status;
2311 s_rule = (struct ice_aqc_sw_rules_elem *)
2312 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2314 return ICE_ERR_NO_MEMORY;
2315 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2316 ice_malloc(hw, sizeof(*fm_entry));
2318 status = ICE_ERR_NO_MEMORY;
2319 goto ice_create_pkt_fwd_rule_exit;
2322 fm_entry->fltr_info = f_entry->fltr_info;
2324 /* Initialize all the fields for the management entry */
2325 fm_entry->vsi_count = 1;
2326 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2327 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2328 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2330 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2331 ice_aqc_opc_add_sw_rules);
2333 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2334 ice_aqc_opc_add_sw_rules, NULL);
2336 ice_free(hw, fm_entry);
2337 goto ice_create_pkt_fwd_rule_exit;
2340 f_entry->fltr_info.fltr_rule_id =
2341 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2342 fm_entry->fltr_info.fltr_rule_id =
2343 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2345 /* The book keeping entries will get removed when base driver
2346 * calls remove filter AQ command
2348 l_type = fm_entry->fltr_info.lkup_type;
2349 recp = &hw->switch_info->recp_list[l_type];
2350 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2352 ice_create_pkt_fwd_rule_exit:
2353 ice_free(hw, s_rule);
2358 * ice_update_pkt_fwd_rule
2359 * @hw: pointer to the hardware structure
2360 * @f_info: filter information for switch rule
2362 * Call AQ command to update a previously created switch rule with a
2365 static enum ice_status
2366 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2368 struct ice_aqc_sw_rules_elem *s_rule;
2369 enum ice_status status;
2371 s_rule = (struct ice_aqc_sw_rules_elem *)
2372 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2374 return ICE_ERR_NO_MEMORY;
2376 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2378 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2380 /* Update switch rule with new rule set to forward VSI list */
2381 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2382 ice_aqc_opc_update_sw_rules, NULL);
2384 ice_free(hw, s_rule);
2389 * ice_update_sw_rule_bridge_mode
2390 * @hw: pointer to the HW struct
2392 * Updates unicast switch filter rules based on VEB/VEPA mode
2394 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2396 struct ice_switch_info *sw = hw->switch_info;
2397 struct ice_fltr_mgmt_list_entry *fm_entry;
2398 enum ice_status status = ICE_SUCCESS;
2399 struct LIST_HEAD_TYPE *rule_head;
2400 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2402 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2403 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2405 ice_acquire_lock(rule_lock);
2406 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2408 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2409 u8 *addr = fi->l_data.mac.mac_addr;
2411 /* Update unicast Tx rules to reflect the selected
2414 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2415 (fi->fltr_act == ICE_FWD_TO_VSI ||
2416 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2417 fi->fltr_act == ICE_FWD_TO_Q ||
2418 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2419 status = ice_update_pkt_fwd_rule(hw, fi);
2425 ice_release_lock(rule_lock);
2431 * ice_add_update_vsi_list
2432 * @hw: pointer to the hardware structure
2433 * @m_entry: pointer to current filter management list entry
2434 * @cur_fltr: filter information from the book keeping entry
2435 * @new_fltr: filter information with the new VSI to be added
2437 * Call AQ command to add or update previously created VSI list with new VSI.
2439 * Helper function to do book keeping associated with adding filter information
2440 * The algorithm to do the book keeping is described below :
2441 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2442 * if only one VSI has been added till now
2443 * Allocate a new VSI list and add two VSIs
2444 * to this list using switch rule command
2445 * Update the previously created switch rule with the
2446 * newly created VSI list ID
2447 * if a VSI list was previously created
2448 * Add the new VSI to the previously created VSI list set
2449 * using the update switch rule command
2451 static enum ice_status
2452 ice_add_update_vsi_list(struct ice_hw *hw,
2453 struct ice_fltr_mgmt_list_entry *m_entry,
2454 struct ice_fltr_info *cur_fltr,
2455 struct ice_fltr_info *new_fltr)
2457 enum ice_status status = ICE_SUCCESS;
2458 u16 vsi_list_id = 0;
2460 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2461 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2462 return ICE_ERR_NOT_IMPL;
2464 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2465 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2466 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2467 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2468 return ICE_ERR_NOT_IMPL;
2470 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2471 /* Only one entry existed in the mapping and it was not already
2472 * a part of a VSI list. So, create a VSI list with the old and
2475 struct ice_fltr_info tmp_fltr;
2476 u16 vsi_handle_arr[2];
2478 /* A rule already exists with the new VSI being added */
2479 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2480 return ICE_ERR_ALREADY_EXISTS;
2482 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2483 vsi_handle_arr[1] = new_fltr->vsi_handle;
2484 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2486 new_fltr->lkup_type);
2490 tmp_fltr = *new_fltr;
2491 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2492 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2493 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2494 /* Update the previous switch rule of "MAC forward to VSI" to
2495 * "MAC fwd to VSI list"
2497 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2501 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2502 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2503 m_entry->vsi_list_info =
2504 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2507 /* If this entry was large action then the large action needs
2508 * to be updated to point to FWD to VSI list
2510 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2512 ice_add_marker_act(hw, m_entry,
2513 m_entry->sw_marker_id,
2514 m_entry->lg_act_idx);
2516 u16 vsi_handle = new_fltr->vsi_handle;
2517 enum ice_adminq_opc opcode;
2519 if (!m_entry->vsi_list_info)
2522 /* A rule already exists with the new VSI being added */
2523 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2526 /* Update the previously created VSI list set with
2527 * the new VSI ID passed in
2529 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2530 opcode = ice_aqc_opc_update_sw_rules;
2532 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2533 vsi_list_id, false, opcode,
2534 new_fltr->lkup_type);
2535 /* update VSI list mapping info with new VSI ID */
2537 ice_set_bit(vsi_handle,
2538 m_entry->vsi_list_info->vsi_map);
2541 m_entry->vsi_count++;
2546 * ice_find_rule_entry - Search a rule entry
2547 * @hw: pointer to the hardware structure
2548 * @recp_id: lookup type for which the specified rule needs to be searched
2549 * @f_info: rule information
2551 * Helper function to search for a given rule entry
2552 * Returns pointer to entry storing the rule if found
2554 static struct ice_fltr_mgmt_list_entry *
2555 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2557 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2558 struct ice_switch_info *sw = hw->switch_info;
2559 struct LIST_HEAD_TYPE *list_head;
2561 list_head = &sw->recp_list[recp_id].filt_rules;
2562 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2564 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2565 sizeof(f_info->l_data)) &&
2566 f_info->flag == list_itr->fltr_info.flag) {
2575 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2576 * @hw: pointer to the hardware structure
2577 * @recp_id: lookup type for which VSI lists needs to be searched
2578 * @vsi_handle: VSI handle to be found in VSI list
2579 * @vsi_list_id: VSI list ID found containing vsi_handle
2581 * Helper function to search a VSI list with single entry containing given VSI
2582 * handle element. This can be extended further to search VSI list with more
2583 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2585 static struct ice_vsi_list_map_info *
2586 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2589 struct ice_vsi_list_map_info *map_info = NULL;
2590 struct ice_switch_info *sw = hw->switch_info;
2591 struct LIST_HEAD_TYPE *list_head;
2593 list_head = &sw->recp_list[recp_id].filt_rules;
2594 if (sw->recp_list[recp_id].adv_rule) {
2595 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2597 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2598 ice_adv_fltr_mgmt_list_entry,
2600 if (list_itr->vsi_list_info) {
2601 map_info = list_itr->vsi_list_info;
2602 if (ice_is_bit_set(map_info->vsi_map,
2604 *vsi_list_id = map_info->vsi_list_id;
2610 struct ice_fltr_mgmt_list_entry *list_itr;
2612 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2613 ice_fltr_mgmt_list_entry,
2615 if (list_itr->vsi_count == 1 &&
2616 list_itr->vsi_list_info) {
2617 map_info = list_itr->vsi_list_info;
2618 if (ice_is_bit_set(map_info->vsi_map,
2620 *vsi_list_id = map_info->vsi_list_id;
2630 * ice_add_rule_internal - add rule for a given lookup type
2631 * @hw: pointer to the hardware structure
2632 * @recp_id: lookup type (recipe ID) for which rule has to be added
2633 * @f_entry: structure containing MAC forwarding information
2635 * Adds or updates the rule lists for a given recipe
2637 static enum ice_status
2638 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2639 struct ice_fltr_list_entry *f_entry)
2641 struct ice_switch_info *sw = hw->switch_info;
2642 struct ice_fltr_info *new_fltr, *cur_fltr;
2643 struct ice_fltr_mgmt_list_entry *m_entry;
2644 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2645 enum ice_status status = ICE_SUCCESS;
2647 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2648 return ICE_ERR_PARAM;
2650 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2651 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2652 f_entry->fltr_info.fwd_id.hw_vsi_id =
2653 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2655 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2657 ice_acquire_lock(rule_lock);
2658 new_fltr = &f_entry->fltr_info;
2659 if (new_fltr->flag & ICE_FLTR_RX)
2660 new_fltr->src = hw->port_info->lport;
2661 else if (new_fltr->flag & ICE_FLTR_TX)
2663 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2665 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2667 status = ice_create_pkt_fwd_rule(hw, f_entry);
2668 goto exit_add_rule_internal;
2671 cur_fltr = &m_entry->fltr_info;
2672 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2674 exit_add_rule_internal:
2675 ice_release_lock(rule_lock);
2680 * ice_remove_vsi_list_rule
2681 * @hw: pointer to the hardware structure
2682 * @vsi_list_id: VSI list ID generated as part of allocate resource
2683 * @lkup_type: switch rule filter lookup type
2685 * The VSI list should be emptied before this function is called to remove the
2688 static enum ice_status
2689 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2690 enum ice_sw_lkup_type lkup_type)
2692 struct ice_aqc_sw_rules_elem *s_rule;
2693 enum ice_status status;
2696 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2697 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2699 return ICE_ERR_NO_MEMORY;
2701 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2702 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2704 /* Free the vsi_list resource that we allocated. It is assumed that the
2705 * list is empty at this point.
2707 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2708 ice_aqc_opc_free_res);
2710 ice_free(hw, s_rule);
2715 * ice_rem_update_vsi_list
2716 * @hw: pointer to the hardware structure
2717 * @vsi_handle: VSI handle of the VSI to remove
2718 * @fm_list: filter management entry for which the VSI list management needs to
2721 static enum ice_status
2722 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2723 struct ice_fltr_mgmt_list_entry *fm_list)
2725 enum ice_sw_lkup_type lkup_type;
2726 enum ice_status status = ICE_SUCCESS;
2729 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2730 fm_list->vsi_count == 0)
2731 return ICE_ERR_PARAM;
2733 /* A rule with the VSI being removed does not exist */
2734 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2735 return ICE_ERR_DOES_NOT_EXIST;
2737 lkup_type = fm_list->fltr_info.lkup_type;
2738 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2739 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2740 ice_aqc_opc_update_sw_rules,
2745 fm_list->vsi_count--;
2746 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2748 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2749 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2750 struct ice_vsi_list_map_info *vsi_list_info =
2751 fm_list->vsi_list_info;
2754 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2756 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2757 return ICE_ERR_OUT_OF_RANGE;
2759 /* Make sure VSI list is empty before removing it below */
2760 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2762 ice_aqc_opc_update_sw_rules,
2767 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2768 tmp_fltr_info.fwd_id.hw_vsi_id =
2769 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2770 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2771 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2773 ice_debug(hw, ICE_DBG_SW,
2774 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2775 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2779 fm_list->fltr_info = tmp_fltr_info;
2782 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2783 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2784 struct ice_vsi_list_map_info *vsi_list_info =
2785 fm_list->vsi_list_info;
2787 /* Remove the VSI list since it is no longer used */
2788 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2790 ice_debug(hw, ICE_DBG_SW,
2791 "Failed to remove VSI list %d, error %d\n",
2792 vsi_list_id, status);
2796 LIST_DEL(&vsi_list_info->list_entry);
2797 ice_free(hw, vsi_list_info);
2798 fm_list->vsi_list_info = NULL;
2805 * ice_remove_rule_internal - Remove a filter rule of a given type
2807 * @hw: pointer to the hardware structure
2808 * @recp_id: recipe ID for which the rule needs to removed
2809 * @f_entry: rule entry containing filter information
2811 static enum ice_status
2812 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2813 struct ice_fltr_list_entry *f_entry)
2815 struct ice_switch_info *sw = hw->switch_info;
2816 struct ice_fltr_mgmt_list_entry *list_elem;
2817 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2818 enum ice_status status = ICE_SUCCESS;
2819 bool remove_rule = false;
2822 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2823 return ICE_ERR_PARAM;
2824 f_entry->fltr_info.fwd_id.hw_vsi_id =
2825 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2827 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2828 ice_acquire_lock(rule_lock);
2829 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2831 status = ICE_ERR_DOES_NOT_EXIST;
2835 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2837 } else if (!list_elem->vsi_list_info) {
2838 status = ICE_ERR_DOES_NOT_EXIST;
2840 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2841 /* a ref_cnt > 1 indicates that the vsi_list is being
2842 * shared by multiple rules. Decrement the ref_cnt and
2843 * remove this rule, but do not modify the list, as it
2844 * is in-use by other rules.
2846 list_elem->vsi_list_info->ref_cnt--;
2849 /* a ref_cnt of 1 indicates the vsi_list is only used
2850 * by one rule. However, the original removal request is only
2851 * for a single VSI. Update the vsi_list first, and only
2852 * remove the rule if there are no further VSIs in this list.
2854 vsi_handle = f_entry->fltr_info.vsi_handle;
2855 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2858 /* if VSI count goes to zero after updating the VSI list */
2859 if (list_elem->vsi_count == 0)
2864 /* Remove the lookup rule */
2865 struct ice_aqc_sw_rules_elem *s_rule;
2867 s_rule = (struct ice_aqc_sw_rules_elem *)
2868 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2870 status = ICE_ERR_NO_MEMORY;
2874 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2875 ice_aqc_opc_remove_sw_rules);
2877 status = ice_aq_sw_rules(hw, s_rule,
2878 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2879 ice_aqc_opc_remove_sw_rules, NULL);
2881 /* Remove a book keeping from the list */
2882 ice_free(hw, s_rule);
2887 LIST_DEL(&list_elem->list_entry);
2888 ice_free(hw, list_elem);
2891 ice_release_lock(rule_lock);
2896 * ice_aq_get_res_alloc - get allocated resources
2897 * @hw: pointer to the HW struct
2898 * @num_entries: pointer to u16 to store the number of resource entries returned
2899 * @buf: pointer to user-supplied buffer
2900 * @buf_size: size of buff
2901 * @cd: pointer to command details structure or NULL
2903 * The user-supplied buffer must be large enough to store the resource
2904 * information for all resource types. Each resource type is an
2905 * ice_aqc_get_res_resp_data_elem structure.
2908 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2909 u16 buf_size, struct ice_sq_cd *cd)
2911 struct ice_aqc_get_res_alloc *resp;
2912 enum ice_status status;
2913 struct ice_aq_desc desc;
2916 return ICE_ERR_BAD_PTR;
2918 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2919 return ICE_ERR_INVAL_SIZE;
2921 resp = &desc.params.get_res;
2923 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2924 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2926 if (!status && num_entries)
2927 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2933 * ice_aq_get_res_descs - get allocated resource descriptors
2934 * @hw: pointer to the hardware structure
2935 * @num_entries: number of resource entries in buffer
2936 * @buf: Indirect buffer to hold data parameters and response
2937 * @buf_size: size of buffer for indirect commands
2938 * @res_type: resource type
2939 * @res_shared: is resource shared
2940 * @desc_id: input - first desc ID to start; output - next desc ID
2941 * @cd: pointer to command details structure or NULL
2944 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2945 struct ice_aqc_get_allocd_res_desc_resp *buf,
2946 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2947 struct ice_sq_cd *cd)
2949 struct ice_aqc_get_allocd_res_desc *cmd;
2950 struct ice_aq_desc desc;
2951 enum ice_status status;
2953 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2955 cmd = &desc.params.get_res_desc;
2958 return ICE_ERR_PARAM;
2960 if (buf_size != (num_entries * sizeof(*buf)))
2961 return ICE_ERR_PARAM;
2963 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2965 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2966 ICE_AQC_RES_TYPE_M) | (res_shared ?
2967 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2968 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2970 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2972 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2978 * ice_add_mac - Add a MAC address based filter rule
2979 * @hw: pointer to the hardware structure
2980 * @m_list: list of MAC addresses and forwarding information
2982 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2983 * multiple unicast addresses, the function assumes that all the
2984 * addresses are unique in a given add_mac call. It doesn't
2985 * check for duplicates in this case, removing duplicates from a given
2986 * list should be taken care of in the caller of this function.
2989 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2991 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2992 struct ice_fltr_list_entry *m_list_itr;
2993 struct LIST_HEAD_TYPE *rule_head;
2994 u16 elem_sent, total_elem_left;
2995 struct ice_switch_info *sw;
2996 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2997 enum ice_status status = ICE_SUCCESS;
2998 u16 num_unicast = 0;
3002 return ICE_ERR_PARAM;
3004 sw = hw->switch_info;
3005 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3006 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3008 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3012 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3013 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3014 if (!ice_is_vsi_valid(hw, vsi_handle))
3015 return ICE_ERR_PARAM;
3016 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3017 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3018 /* update the src in case it is VSI num */
3019 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3020 return ICE_ERR_PARAM;
3021 m_list_itr->fltr_info.src = hw_vsi_id;
3022 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3023 IS_ZERO_ETHER_ADDR(add))
3024 return ICE_ERR_PARAM;
3025 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3026 /* Don't overwrite the unicast address */
3027 ice_acquire_lock(rule_lock);
3028 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3029 &m_list_itr->fltr_info)) {
3030 ice_release_lock(rule_lock);
3031 return ICE_ERR_ALREADY_EXISTS;
3033 ice_release_lock(rule_lock);
3035 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3036 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3037 m_list_itr->status =
3038 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3040 if (m_list_itr->status)
3041 return m_list_itr->status;
3045 ice_acquire_lock(rule_lock);
3046 /* Exit if no suitable entries were found for adding bulk switch rule */
3048 status = ICE_SUCCESS;
3049 goto ice_add_mac_exit;
3052 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3054 /* Allocate switch rule buffer for the bulk update for unicast */
3055 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3056 s_rule = (struct ice_aqc_sw_rules_elem *)
3057 ice_calloc(hw, num_unicast, s_rule_size);
3059 status = ICE_ERR_NO_MEMORY;
3060 goto ice_add_mac_exit;
3064 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3066 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3067 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3069 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3070 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3071 ice_aqc_opc_add_sw_rules);
3072 r_iter = (struct ice_aqc_sw_rules_elem *)
3073 ((u8 *)r_iter + s_rule_size);
3077 /* Call AQ bulk switch rule update for all unicast addresses */
3079 /* Call AQ switch rule in AQ_MAX chunk */
3080 for (total_elem_left = num_unicast; total_elem_left > 0;
3081 total_elem_left -= elem_sent) {
3082 struct ice_aqc_sw_rules_elem *entry = r_iter;
3084 elem_sent = min(total_elem_left,
3085 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3086 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3087 elem_sent, ice_aqc_opc_add_sw_rules,
3090 goto ice_add_mac_exit;
3091 r_iter = (struct ice_aqc_sw_rules_elem *)
3092 ((u8 *)r_iter + (elem_sent * s_rule_size));
3095 /* Fill up rule ID based on the value returned from FW */
3097 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3099 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3100 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3101 struct ice_fltr_mgmt_list_entry *fm_entry;
3103 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3104 f_info->fltr_rule_id =
3105 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3106 f_info->fltr_act = ICE_FWD_TO_VSI;
3107 /* Create an entry to track this MAC address */
3108 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3109 ice_malloc(hw, sizeof(*fm_entry));
3111 status = ICE_ERR_NO_MEMORY;
3112 goto ice_add_mac_exit;
3114 fm_entry->fltr_info = *f_info;
3115 fm_entry->vsi_count = 1;
3116 /* The book keeping entries will get removed when
3117 * base driver calls remove filter AQ command
3120 LIST_ADD(&fm_entry->list_entry, rule_head);
3121 r_iter = (struct ice_aqc_sw_rules_elem *)
3122 ((u8 *)r_iter + s_rule_size);
3127 ice_release_lock(rule_lock);
3129 ice_free(hw, s_rule);
3134 * ice_add_vlan_internal - Add one VLAN based filter rule
3135 * @hw: pointer to the hardware structure
3136 * @f_entry: filter entry containing one VLAN information
3138 static enum ice_status
3139 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3141 struct ice_switch_info *sw = hw->switch_info;
3142 struct ice_fltr_mgmt_list_entry *v_list_itr;
3143 struct ice_fltr_info *new_fltr, *cur_fltr;
3144 enum ice_sw_lkup_type lkup_type;
3145 u16 vsi_list_id = 0, vsi_handle;
3146 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3147 enum ice_status status = ICE_SUCCESS;
3149 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3150 return ICE_ERR_PARAM;
3152 f_entry->fltr_info.fwd_id.hw_vsi_id =
3153 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3154 new_fltr = &f_entry->fltr_info;
3156 /* VLAN ID should only be 12 bits */
3157 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3158 return ICE_ERR_PARAM;
3160 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3161 return ICE_ERR_PARAM;
3163 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3164 lkup_type = new_fltr->lkup_type;
3165 vsi_handle = new_fltr->vsi_handle;
3166 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3167 ice_acquire_lock(rule_lock);
3168 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3170 struct ice_vsi_list_map_info *map_info = NULL;
3172 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3173 /* All VLAN pruning rules use a VSI list. Check if
3174 * there is already a VSI list containing VSI that we
3175 * want to add. If found, use the same vsi_list_id for
3176 * this new VLAN rule or else create a new list.
3178 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3182 status = ice_create_vsi_list_rule(hw,
3190 /* Convert the action to forwarding to a VSI list. */
3191 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3192 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3195 status = ice_create_pkt_fwd_rule(hw, f_entry);
3197 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3200 status = ICE_ERR_DOES_NOT_EXIST;
3203 /* reuse VSI list for new rule and increment ref_cnt */
3205 v_list_itr->vsi_list_info = map_info;
3206 map_info->ref_cnt++;
3208 v_list_itr->vsi_list_info =
3209 ice_create_vsi_list_map(hw, &vsi_handle,
3213 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3214 /* Update existing VSI list to add new VSI ID only if it used
3217 cur_fltr = &v_list_itr->fltr_info;
3218 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3221 /* If VLAN rule exists and VSI list being used by this rule is
3222 * referenced by more than 1 VLAN rule. Then create a new VSI
3223 * list appending previous VSI with new VSI and update existing
3224 * VLAN rule to point to new VSI list ID
3226 struct ice_fltr_info tmp_fltr;
3227 u16 vsi_handle_arr[2];
3230 /* Current implementation only supports reusing VSI list with
3231 * one VSI count. We should never hit below condition
3233 if (v_list_itr->vsi_count > 1 &&
3234 v_list_itr->vsi_list_info->ref_cnt > 1) {
3235 ice_debug(hw, ICE_DBG_SW,
3236 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3237 status = ICE_ERR_CFG;
3242 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3245 /* A rule already exists with the new VSI being added */
3246 if (cur_handle == vsi_handle) {
3247 status = ICE_ERR_ALREADY_EXISTS;
3251 vsi_handle_arr[0] = cur_handle;
3252 vsi_handle_arr[1] = vsi_handle;
3253 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3254 &vsi_list_id, lkup_type);
3258 tmp_fltr = v_list_itr->fltr_info;
3259 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3260 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3261 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3262 /* Update the previous switch rule to a new VSI list which
3263 * includes current VSI that is requested
3265 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3269 /* before overriding VSI list map info. decrement ref_cnt of
3272 v_list_itr->vsi_list_info->ref_cnt--;
3274 /* now update to newly created list */
3275 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3276 v_list_itr->vsi_list_info =
3277 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3279 v_list_itr->vsi_count++;
3283 ice_release_lock(rule_lock);
3288 * ice_add_vlan - Add VLAN based filter rule
3289 * @hw: pointer to the hardware structure
3290 * @v_list: list of VLAN entries and forwarding information
3293 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3295 struct ice_fltr_list_entry *v_list_itr;
3298 return ICE_ERR_PARAM;
3300 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3302 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3303 return ICE_ERR_PARAM;
3304 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3305 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3306 if (v_list_itr->status)
3307 return v_list_itr->status;
3313 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3314 * @hw: pointer to the hardware structure
3315 * @mv_list: list of MAC and VLAN filters
3317 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3318 * pruning bits enabled, then it is the responsibility of the caller to make
3319 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3320 * VLAN won't be received on that VSI otherwise.
3323 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3325 struct ice_fltr_list_entry *mv_list_itr;
3327 if (!mv_list || !hw)
3328 return ICE_ERR_PARAM;
3330 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3332 enum ice_sw_lkup_type l_type =
3333 mv_list_itr->fltr_info.lkup_type;
3335 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3336 return ICE_ERR_PARAM;
3337 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3338 mv_list_itr->status =
3339 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3341 if (mv_list_itr->status)
3342 return mv_list_itr->status;
3348 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3349 * @hw: pointer to the hardware structure
3350 * @em_list: list of ether type MAC filter, MAC is optional
3352 * This function requires the caller to populate the entries in
3353 * the filter list with the necessary fields (including flags to
3354 * indicate Tx or Rx rules).
3357 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3359 struct ice_fltr_list_entry *em_list_itr;
3361 if (!em_list || !hw)
3362 return ICE_ERR_PARAM;
3364 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
/* accept both plain ethertype and ethertype+MAC lookups; reject others */
3366 enum ice_sw_lkup_type l_type =
3367 em_list_itr->fltr_info.lkup_type;
3369 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3370 l_type != ICE_SW_LKUP_ETHERTYPE)
3371 return ICE_ERR_PARAM;
/* each entry keeps its own Tx/Rx flag (set by the caller), unlike
 * ice_add_mac_vlan() which forces Tx
 */
3373 em_list_itr->status = ice_add_rule_internal(hw, l_type,
/* stop on first failure; no rollback of earlier entries */
3375 if (em_list_itr->status)
3376 return em_list_itr->status;
3382 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3383 * @hw: pointer to the hardware structure
3384 * @em_list: list of ethertype or ethertype MAC entries
3387 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3389 struct ice_fltr_list_entry *em_list_itr, *tmp;
3391 if (!em_list || !hw)
3392 return ICE_ERR_PARAM;
/* safe iterator: ice_remove_rule_internal() may unlink the entry */
3394 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3396 enum ice_sw_lkup_type l_type =
3397 em_list_itr->fltr_info.lkup_type;
/* only the two ethertype-based lookup types are valid here */
3399 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3400 l_type != ICE_SW_LKUP_ETHERTYPE)
3401 return ICE_ERR_PARAM;
3403 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
/* stop on first failure; remaining entries are left untouched */
3405 if (em_list_itr->status)
3406 return em_list_itr->status;
3412 * ice_rem_sw_rule_info
3413 * @hw: pointer to the hardware structure
3414 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every bookkeeping entry on @rule_head. This only releases driver
 * memory; it does not send any AQ command to remove rules from hardware.
 */
3417 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3419 if (!LIST_EMPTY(rule_head)) {
3420 struct ice_fltr_mgmt_list_entry *entry;
3421 struct ice_fltr_mgmt_list_entry *tmp;
/* safe iterator: entries are unlinked and freed inside the loop */
3423 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3424 ice_fltr_mgmt_list_entry, list_entry) {
3425 LIST_DEL(&entry->list_entry);
3426 ice_free(hw, entry);
3432 * ice_rem_adv_rule_info
3433 * @hw: pointer to the hardware structure
3434 * @rule_head: pointer to the switch list structure that we want to delete
/* Advanced-rule counterpart of ice_rem_sw_rule_info(): frees driver-side
 * bookkeeping only, no hardware rule removal.
 */
3437 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3439 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3440 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3442 if (LIST_EMPTY(rule_head))
/* safe iterator: entries are unlinked and freed inside the loop */
3445 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3446 ice_adv_fltr_mgmt_list_entry, list_entry) {
3447 LIST_DEL(&lst_itr->list_entry);
/* the lookup array is a separate allocation; free it before the entry */
3448 ice_free(hw, lst_itr->lkups);
3449 ice_free(hw, lst_itr);
3454 * ice_rem_all_sw_rules_info
3455 * @hw: pointer to the hardware structure
/* Walks every recipe's filter-rule list and frees its bookkeeping, picking
 * the basic or advanced cleanup routine based on the recipe's adv_rule flag.
 */
3457 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3459 struct ice_switch_info *sw = hw->switch_info;
3462 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3463 struct LIST_HEAD_TYPE *rule_head;
3465 rule_head = &sw->recp_list[i].filt_rules;
/* basic recipes use ice_fltr_mgmt_list_entry, advanced recipes use
 * ice_adv_fltr_mgmt_list_entry — they need different free routines
 */
3466 if (!sw->recp_list[i].adv_rule)
3467 ice_rem_sw_rule_info(hw, rule_head);
3469 ice_rem_adv_rule_info(hw, rule_head);
3474 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3475 * @pi: pointer to the port_info structure
3476 * @vsi_handle: VSI handle to set as default
3477 * @set: true to add the above mentioned switch rule, false to remove it
3478 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3480 * add filter rule to set/unset given VSI as default VSI for the switch
3481 * (represented by swid)
3484 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3487 struct ice_aqc_sw_rules_elem *s_rule;
3488 struct ice_fltr_info f_info;
3489 struct ice_hw *hw = pi->hw;
3490 enum ice_adminq_opc opcode;
3491 enum ice_status status;
3495 if (!ice_is_vsi_valid(hw, vsi_handle))
3496 return ICE_ERR_PARAM;
3497 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* an "add" rule carries the dummy ethernet header payload, so it needs
 * the larger buffer; a "remove" rule is header-less
 */
3499 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3500 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3501 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3503 return ICE_ERR_NO_MEMORY;
3505 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3507 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3508 f_info.flag = direction;
3509 f_info.fltr_act = ICE_FWD_TO_VSI;
3510 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules match on the physical port as source; Tx default
 * rules match on the VSI itself as source
 */
3512 if (f_info.flag & ICE_FLTR_RX) {
3513 f_info.src = pi->lport;
3514 f_info.src_id = ICE_SRC_ID_LPORT;
3516 f_info.fltr_rule_id =
3517 pi->dflt_rx_vsi_rule_id;
3518 } else if (f_info.flag & ICE_FLTR_TX) {
3519 f_info.src_id = ICE_SRC_ID_VSI;
3520 f_info.src = hw_vsi_id;
3522 f_info.fltr_rule_id =
3523 pi->dflt_tx_vsi_rule_id;
3527 opcode = ice_aqc_opc_add_sw_rules;
3529 opcode = ice_aqc_opc_remove_sw_rules;
3531 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3533 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3534 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* on success, record (add) or invalidate (remove) the default-VSI
 * bookkeeping kept in the port_info
 */
3537 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3539 if (f_info.flag & ICE_FLTR_TX) {
3540 pi->dflt_tx_vsi_num = hw_vsi_id;
3541 pi->dflt_tx_vsi_rule_id = index;
3542 } else if (f_info.flag & ICE_FLTR_RX) {
3543 pi->dflt_rx_vsi_num = hw_vsi_id;
3544 pi->dflt_rx_vsi_rule_id = index;
3547 if (f_info.flag & ICE_FLTR_TX) {
3548 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3549 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3550 } else if (f_info.flag & ICE_FLTR_RX) {
3551 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3552 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3557 ice_free(hw, s_rule);
3562 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3563 * @hw: pointer to the hardware structure
3564 * @recp_id: lookup type for which the specified rule needs to be searched
3565 * @f_info: rule information
3567 * Helper function to search for a unicast rule entry - this is to be used
3568 * to remove unicast MAC filter that is not shared with other VSIs on the
3571 * Returns pointer to entry storing the rule if found
3573 static struct ice_fltr_mgmt_list_entry *
3574 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3575 struct ice_fltr_info *f_info)
3577 struct ice_switch_info *sw = hw->switch_info;
3578 struct ice_fltr_mgmt_list_entry *list_itr;
3579 struct LIST_HEAD_TYPE *list_head;
/* NOTE(review): caller is expected to hold the recipe's filt_rule_lock;
 * no locking is done here — verify against call sites
 */
3581 list_head = &sw->recp_list[recp_id].filt_rules;
3582 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* match on lookup data, target HW VSI and direction flag together */
3584 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3585 sizeof(f_info->l_data)) &&
3586 f_info->fwd_id.hw_vsi_id ==
3587 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3588 f_info->flag == list_itr->fltr_info.flag)
3595 * ice_remove_mac - remove a MAC address based filter rule
3596 * @hw: pointer to the hardware structure
3597 * @m_list: list of MAC addresses and forwarding information
3599 * This function removes either a MAC filter rule or a specific VSI from a
3600 * VSI list for a multicast MAC address.
3602 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3603 * ice_add_mac. Caller should be aware that this call will only work if all
3604 * the entries passed into m_list were added previously. It will not attempt to
3605 * do a partial remove of entries that were found.
3608 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3610 struct ice_fltr_list_entry *list_itr, *tmp;
3611 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3614 return ICE_ERR_PARAM;
3616 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3617 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3619 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3620 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3623 if (l_type != ICE_SW_LKUP_MAC)
3624 return ICE_ERR_PARAM;
3626 vsi_handle = list_itr->fltr_info.vsi_handle;
3627 if (!ice_is_vsi_valid(hw, vsi_handle))
3628 return ICE_ERR_PARAM;
/* resolve software VSI handle to the hardware VSI number used in rules */
3630 list_itr->fltr_info.fwd_id.hw_vsi_id =
3631 ice_get_hw_vsi_num(hw, vsi_handle);
/* when unicast addresses are not shared across VSIs, the exact rule
 * (same MAC, same HW VSI, same direction) must exist before removal
 */
3632 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3633 /* Don't remove the unicast address that belongs to
3634 * another VSI on the switch, since it is not being
3637 ice_acquire_lock(rule_lock);
3638 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3639 &list_itr->fltr_info)) {
3640 ice_release_lock(rule_lock);
3641 return ICE_ERR_DOES_NOT_EXIST;
3643 ice_release_lock(rule_lock);
3645 list_itr->status = ice_remove_rule_internal(hw,
/* stop on first failure; earlier removals are not rolled back */
3648 if (list_itr->status)
3649 return list_itr->status;
3655 * ice_remove_vlan - Remove VLAN based filter rule
3656 * @hw: pointer to the hardware structure
3657 * @v_list: list of VLAN entries and forwarding information
3660 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3662 struct ice_fltr_list_entry *v_list_itr, *tmp;
3665 return ICE_ERR_PARAM;
/* safe iterator: ice_remove_rule_internal() may unlink the entry */
3667 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3669 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3671 if (l_type != ICE_SW_LKUP_VLAN)
3672 return ICE_ERR_PARAM;
3673 v_list_itr->status = ice_remove_rule_internal(hw,
/* stop on first failure; earlier removals are not rolled back */
3676 if (v_list_itr->status)
3677 return v_list_itr->status;
3683 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3684 * @hw: pointer to the hardware structure
3685 * @v_list: list of MAC VLAN entries and forwarding information
3688 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3690 struct ice_fltr_list_entry *v_list_itr, *tmp;
3693 return ICE_ERR_PARAM;
/* safe iterator: ice_remove_rule_internal() may unlink the entry */
3695 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3697 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3699 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3700 return ICE_ERR_PARAM;
3701 v_list_itr->status =
3702 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
/* stop on first failure; earlier removals are not rolled back */
3704 if (v_list_itr->status)
3705 return v_list_itr->status;
3711 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3712 * @fm_entry: filter entry to inspect
3713 * @vsi_handle: VSI handle to compare with filter info
/* True when the rule forwards directly to this VSI, or forwards to a VSI
 * list whose bitmap contains this VSI handle.
 */
3716 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3718 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3719 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3720 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
/* NOTE(review): vsi_list_info is dereferenced without a NULL check;
 * presumably always set for ICE_FWD_TO_VSI_LIST rules — confirm
 */
3721 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3726 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3727 * @hw: pointer to the hardware structure
3728 * @vsi_handle: VSI handle to remove filters from
3729 * @vsi_list_head: pointer to the list to add entry to
3730 * @fi: pointer to fltr_info of filter entry to copy & add
3732 * Helper function, used when creating a list of filters to remove from
3733 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3734 * original filter entry, with the exception of fltr_info.fltr_act and
3735 * fltr_info.fwd_id fields. These are set such that later logic can
3736 * extract which VSI to remove the fltr from, and pass on that information.
3738 static enum ice_status
3739 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3740 struct LIST_HEAD_TYPE *vsi_list_head,
3741 struct ice_fltr_info *fi)
3743 struct ice_fltr_list_entry *tmp;
3745 /* this memory is freed up in the caller function
3746 * once filters for this VSI are removed
3748 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3750 return ICE_ERR_NO_MEMORY;
/* struct copy — the original entry on the recipe list is untouched */
3752 tmp->fltr_info = *fi;
3754 /* Overwrite these fields to indicate which VSI to remove filter from,
3755 * so find and remove logic can extract the information from the
3756 * list entries. Note that original entries will still have proper
3759 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3760 tmp->fltr_info.vsi_handle = vsi_handle;
3761 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3763 LIST_ADD(&tmp->list_entry, vsi_list_head);
3769 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3770 * @hw: pointer to the hardware structure
3771 * @vsi_handle: VSI handle to remove filters from
3772 * @lkup_list_head: pointer to the list that has certain lookup type filters
3773 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3775 * Locates all filters in lkup_list_head that are used by the given VSI,
3776 * and adds COPIES of those entries to vsi_list_head (intended to be used
3777 * to remove the listed filters).
3778 * Note that this means all entries in vsi_list_head must be explicitly
3779 * deallocated by the caller when done with list.
3781 static enum ice_status
3782 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3783 struct LIST_HEAD_TYPE *lkup_list_head,
3784 struct LIST_HEAD_TYPE *vsi_list_head)
3786 struct ice_fltr_mgmt_list_entry *fm_entry;
3787 enum ice_status status = ICE_SUCCESS;
3789 /* check to make sure VSI ID is valid and within boundary */
3790 if (!ice_is_vsi_valid(hw, vsi_handle))
3791 return ICE_ERR_PARAM;
3793 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3794 ice_fltr_mgmt_list_entry, list_entry) {
3795 struct ice_fltr_info *fi;
3797 fi = &fm_entry->fltr_info;
/* skip filters not associated with this VSI */
3798 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
/* copy each matching filter onto the caller-owned removal list */
3801 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3810 * ice_determine_promisc_mask
3811 * @fi: filter info to parse
3813 * Helper function to determine which ICE_PROMISC_ mask corresponds
3814 * to given filter into.
3816 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3818 u16 vid = fi->l_data.mac_vlan.vlan_id;
3819 u8 *macaddr = fi->l_data.mac.mac_addr;
3820 bool is_tx_fltr = false;
3821 u8 promisc_mask = 0;
/* direction flag selects between the _TX and _RX variants below */
3823 if (fi->flag == ICE_FLTR_TX)
/* classify by destination MAC: broadcast, multicast, then unicast */
3826 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3827 promisc_mask |= is_tx_fltr ?
3828 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3829 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3830 promisc_mask |= is_tx_fltr ?
3831 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3832 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3833 promisc_mask |= is_tx_fltr ?
3834 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3836 promisc_mask |= is_tx_fltr ?
3837 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3839 return promisc_mask;
3843 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3844 * @hw: pointer to the hardware structure
3845 * @vsi_handle: VSI handle to retrieve info from
3846 * @promisc_mask: pointer to mask to be filled in
3847 * @vid: VLAN ID of promisc VLAN VSI
3850 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3853 struct ice_switch_info *sw = hw->switch_info;
3854 struct ice_fltr_mgmt_list_entry *itr;
3855 struct LIST_HEAD_TYPE *rule_head;
3856 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3858 if (!ice_is_vsi_valid(hw, vsi_handle))
3859 return ICE_ERR_PARAM;
/* scan the plain (non-VLAN) promiscuous recipe's rule list */
3863 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3864 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3866 ice_acquire_lock(rule_lock);
3867 LIST_FOR_EACH_ENTRY(itr, rule_head,
3868 ice_fltr_mgmt_list_entry, list_entry) {
3869 /* Continue if this filter doesn't apply to this VSI or the
3870 * VSI ID is not in the VSI map for this filter
3872 if (!ice_vsi_uses_fltr(itr, vsi_handle))
/* accumulate the ICE_PROMISC_* bits each matching rule represents */
3875 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3877 ice_release_lock(rule_lock);
3883 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3884 * @hw: pointer to the hardware structure
3885 * @vsi_handle: VSI handle to retrieve info from
3886 * @promisc_mask: pointer to mask to be filled in
3887 * @vid: VLAN ID of promisc VLAN VSI
3890 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3893 struct ice_switch_info *sw = hw->switch_info;
3894 struct ice_fltr_mgmt_list_entry *itr;
3895 struct LIST_HEAD_TYPE *rule_head;
3896 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3898 if (!ice_is_vsi_valid(hw, vsi_handle))
3899 return ICE_ERR_PARAM;
/* same as ice_get_vsi_promisc() but over the PROMISC_VLAN recipe */
3903 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3904 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3906 ice_acquire_lock(rule_lock);
3907 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3909 /* Continue if this filter doesn't apply to this VSI or the
3910 * VSI ID is not in the VSI map for this filter
3912 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3915 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3917 ice_release_lock(rule_lock);
3923 * ice_remove_promisc - Remove promisc based filter rules
3924 * @hw: pointer to the hardware structure
3925 * @recp_id: recipe ID for which the rule needs to removed
3926 * @v_list: list of promisc entries
3928 static enum ice_status
3929 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3930 struct LIST_HEAD_TYPE *v_list)
3932 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* safe iterator: ice_remove_rule_internal() may unlink the entry */
3934 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3936 v_list_itr->status =
3937 ice_remove_rule_internal(hw, recp_id, v_list_itr);
/* stop on first failure; earlier removals are not rolled back */
3938 if (v_list_itr->status)
3939 return v_list_itr->status;
3945 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3946 * @hw: pointer to the hardware structure
3947 * @vsi_handle: VSI handle to clear mode
3948 * @promisc_mask: mask of promiscuous config bits to clear
3949 * @vid: VLAN ID to clear VLAN promiscuous
3952 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3955 struct ice_switch_info *sw = hw->switch_info;
3956 struct ice_fltr_list_entry *fm_entry, *tmp;
3957 struct LIST_HEAD_TYPE remove_list_head;
3958 struct ice_fltr_mgmt_list_entry *itr;
3959 struct LIST_HEAD_TYPE *rule_head;
3960 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3961 enum ice_status status = ICE_SUCCESS;
3964 if (!ice_is_vsi_valid(hw, vsi_handle))
3965 return ICE_ERR_PARAM;
/* VLAN-qualified promisc bits live under a different recipe */
3967 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3968 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3970 recipe_id = ICE_SW_LKUP_PROMISC;
3972 rule_head = &sw->recp_list[recipe_id].filt_rules;
3973 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3975 INIT_LIST_HEAD(&remove_list_head);
/* build a removal list under the lock, then remove without holding it */
3977 ice_acquire_lock(rule_lock);
3978 LIST_FOR_EACH_ENTRY(itr, rule_head,
3979 ice_fltr_mgmt_list_entry, list_entry) {
3980 struct ice_fltr_info *fltr_info;
3981 u8 fltr_promisc_mask = 0;
3983 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3985 fltr_info = &itr->fltr_info;
/* for VLAN promisc rules, only clear rules matching the given VID */
3987 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
3988 vid != fltr_info->l_data.mac_vlan.vlan_id)
3991 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
3993 /* Skip if filter is not completely specified by given mask */
3994 if (fltr_promisc_mask & ~promisc_mask)
3997 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4001 ice_release_lock(rule_lock);
4002 goto free_fltr_list;
4005 ice_release_lock(rule_lock);
4007 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* free the temporary removal-list copies on all paths */
4010 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4011 ice_fltr_list_entry, list_entry) {
4012 LIST_DEL(&fm_entry->list_entry);
4013 ice_free(hw, fm_entry);
4020 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4021 * @hw: pointer to the hardware structure
4022 * @vsi_handle: VSI handle to configure
4023 * @promisc_mask: mask of promiscuous config bits
4024 * @vid: VLAN ID to set VLAN promiscuous
4027 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4029 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4030 struct ice_fltr_list_entry f_list_entry;
4031 struct ice_fltr_info new_fltr;
4032 enum ice_status status = ICE_SUCCESS;
4038 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4040 if (!ice_is_vsi_valid(hw, vsi_handle))
4041 return ICE_ERR_PARAM;
4042 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4044 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN-qualified promisc uses the PROMISC_VLAN recipe and carries the VID */
4046 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4047 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4048 new_fltr.l_data.mac_vlan.vlan_id = vid;
4049 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4051 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4052 recipe_id = ICE_SW_LKUP_PROMISC;
4055 /* Separate filters must be set for each direction/packet type
4056 * combination, so we will loop over the mask value, store the
4057 * individual type, and clear it out in the input mask as it
4060 while (promisc_mask) {
/* peel exactly one pending bit per iteration, recording the packet
 * class (ucast/mcast/bcast) and whether it is a Tx rule
 */
4066 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4067 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4068 pkt_type = UCAST_FLTR;
4069 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4070 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4071 pkt_type = UCAST_FLTR;
4073 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4074 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4075 pkt_type = MCAST_FLTR;
4076 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4077 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4078 pkt_type = MCAST_FLTR;
4080 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4081 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4082 pkt_type = BCAST_FLTR;
4083 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4084 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4085 pkt_type = BCAST_FLTR;
4089 /* Check for VLAN promiscuous flag */
4090 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4091 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4092 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4093 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4097 /* Set filter DA based on packet type */
4098 mac_addr = new_fltr.l_data.mac.mac_addr;
4099 if (pkt_type == BCAST_FLTR) {
4100 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4101 } else if (pkt_type == MCAST_FLTR ||
4102 pkt_type == UCAST_FLTR) {
4103 /* Use the dummy ether header DA */
4104 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4105 ICE_NONDMA_TO_NONDMA);
4106 if (pkt_type == MCAST_FLTR)
4107 mac_addr[0] |= 0x1; /* Set multicast bit */
4110 /* Need to reset this to zero for all iterations */
/* Tx rules match on the VSI as source; Rx rules match on the port */
4113 new_fltr.flag |= ICE_FLTR_TX;
4114 new_fltr.src = hw_vsi_id;
4116 new_fltr.flag |= ICE_FLTR_RX;
4117 new_fltr.src = hw->port_info->lport;
4120 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4121 new_fltr.vsi_handle = vsi_handle;
4122 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
/* f_list_entry lives on the stack; ice_add_rule_internal takes a copy */
4123 f_list_entry.fltr_info = new_fltr;
4125 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4126 if (status != ICE_SUCCESS)
4127 goto set_promisc_exit;
4135 * ice_set_vlan_vsi_promisc
4136 * @hw: pointer to the hardware structure
4137 * @vsi_handle: VSI handle to configure
4138 * @promisc_mask: mask of promiscuous config bits
4139 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4141 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4144 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4145 bool rm_vlan_promisc)
4147 struct ice_switch_info *sw = hw->switch_info;
4148 struct ice_fltr_list_entry *list_itr, *tmp;
4149 struct LIST_HEAD_TYPE vsi_list_head;
4150 struct LIST_HEAD_TYPE *vlan_head;
4151 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4152 enum ice_status status;
/* snapshot this VSI's VLAN filters under the lock, then apply the
 * promisc change per VLAN without holding the lock
 */
4155 INIT_LIST_HEAD(&vsi_list_head);
4156 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4157 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4158 ice_acquire_lock(vlan_lock);
4159 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4161 ice_release_lock(vlan_lock);
4163 goto free_fltr_list;
4165 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4167 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
/* set or clear promisc for each VLAN depending on rm_vlan_promisc */
4168 if (rm_vlan_promisc)
4169 status = ice_clear_vsi_promisc(hw, vsi_handle,
4170 promisc_mask, vlan_id);
4172 status = ice_set_vsi_promisc(hw, vsi_handle,
4173 promisc_mask, vlan_id);
/* free the temporary snapshot entries on all paths */
4179 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4180 ice_fltr_list_entry, list_entry) {
4181 LIST_DEL(&list_itr->list_entry);
4182 ice_free(hw, list_itr);
4188 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4189 * @hw: pointer to the hardware structure
4190 * @vsi_handle: VSI handle to remove filters from
4191 * @lkup: switch rule filter lookup type
4194 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4195 enum ice_sw_lkup_type lkup)
4197 struct ice_switch_info *sw = hw->switch_info;
4198 struct ice_fltr_list_entry *fm_entry;
4199 struct LIST_HEAD_TYPE remove_list_head;
4200 struct LIST_HEAD_TYPE *rule_head;
4201 struct ice_fltr_list_entry *tmp;
4202 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4203 enum ice_status status;
/* snapshot this VSI's rules of the given lookup type under the lock */
4205 INIT_LIST_HEAD(&remove_list_head);
4206 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4207 rule_head = &sw->recp_list[lkup].filt_rules;
4208 ice_acquire_lock(rule_lock);
4209 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4211 ice_release_lock(rule_lock);
/* dispatch to the type-specific removal routine */
4216 case ICE_SW_LKUP_MAC:
4217 ice_remove_mac(hw, &remove_list_head);
4219 case ICE_SW_LKUP_VLAN:
4220 ice_remove_vlan(hw, &remove_list_head);
4222 case ICE_SW_LKUP_PROMISC:
4223 case ICE_SW_LKUP_PROMISC_VLAN:
4224 ice_remove_promisc(hw, lkup, &remove_list_head);
4226 case ICE_SW_LKUP_MAC_VLAN:
4227 ice_remove_mac_vlan(hw, &remove_list_head);
4229 case ICE_SW_LKUP_ETHERTYPE:
4230 case ICE_SW_LKUP_ETHERTYPE_MAC:
4231 ice_remove_eth_mac(hw, &remove_list_head);
4233 case ICE_SW_LKUP_DFLT:
4234 ice_debug(hw, ICE_DBG_SW,
4235 "Remove filters for this lookup type hasn't been implemented yet\n");
4237 case ICE_SW_LKUP_LAST:
4238 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* free the temporary snapshot entries regardless of removal outcome */
4242 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4243 ice_fltr_list_entry, list_entry) {
4244 LIST_DEL(&fm_entry->list_entry);
4245 ice_free(hw, fm_entry);
4250 * ice_remove_vsi_fltr - Remove all filters for a VSI
4251 * @hw: pointer to the hardware structure
4252 * @vsi_handle: VSI handle to remove filters from
/* Removes this VSI's filters for every supported lookup type, one type at
 * a time (ICE_SW_LKUP_DFLT removal is a logged no-op in the helper).
 */
4254 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4256 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4258 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4259 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4260 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4261 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4262 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4263 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4264 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4265 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4269 * ice_alloc_res_cntr - allocating resource counter
4270 * @hw: pointer to the hardware structure
4271 * @type: type of resource
4272 * @alloc_shared: if set it is shared else dedicated
4273 * @num_items: number of entries requested for FD resource type
4274 * @counter_id: counter index returned by AQ call
4277 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4280 struct ice_aqc_alloc_free_res_elem *buf;
4281 enum ice_status status;
4284 /* Allocate resource */
4285 buf_len = sizeof(*buf);
4286 buf = (struct ice_aqc_alloc_free_res_elem *)
4287 ice_malloc(hw, buf_len);
4289 return ICE_ERR_NO_MEMORY;
/* res_type encodes both the resource type and the shared/dedicated flag */
4291 buf->num_elems = CPU_TO_LE16(num_items);
4292 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4293 ICE_AQC_RES_TYPE_M) | alloc_shared);
4295 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4296 ice_aqc_opc_alloc_res, NULL);
/* firmware returns the allocated counter index in the response element */
4300 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4308 * ice_free_res_cntr - free resource counter
4309 * @hw: pointer to the hardware structure
4310 * @type: type of resource
4311 * @alloc_shared: if set it is shared else dedicated
4312 * @num_items: number of entries to be freed for FD resource type
4313 * @counter_id: counter ID resource which needs to be freed
4316 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4319 struct ice_aqc_alloc_free_res_elem *buf;
4320 enum ice_status status;
4324 buf_len = sizeof(*buf);
4325 buf = (struct ice_aqc_alloc_free_res_elem *)
4326 ice_malloc(hw, buf_len);
4328 return ICE_ERR_NO_MEMORY;
/* mirror of ice_alloc_res_cntr(): same res_type encoding, but the
 * counter to release is passed in the element and the free opcode used
 */
4330 buf->num_elems = CPU_TO_LE16(num_items);
4331 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4332 ICE_AQC_RES_TYPE_M) | alloc_shared);
4333 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4335 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4336 ice_aqc_opc_free_res, NULL);
/* log and fall through; status is still reported to the caller */
4338 ice_debug(hw, ICE_DBG_SW,
4339 "counter resource could not be freed\n");
4346 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4347 * @hw: pointer to the hardware structure
4348 * @counter_id: returns counter index
/* Thin wrapper: allocate one dedicated VLAN counter via ice_alloc_res_cntr */
4350 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4352 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4353 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4358 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4359 * @hw: pointer to the hardware structure
4360 * @counter_id: counter index to be freed
/* Thin wrapper: release one dedicated VLAN counter via ice_free_res_cntr */
4362 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4364 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4365 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4370 * ice_alloc_res_lg_act - add large action resource
4371 * @hw: pointer to the hardware structure
4372 * @l_id: large action ID to fill it in
4373 * @num_acts: number of actions to hold with a large action entry
4375 static enum ice_status
4376 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4378 struct ice_aqc_alloc_free_res_elem *sw_buf;
4379 enum ice_status status;
4382 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4383 return ICE_ERR_PARAM;
4385 /* Allocate resource for large action */
4386 buf_len = sizeof(*sw_buf);
4387 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4388 ice_malloc(hw, buf_len);
4390 return ICE_ERR_NO_MEMORY;
4392 sw_buf->num_elems = CPU_TO_LE16(1);
4394 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4395 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4396 * If num_acts is greater than 2, then use
4397 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4398 * The num_acts cannot exceed ICE_MAX_LG_ACT. This was ensured at the
4399 * beginning of the function.
4402 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4403 else if (num_acts == 2)
4404 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4406 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4408 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4409 ice_aqc_opc_alloc_res, NULL);
/* firmware returns the wide-table entry index in the response element */
4411 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4413 ice_free(hw, sw_buf);
4418 * ice_add_mac_with_sw_marker - add filter with sw marker
4419 * @hw: pointer to the hardware structure
4420 * @f_info: filter info structure containing the MAC filter information
4421 * @sw_marker: sw marker to tag the Rx descriptor with
4424 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4427 struct ice_switch_info *sw = hw->switch_info;
4428 struct ice_fltr_mgmt_list_entry *m_entry;
4429 struct ice_fltr_list_entry fl_info;
4430 struct LIST_HEAD_TYPE l_head;
4431 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4432 enum ice_status ret;
/* marker rules only support forward-to-VSI MAC filters */
4436 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4437 return ICE_ERR_PARAM;
4439 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4440 return ICE_ERR_PARAM;
4442 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4443 return ICE_ERR_PARAM;
4445 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4446 return ICE_ERR_PARAM;
4447 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4449 /* Add filter if it doesn't exist so then the adding of large
4450 * action always results in update
/* single-entry stack list: ice_add_mac() consumes a list, not an entry */
4453 INIT_LIST_HEAD(&l_head);
4454 fl_info.fltr_info = *f_info;
4455 LIST_ADD(&fl_info.list_entry, &l_head);
/* remember whether the rule pre-existed so cleanup can undo only our add */
4457 entry_exists = false;
4458 ret = ice_add_mac(hw, &l_head);
4459 if (ret == ICE_ERR_ALREADY_EXISTS)
4460 entry_exists = true;
4464 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4465 ice_acquire_lock(rule_lock);
4466 /* Get the book keeping entry for the filter */
4467 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4471 /* If counter action was enabled for this rule then don't enable
4472 * sw marker large action
4474 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4475 ret = ICE_ERR_PARAM;
4479 /* if same marker was added before */
4480 if (m_entry->sw_marker_id == sw_marker) {
4481 ret = ICE_ERR_ALREADY_EXISTS;
4485 /* Allocate a hardware table entry to hold large act. Three actions
4486 * for marker based large action
4488 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4492 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4495 /* Update the switch rule to add the marker action */
4496 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4498 ice_release_lock(rule_lock);
4503 ice_release_lock(rule_lock);
4504 /* only remove entry if it did not exist previously */
4506 ret = ice_remove_mac(hw, &l_head);
/* NOTE(review): this listing is line-sampled; original line numbers are
 * embedded and non-contiguous, so braces, some declarations (e.g.
 * entry_exist, counter_id, lg_act_id) and goto labels are not visible.
 * Comments below describe only what the visible lines show.
 */
4512 * ice_add_mac_with_counter - add filter with counter enabled
4513 * @hw: pointer to the hardware structure
4514 * @f_info: pointer to filter info structure containing the MAC filter
4518 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4520 struct ice_switch_info *sw = hw->switch_info;
4521 struct ice_fltr_mgmt_list_entry *m_entry;
4522 struct ice_fltr_list_entry fl_info;
4523 struct LIST_HEAD_TYPE l_head;
4524 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4525 enum ice_status ret;
/* Validate: only ICE_FWD_TO_VSI actions on MAC lookups with a valid VSI
 * handle are eligible for a counter.
 */
4530 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4531 return ICE_ERR_PARAM;
4533 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4534 return ICE_ERR_PARAM;
4536 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4537 return ICE_ERR_PARAM;
4538 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4540 entry_exist = false;
4542 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4544 /* Add filter if it doesn't exist so then the adding of large
4545 * action always results in update
4547 INIT_LIST_HEAD(&l_head);
4549 fl_info.fltr_info = *f_info;
4550 LIST_ADD(&fl_info.list_entry, &l_head);
4552 ret = ice_add_mac(hw, &l_head);
/* assumes the (sampled-out) line after this sets entry_exist = true —
 * TODO confirm against the full source
 */
4553 if (ret == ICE_ERR_ALREADY_EXISTS)
4558 ice_acquire_lock(rule_lock);
4559 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4561 ret = ICE_ERR_BAD_PTR;
4565 /* Don't enable counter for a filter for which sw marker was enabled */
4566 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4567 ret = ICE_ERR_PARAM;
4571 /* If a counter was already enabled then don't need to add again */
4572 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4573 ret = ICE_ERR_ALREADY_EXISTS;
4577 /* Allocate a hardware table entry to VLAN counter */
4578 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4582 /* Allocate a hardware table entry to hold large act. Two actions for
4583 * counter based large action
4585 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4589 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4592 /* Update the switch rule to add the counter action */
4593 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4595 ice_release_lock(rule_lock);
4600 ice_release_lock(rule_lock);
4601 /* only remove entry if it did not exist previously */
4603 ret = ice_remove_mac(hw, &l_head);
4608 /* This is mapping table entry that maps every word within a given protocol
4609 * structure to the real byte offset as per the specification of that
4611 * for example dst address is 3 words in ethertype header and corresponding
4612 * bytes are 0, 2, 4 in the actual packet header and src address is at 4, 6, 8
4613 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4614 * matching entry describing its field. This needs to be updated if new
4615 * structure is added to that union.
/* NOTE(review): listing is line-sampled; the table's closing "};" is not
 * visible here. Offsets are 16-bit-word byte offsets into each header;
 * tunnel headers (VXLAN/GENEVE/VXLAN-GPE/GTP) start at byte 8, i.e. past
 * their UDP encapsulation fields.
 */
4617 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4618 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4619 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4620 { ICE_ETYPE_OL, { 0 } },
4621 { ICE_VLAN_OFOS, { 0, 2 } },
4622 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4623 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4624 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4625 26, 28, 30, 32, 34, 36, 38 } },
4626 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4627 26, 28, 30, 32, 34, 36, 38 } },
4628 { ICE_TCP_IL, { 0, 2 } },
4629 { ICE_UDP_OF, { 0, 2 } },
4630 { ICE_UDP_ILOS, { 0, 2 } },
4631 { ICE_SCTP_IL, { 0, 2 } },
4632 { ICE_VXLAN, { 8, 10, 12, 14 } },
4633 { ICE_GENEVE, { 8, 10, 12, 14 } },
4634 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4635 { ICE_NVGRE, { 0, 2, 4, 6 } },
4636 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4637 { ICE_PPPOE, { 0, 2, 4, 6 } },
4640 /* The following table describes preferred grouping of recipes.
4641 * If a recipe that needs to be programmed is a superset or matches one of the
4642 * following combinations, then the recipe needs to be chained as per the
/* NOTE(review): maps each software protocol type to its hardware protocol
 * ID. All UDP-based tunnels (VXLAN/GENEVE/VXLAN-GPE/GTP) intentionally
 * share ICE_UDP_OF_HW. Closing "};" not visible in this sampled listing.
 */
4646 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4647 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4648 { ICE_MAC_IL, ICE_MAC_IL_HW },
4649 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4650 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4651 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4652 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4653 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4654 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4655 { ICE_TCP_IL, ICE_TCP_IL_HW },
4656 { ICE_UDP_OF, ICE_UDP_OF_HW },
4657 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4658 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4659 { ICE_VXLAN, ICE_UDP_OF_HW },
4660 { ICE_GENEVE, ICE_UDP_OF_HW },
4661 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4662 { ICE_NVGRE, ICE_GRE_OF_HW },
4663 { ICE_GTP, ICE_UDP_OF_HW },
4664 { ICE_PPPOE, ICE_PPPOE_HW },
4668 * ice_find_recp - find a recipe
4669 * @hw: pointer to the hardware structure
4670 * @lkup_exts: extension sequence to match
4672 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* NOTE(review): sampled listing — some lines (loop-variable declarations,
 * inner braces, a "found" flag) are missing. The match test is an
 * order-insensitive comparison of (prot_id, off) word pairs.
 */
4674 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4676 bool refresh_required = true;
4677 struct ice_sw_recipe *recp;
4680 /* Walk through existing recipes to find a match */
4681 recp = hw->switch_info->recp_list;
4682 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4683 /* If recipe was not created for this ID, in SW bookkeeping,
4684 * check if FW has an entry for this recipe. If the FW has an
4685 * entry update it in our SW bookkeeping and continue with the
4688 if (!recp[i].recp_created)
4689 if (ice_get_recp_frm_fw(hw,
4690 hw->switch_info->recp_list, i,
4694 /* Skip inverse action recipes */
4695 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4696 ICE_AQ_RECIPE_ACT_INV_ACT)
4699 /* if number of words we are looking for match */
4700 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4701 struct ice_fv_word *a = lkup_exts->fv_words;
4702 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
4706 for (p = 0; p < lkup_exts->n_val_words; p++) {
4707 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4709 if (a[p].off == b[q].off &&
4710 a[p].prot_id == b[q].prot_id)
4711 /* Found the "p"th word in the
4716 /* After walking through all the words in the
4717 * "i"th recipe if "p"th word was not found then
4718 * this recipe is not what we are looking for.
4719 * So break out from this loop and try the next
4722 if (q >= recp[i].lkup_exts.n_val_words) {
4727 /* If for "i"th recipe the found was never set to false
4728 * then it means we found our match
4731 return i; /* Return the recipe ID */
4734 return ICE_MAX_NUM_RECIPES;
4738 * ice_prot_type_to_id - get protocol ID from protocol type
4739 * @type: protocol type
4740 * @id: pointer to variable that will receive the ID
4742 * Returns true if found, false otherwise
/* Linear scan of ice_prot_id_tbl, terminated by the ICE_PROTOCOL_LAST
 * sentinel. (Sampled listing: the "return true"/"return false" lines are
 * not visible here.)
 */
4744 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4748 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4749 if (ice_prot_id_tbl[i].type == type) {
4750 *id = ice_prot_id_tbl[i].protocol_id;
4757 * ice_fill_valid_words - fill and count valid words
4758 * @rule: advanced rule with lookup information
4759 * @lkup_exts: byte offset extractions of the words that are valid
4761 * calculate valid words in a lookup rule using mask value
/* NOTE(review): kernel-doc header previously named "ice_find_valid_words";
 * corrected to match the function below. Scans rule->m_u as an array of
 * 16-bit words; every word with a non-zero mask is appended to lkup_exts.
 * Returns the number of words added (visible at line 4790).
 */
4764 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4765 struct ice_prot_lkup_ext *lkup_exts)
4771 if (!ice_prot_type_to_id(rule->type, &prot_id))
4774 word = lkup_exts->n_val_words;
4776 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4777 if (((u16 *)&rule->m_u)[j] &&
4778 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4779 /* No more space to accommodate */
4780 if (word >= ICE_MAX_CHAIN_WORDS)
4782 lkup_exts->fv_words[word].off =
4783 ice_prot_ext[rule->type].offs[j];
4784 lkup_exts->fv_words[word].prot_id =
4785 ice_prot_id_tbl[rule->type].protocol_id;
4786 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4790 ret_val = word - lkup_exts->n_val_words;
4791 lkup_exts->n_val_words = word;
4797 * ice_create_first_fit_recp_def - Create a recipe grouping
4798 * @hw: pointer to the hardware structure
4799 * @lkup_exts: an array of protocol header extractions
4800 * @rg_list: pointer to a list that stores new recipe groups
4801 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4803 * Using first fit algorithm, take all the words that are still not done
4804 * and start grouping them in 4-word groups. Each group makes up one
/* NOTE(review): sampled listing — the grp == NULL initial-allocation
 * branch, NULL check on entry, and counter increments are partly missing.
 * A new group entry is allocated whenever the current group is absent or
 * full (ICE_NUM_WORDS_RECIPE pairs).
 */
4807 static enum ice_status
4808 ice_create_first_fit_recp_def(struct ice_hw *hw,
4809 struct ice_prot_lkup_ext *lkup_exts,
4810 struct LIST_HEAD_TYPE *rg_list,
4813 struct ice_pref_recipe_group *grp = NULL;
4818 /* Walk through every word in the rule to check if it is not done. If so
4819 * then this word needs to be part of a new recipe.
4821 for (j = 0; j < lkup_exts->n_val_words; j++)
4822 if (!ice_is_bit_set(lkup_exts->done, j)) {
4824 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4825 struct ice_recp_grp_entry *entry;
4827 entry = (struct ice_recp_grp_entry *)
4828 ice_malloc(hw, sizeof(*entry));
4830 return ICE_ERR_NO_MEMORY;
4831 LIST_ADD(&entry->l_entry, rg_list);
4832 grp = &entry->r_group;
4836 grp->pairs[grp->n_val_pairs].prot_id =
4837 lkup_exts->fv_words[j].prot_id;
4838 grp->pairs[grp->n_val_pairs].off =
4839 lkup_exts->fv_words[j].off;
4840 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4848 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4849 * @hw: pointer to the hardware structure
4850 * @fv_list: field vector with the extraction sequence information
4851 * @rg_list: recipe groupings with protocol-offset pairs
4853 * Helper function to fill in the field vector indices for protocol-offset
4854 * pairs. These indexes are then ultimately programmed into a recipe.
/* Only the FIRST field vector in fv_list is consulted (line 4867); every
 * protocol/offset pair must be found in it, otherwise ICE_ERR_PARAM.
 */
4856 static enum ice_status
4857 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4858 struct LIST_HEAD_TYPE *rg_list)
4860 struct ice_sw_fv_list_entry *fv;
4861 struct ice_recp_grp_entry *rg;
4862 struct ice_fv_word *fv_ext;
4864 if (LIST_EMPTY(fv_list))
4867 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4868 fv_ext = fv->fv_ptr->ew;
4870 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4873 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4874 struct ice_fv_word *pr;
4879 pr = &rg->r_group.pairs[i];
4880 mask = rg->r_group.mask[i];
4882 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4883 if (fv_ext[j].prot_id == pr->prot_id &&
4884 fv_ext[j].off == pr->off) {
4887 /* Store index of field vector */
4889 /* Mask is given by caller as big
4890 * endian, but sent to FW as little
/* byte-swap the 16-bit mask (BE -> LE) */
4893 rg->fv_mask[i] = mask << 8 | mask >> 8;
4897 /* Protocol/offset could not be found, caller gave an
4901 return ICE_ERR_PARAM;
4909 * ice_find_free_recp_res_idx - find free result indexes for recipe
4910 * @hw: pointer to hardware structure
4911 * @profiles: bitmap of profiles that will be associated with the new recipe
4912 * @free_idx: pointer to variable to receive the free index bitmap
4914 * The algorithm used here is:
4915 * 1. When creating a new recipe, create a set P which contains all
4916 * Profiles that will be associated with our new recipe
4918 * 2. For each Profile p in set P:
4919 * a. Add all recipes associated with Profile p into set R
4920 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4921 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4922 * i. Or just assume they all have the same possible indexes:
4924 * i.e., PossibleIndexes = 0x0000F00000000000
4926 * 3. For each Recipe r in set R:
4927 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4928 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4930 * FreeIndexes will contain the bits indicating the indexes free for use,
4931 * then the code needs to update the recipe[r].used_result_idx_bits to
4932 * indicate which indexes were selected for use by this recipe.
/* NOTE(review): sampled listing — the count accumulation inside the final
 * loop and the return statement are not fully visible; the function
 * appears to return the number of free indexes (per comment at 4980).
 */
4935 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4936 ice_bitmap_t *free_idx)
4938 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4939 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4940 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4944 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4945 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4946 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4947 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* start with every result index marked possible */
4949 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
4950 ice_set_bit(count, possible_idx);
4952 /* For each profile we are going to associate the recipe with, add the
4953 * recipes that are associated with that profile. This will give us
4954 * the set of recipes that our recipe may collide with. Also, determine
4955 * what possible result indexes are usable given this set of profiles.
4958 while (ICE_MAX_NUM_PROFILES >
4959 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4960 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4961 ICE_MAX_NUM_RECIPES);
4962 ice_and_bitmap(possible_idx, possible_idx,
4963 hw->switch_info->prof_res_bm[bit],
4968 /* For each recipe that our new recipe may collide with, determine
4969 * which indexes have been used.
4971 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4972 if (ice_is_bit_set(recipes, bit)) {
4973 ice_or_bitmap(used_idx, used_idx,
4974 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible) */
4978 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4980 /* return number of free indexes */
4983 while (ICE_MAX_FV_WORDS >
4984 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
4993 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4994 * @hw: pointer to hardware structure
4995 * @rm: recipe management list entry
4996 * @match_tun: if field vector index for tunnel needs to be programmed
4997 * @profiles: bitmap of profiles that will be associated.
/* NOTE(review): sampled listing — declarations of free_res_idx, recps,
 * recipe_count, rid, i, error/cleanup labels and several closing braces
 * are not visible. Comments describe only the visible flow: allocate
 * result indexes, build one AQ recipe element per group, optionally add
 * a chaining root recipe, submit via ice_aq_add_recipe under the change
 * lock, then mirror the programmed recipes into SW bookkeeping.
 */
4999 static enum ice_status
5000 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5001 bool match_tun, ice_bitmap_t *profiles)
5003 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5004 struct ice_aqc_recipe_data_elem *tmp;
5005 struct ice_aqc_recipe_data_elem *buf;
5006 struct ice_recp_grp_entry *entry;
5007 enum ice_status status;
5013 /* When more than one recipe are required, another recipe is needed to
5014 * chain them together. Matching a tunnel metadata ID takes up one of
5015 * the match fields in the chaining recipe reducing the number of
5016 * chained recipes by one.
5018 /* check number of free result indices */
5019 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5020 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5022 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5023 free_res_idx, rm->n_grp_count);
5025 if (rm->n_grp_count > 1) {
5026 if (rm->n_grp_count > free_res_idx)
5027 return ICE_ERR_MAX_LIMIT;
5032 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5033 ICE_MAX_NUM_RECIPES,
5036 return ICE_ERR_NO_MEMORY;
5038 buf = (struct ice_aqc_recipe_data_elem *)
5039 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5041 status = ICE_ERR_NO_MEMORY;
5045 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5046 recipe_count = ICE_MAX_NUM_RECIPES;
5047 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5049 if (status || recipe_count == 0)
5052 /* Allocate the recipe resources, and configure them according to the
5053 * match fields from protocol headers and extracted field vectors.
5055 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5056 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5059 status = ice_alloc_recipe(hw, &entry->rid);
5063 /* Clear the result index of the located recipe, as this will be
5064 * updated, if needed, later in the recipe creation process.
5066 tmp[0].content.result_indx = 0;
5068 buf[recps] = tmp[0];
5069 buf[recps].recipe_indx = (u8)entry->rid;
5070 /* if the recipe is a non-root recipe RID should be programmed
5071 * as 0 for the rules to be applied correctly.
5073 buf[recps].content.rid = 0;
5074 ice_memset(&buf[recps].content.lkup_indx, 0,
5075 sizeof(buf[recps].content.lkup_indx),
5078 /* All recipes use look-up index 0 to match switch ID. */
5079 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5080 buf[recps].content.mask[0] =
5081 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5082 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5085 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5086 buf[recps].content.lkup_indx[i] = 0x80;
5087 buf[recps].content.mask[i] = 0;
/* program this group's field-vector indexes and masks into slots 1..n */
5090 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5091 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5092 buf[recps].content.mask[i + 1] =
5093 CPU_TO_LE16(entry->fv_mask[i]);
5096 if (rm->n_grp_count > 1) {
5097 /* Checks to see if there really is a valid result index
5100 if (chain_idx >= ICE_MAX_FV_WORDS) {
5101 ice_debug(hw, ICE_DBG_SW,
5102 "No chain index available\n");
5103 status = ICE_ERR_MAX_LIMIT;
5107 entry->chain_idx = chain_idx;
5108 buf[recps].content.result_indx =
5109 ICE_AQ_RECIPE_RESULT_EN |
5110 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5111 ICE_AQ_RECIPE_RESULT_DATA_M);
5112 ice_clear_bit(chain_idx, result_idx_bm);
5113 chain_idx = ice_find_first_bit(result_idx_bm,
5117 /* fill recipe dependencies */
5118 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5119 ICE_MAX_NUM_RECIPES);
5120 ice_set_bit(buf[recps].recipe_indx,
5121 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5122 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5126 if (rm->n_grp_count == 1) {
5127 rm->root_rid = buf[0].recipe_indx;
5128 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5129 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5130 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5131 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5132 sizeof(buf[0].recipe_bitmap),
5133 ICE_NONDMA_TO_NONDMA);
5135 status = ICE_ERR_BAD_PTR;
5138 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5139 * the recipe which is getting created if specified
5140 * by user. Usually any advanced switch filter, which results
5141 * into new extraction sequence, ended up creating a new recipe
5142 * of type ROOT and usually recipes are associated with profiles
5143 * Switch rule referring newly created recipe, needs to have
5144 * either/or 'fwd' or 'join' priority, otherwise switch rule
5145 * evaluation will not happen correctly. In other words, if
5146 * switch rule to be evaluated on priority basis, then recipe
5147 * needs to have priority, otherwise it will be evaluated last.
5149 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5151 struct ice_recp_grp_entry *last_chain_entry;
5154 /* Allocate the last recipe that will chain the outcomes of the
5155 * other recipes together
5157 status = ice_alloc_recipe(hw, &rid);
5161 buf[recps].recipe_indx = (u8)rid;
5162 buf[recps].content.rid = (u8)rid;
5163 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5164 /* the new entry created should also be part of rg_list to
5165 * make sure we have complete recipe
5167 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5168 sizeof(*last_chain_entry));
5169 if (!last_chain_entry) {
5170 status = ICE_ERR_NO_MEMORY;
5173 last_chain_entry->rid = rid;
5174 ice_memset(&buf[recps].content.lkup_indx, 0,
5175 sizeof(buf[recps].content.lkup_indx),
5177 /* All recipes use look-up index 0 to match switch ID. */
5178 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5179 buf[recps].content.mask[0] =
5180 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5181 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5182 buf[recps].content.lkup_indx[i] =
5183 ICE_AQ_RECIPE_LKUP_IGNORE;
5184 buf[recps].content.mask[i] = 0;
5188 /* update r_bitmap with the recp that is used for chaining */
5189 ice_set_bit(rid, rm->r_bitmap);
5190 /* this is the recipe that chains all the other recipes so it
5191 * should not have a chaining ID to indicate the same
5193 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* the chaining recipe matches every sub-recipe's result index */
5194 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5196 last_chain_entry->fv_idx[i] = entry->chain_idx;
5197 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5198 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5199 ice_set_bit(entry->rid, rm->r_bitmap);
5201 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5202 if (sizeof(buf[recps].recipe_bitmap) >=
5203 sizeof(rm->r_bitmap)) {
5204 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5205 sizeof(buf[recps].recipe_bitmap),
5206 ICE_NONDMA_TO_NONDMA);
5208 status = ICE_ERR_BAD_PTR;
5211 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5213 /* To differentiate among different UDP tunnels, a meta data ID
5217 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5218 buf[recps].content.mask[i] =
5219 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5223 rm->root_rid = (u8)rid;
5225 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5229 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5230 ice_release_change_lock(hw);
5234 /* Every recipe that just got created add it to the recipe
5237 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5238 struct ice_switch_info *sw = hw->switch_info;
5239 bool is_root, idx_found = false;
5240 struct ice_sw_recipe *recp;
5241 u16 idx, buf_idx = 0;
5243 /* find buffer index for copying some data */
5244 for (idx = 0; idx < rm->n_grp_count; idx++)
5245 if (buf[idx].recipe_indx == entry->rid) {
5251 status = ICE_ERR_OUT_OF_RANGE;
5255 recp = &sw->recp_list[entry->rid];
5256 is_root = (rm->root_rid == entry->rid);
5257 recp->is_root = is_root;
5259 recp->root_rid = entry->rid;
5260 recp->big_recp = (is_root && rm->n_grp_count > 1);
5262 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5263 entry->r_group.n_val_pairs *
5264 sizeof(struct ice_fv_word),
5265 ICE_NONDMA_TO_NONDMA);
5267 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5268 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5270 /* Copy non-result fv index values and masks to recipe. This
5271 * call will also update the result recipe bitmask.
5273 ice_collect_result_idx(&buf[buf_idx], recp);
5275 /* for non-root recipes, also copy to the root, this allows
5276 * easier matching of a complete chained recipe
5279 ice_collect_result_idx(&buf[buf_idx],
5280 &sw->recp_list[rm->root_rid]);
5282 recp->n_ext_words = entry->r_group.n_val_pairs;
5283 recp->chain_idx = entry->chain_idx;
5284 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5285 recp->n_grp_count = rm->n_grp_count;
5286 recp->tun_type = rm->tun_type;
5287 recp->recp_created = true;
5302 * ice_create_recipe_group - creates recipe group
5303 * @hw: pointer to hardware structure
5304 * @rm: recipe management list entry
5305 * @lkup_exts: lookup elements
/* Groups the not-yet-done lookup words into recipe groups (first-fit) and
 * copies the extraction words/masks into @rm. (Sampled listing: the
 * status check after the helper call and the return are not visible.)
 */
5307 static enum ice_status
5308 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5309 struct ice_prot_lkup_ext *lkup_exts)
5311 enum ice_status status;
5314 rm->n_grp_count = 0;
5316 /* Create recipes for words that are marked not done by packing them
5319 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5320 &rm->rg_list, &recp_count);
5322 rm->n_grp_count += recp_count;
5323 rm->n_ext_words = lkup_exts->n_val_words;
5324 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5325 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5326 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5327 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5334 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5335 * @hw: pointer to hardware structure
5336 * @lkups: lookup elements or match criteria for the advanced recipe, one
5337 * structure per protocol header
5338 * @lkups_cnt: number of protocols
5339 * @bm: bitmap of field vectors to consider
5340 * @fv_list: pointer to a list that holds the returned field vectors
/* Translates each lookup's protocol type to a HW protocol ID, then asks
 * ice_get_sw_fv_list for field vectors covering all of them. The
 * temporary prot_ids array is freed on every visible path (line 5364).
 */
5342 static enum ice_status
5343 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5344 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5346 enum ice_status status;
5350 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5352 return ICE_ERR_NO_MEMORY;
5354 for (i = 0; i < lkups_cnt; i++)
5355 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5356 status = ICE_ERR_CFG;
5360 /* Find field vectors that include all specified protocol types */
5361 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5364 ice_free(hw, prot_ids);
5369 * ice_add_special_words - Add words that are not protocols, such as metadata
5370 * @rinfo: other information regarding the rule e.g. priority and action info
5371 * @lkup_exts: lookup word structure
/* For tunneled rules, appends one metadata word (tunnel-flag MDID) to the
 * lookup extraction list; fails with ICE_ERR_MAX_LIMIT when the list is
 * already full. Non-tunnel rules are left untouched.
 */
5373 static enum ice_status
5374 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5375 struct ice_prot_lkup_ext *lkup_exts)
5377 /* If this is a tunneled packet, then add recipe index to match the
5378 * tunnel bit in the packet metadata flags.
5380 if (rinfo->tun_type != ICE_NON_TUN) {
5381 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5382 u8 word = lkup_exts->n_val_words++;
5384 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5385 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5387 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5389 return ICE_ERR_MAX_LIMIT;
5396 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5397 * @hw: pointer to hardware structure
5398 * @rinfo: other information regarding the rule e.g. priority and action info
5399 * @bm: pointer to memory for returning the bitmap of field vectors
/* Maps the rule's tunnel type to a profile class, then delegates to
 * ice_get_sw_fv_bitmap. (Sampled listing: "break" statements and the
 * default/ICE_NON_TUN labels are partly missing; all UDP-based tunnels
 * share ICE_PROF_TUN_UDP.)
 */
5402 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5405 enum ice_prof_type type;
5407 switch (rinfo->tun_type) {
5409 type = ICE_PROF_NON_TUN;
5411 case ICE_ALL_TUNNELS:
5412 type = ICE_PROF_TUN_ALL;
5414 case ICE_SW_TUN_VXLAN_GPE:
5415 case ICE_SW_TUN_GENEVE:
5416 case ICE_SW_TUN_VXLAN:
5417 case ICE_SW_TUN_UDP:
5418 case ICE_SW_TUN_GTP:
5419 type = ICE_PROF_TUN_UDP;
5421 case ICE_SW_TUN_NVGRE:
5422 type = ICE_PROF_TUN_GRE;
5424 case ICE_SW_TUN_PPPOE:
5425 type = ICE_PROF_TUN_PPPOE;
5427 case ICE_SW_TUN_AND_NON_TUN:
5429 type = ICE_PROF_ALL;
5433 ice_get_sw_fv_bitmap(hw, type, bm);
5437 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5438 * @hw: pointer to hardware structure
5439 * @lkups: lookup elements or match criteria for the advanced recipe, one
5440 * structure per protocol header
5441 * @lkups_cnt: number of protocols
5442 * @rinfo: other information regarding the rule e.g. priority and action info
5443 * @rid: return the recipe ID of the recipe created
/* NOTE(review): sampled listing — error labels, some status checks, loop
 * variables and the freeing of rm itself are not visible. Visible flow:
 * validate lookups -> collect compatible field vectors -> group into
 * recipes -> reuse an existing matching recipe if found, else create one
 * and associate it with all relevant profiles -> free temporary lists.
 */
5445 static enum ice_status
5446 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5447 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5449 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5450 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5451 struct ice_prot_lkup_ext *lkup_exts;
5452 struct ice_recp_grp_entry *r_entry;
5453 struct ice_sw_fv_list_entry *fvit;
5454 struct ice_recp_grp_entry *r_tmp;
5455 struct ice_sw_fv_list_entry *tmp;
5456 enum ice_status status = ICE_SUCCESS;
5457 struct ice_sw_recipe *rm;
5458 bool match_tun = false;
5462 return ICE_ERR_PARAM;
5464 lkup_exts = (struct ice_prot_lkup_ext *)
5465 ice_malloc(hw, sizeof(*lkup_exts));
5467 return ICE_ERR_NO_MEMORY;
5469 /* Determine the number of words to be matched and if it exceeds a
5470 * recipe's restrictions
5472 for (i = 0; i < lkups_cnt; i++) {
5475 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5476 status = ICE_ERR_CFG;
5477 goto err_free_lkup_exts;
5480 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5482 status = ICE_ERR_CFG;
5483 goto err_free_lkup_exts;
5487 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5489 status = ICE_ERR_NO_MEMORY;
5490 goto err_free_lkup_exts;
5493 /* Get field vectors that contain fields extracted from all the protocol
5494 * headers being programmed.
5496 INIT_LIST_HEAD(&rm->fv_list);
5497 INIT_LIST_HEAD(&rm->rg_list);
5499 /* Get bitmap of field vectors (profiles) that are compatible with the
5500 * rule request; only these will be searched in the subsequent call to
5503 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5505 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5509 /* Group match words into recipes using preferred recipe grouping
5512 status = ice_create_recipe_group(hw, rm, lkup_exts);
5516 /* There is only profile for UDP tunnels. So, it is necessary to use a
5517 * metadata ID flag to differentiate different tunnel types. A separate
5518 * recipe needs to be used for the metadata.
5520 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5521 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5522 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5525 /* set the recipe priority if specified */
5526 rm->priority = rinfo->priority ? rinfo->priority : 0;
5528 /* Find offsets from the field vector. Pick the first one for all the
5531 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5535 /* get bitmap of all profiles the recipe will be associated with */
5536 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5537 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5539 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5540 ice_set_bit((u16)fvit->profile_id, profiles);
5543 /* Create any special protocol/offset pairs, such as looking at tunnel
5544 * bits by extracting metadata
5546 status = ice_add_special_words(rinfo, lkup_exts);
5548 goto err_free_lkup_exts;
5550 /* Look for a recipe which matches our requested fv / mask list */
5551 *rid = ice_find_recp(hw, lkup_exts);
5552 if (*rid < ICE_MAX_NUM_RECIPES)
5553 /* Success if found a recipe that match the existing criteria */
5556 /* Recipe we need does not exist, add a recipe */
5557 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5561 /* Associate all the recipes created with all the profiles in the
5562 * common field vector.
5564 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5566 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5569 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5570 (u8 *)r_bitmap, NULL);
5574 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5575 ICE_MAX_NUM_RECIPES);
5576 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5580 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5583 ice_release_change_lock(hw);
5588 /* Update profile to recipe bitmap array */
5589 ice_memcpy(profile_to_recipe[fvit->profile_id], r_bitmap,
5590 sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
5592 /* Update recipe to profile bitmap array */
5593 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5594 if (ice_is_bit_set(r_bitmap, j))
5595 ice_set_bit((u16)fvit->profile_id,
5596 recipe_to_profile[j]);
5599 *rid = rm->root_rid;
5600 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5601 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* cleanup: drain the recipe-group and field-vector temp lists */
5603 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5604 ice_recp_grp_entry, l_entry) {
5605 LIST_DEL(&r_entry->l_entry);
5606 ice_free(hw, r_entry);
5609 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5611 LIST_DEL(&fvit->list_entry);
5616 ice_free(hw, rm->root_buf);
5621 ice_free(hw, lkup_exts);
5627 * ice_find_dummy_packet - find dummy packet by tunnel type
5629 * @lkups: lookup elements or match criteria for the advanced recipe, one
5630 * structure per protocol header
5631 * @lkups_cnt: number of protocols
5632 * @tun_type: tunnel type from the match criteria
5633 * @pkt: dummy packet to fill according to filter match criteria
5634 * @pkt_len: packet length of dummy packet
5635 * @offsets: pointer to receive the pointer to the offsets for the packet
/* Selection order: GTP and PPPoE short-circuit on tunnel type alone;
 * otherwise the lookups are scanned for TCP/UDP/IPv6 hints and the
 * tunnel type + hints pick the template. (Sampled listing: "return"
 * statements after each assignment group are not visible.)
 */
5638 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5639 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5641 const struct ice_dummy_pkt_offsets **offsets)
5643 bool tcp = false, udp = false, ipv6 = false;
5646 if (tun_type == ICE_SW_TUN_GTP) {
5647 *pkt = dummy_udp_gtp_packet;
5648 *pkt_len = sizeof(dummy_udp_gtp_packet);
5649 *offsets = dummy_udp_gtp_packet_offsets;
5652 if (tun_type == ICE_SW_TUN_PPPOE) {
5653 *pkt = dummy_pppoe_packet;
5654 *pkt_len = sizeof(dummy_pppoe_packet);
5655 *offsets = dummy_pppoe_packet_offsets;
/* scan the match criteria for inner-protocol hints */
5658 for (i = 0; i < lkups_cnt; i++) {
5659 if (lkups[i].type == ICE_UDP_ILOS)
5661 else if (lkups[i].type == ICE_TCP_IL)
5663 else if (lkups[i].type == ICE_IPV6_OFOS)
5667 if (tun_type == ICE_ALL_TUNNELS) {
5668 *pkt = dummy_gre_udp_packet;
5669 *pkt_len = sizeof(dummy_gre_udp_packet);
5670 *offsets = dummy_gre_udp_packet_offsets;
5674 if (tun_type == ICE_SW_TUN_NVGRE) {
5676 *pkt = dummy_gre_tcp_packet;
5677 *pkt_len = sizeof(dummy_gre_tcp_packet);
5678 *offsets = dummy_gre_tcp_packet_offsets;
5682 *pkt = dummy_gre_udp_packet;
5683 *pkt_len = sizeof(dummy_gre_udp_packet);
5684 *offsets = dummy_gre_udp_packet_offsets;
5688 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5689 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5691 *pkt = dummy_udp_tun_tcp_packet;
5692 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5693 *offsets = dummy_udp_tun_tcp_packet_offsets;
5697 *pkt = dummy_udp_tun_udp_packet;
5698 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5699 *offsets = dummy_udp_tun_udp_packet_offsets;
/* non-tunneled fall-through: pick by udp/ipv6/tcp hints */
5704 *pkt = dummy_udp_packet;
5705 *pkt_len = sizeof(dummy_udp_packet);
5706 *offsets = dummy_udp_packet_offsets;
5708 } else if (udp && ipv6) {
5709 *pkt = dummy_udp_ipv6_packet;
5710 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5711 *offsets = dummy_udp_ipv6_packet_offsets;
5713 } else if ((tcp && ipv6) || ipv6) {
5714 *pkt = dummy_tcp_ipv6_packet;
5715 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5716 *offsets = dummy_tcp_ipv6_packet_offsets;
/* default template: plain IPv4 TCP */
5720 *pkt = dummy_tcp_packet;
5721 *pkt_len = sizeof(dummy_tcp_packet);
5722 *offsets = dummy_tcp_packet_offsets;
5726 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5728 * @lkups: lookup elements or match criteria for the advanced recipe, one
5729 * structure per protocol header
5730 * @lkups_cnt: number of protocols
5731 * @s_rule: stores rule information from the match criteria
5732 * @dummy_pkt: dummy packet to fill according to filter match criteria
5733 * @pkt_len: packet length of dummy packet
5734 * @offsets: offset info for the dummy packet
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Copies the selected dummy packet into the switch rule's header buffer and
 * then overlays the caller's match values: for each lookup element, only the
 * bits set in its mask are written into the packet at that protocol's offset.
 * Returns ICE_ERR_PARAM on an unknown protocol or missing offset entry.
 */
5736 static enum ice_status
5737 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5738 struct ice_aqc_sw_rules_elem *s_rule,
5739 const u8 *dummy_pkt, u16 pkt_len,
5740 const struct ice_dummy_pkt_offsets *offsets)
5745 /* Start with a packet with a pre-defined/dummy content. Then, fill
5746 * in the header values to be looked up or matched.
5748 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5750 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5752 for (i = 0; i < lkups_cnt; i++) {
5753 enum ice_protocol_type type;
5754 u16 offset = 0, len = 0, j;
5757 /* find the start of this layer; it should be found since this
5758 * was already checked when search for the dummy packet
5760 type = lkups[i].type;
5761 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5762 if (type == offsets[j].type) {
5763 offset = offsets[j].offset;
5768 /* this should never happen in a correct calling sequence */
5770 return ICE_ERR_PARAM;
/* Determine the header length to patch from the protocol type; the case
 * labels themselves are on elided lines.
 */
5772 switch (lkups[i].type) {
5775 len = sizeof(struct ice_ether_hdr);
5778 len = sizeof(struct ice_ethtype_hdr);
5781 len = sizeof(struct ice_vlan_hdr);
5785 len = sizeof(struct ice_ipv4_hdr);
5789 len = sizeof(struct ice_ipv6_hdr);
5794 len = sizeof(struct ice_l4_hdr);
5797 len = sizeof(struct ice_sctp_hdr);
5800 len = sizeof(struct ice_nvgre);
5805 len = sizeof(struct ice_udp_tnl_hdr);
5809 len = sizeof(struct ice_udp_gtp_hdr);
5812 len = sizeof(struct ice_pppoe_hdr);
5815 return ICE_ERR_PARAM;
5818 /* the length should be a word multiple */
5819 if (len % ICE_BYTES_PER_WORD)
5822 /* We have the offset to the header start, the length, the
5823 * caller's header values and mask. Use this information to
5824 * copy the data into the dummy packet appropriately based on
5825 * the mask. Note that we need to only write the bits as
5826 * indicated by the mask to make sure we don't improperly write
5827 * over any significant packet data.
/* Word-wise read-modify-write: keep template bits where the mask is 0,
 * take the caller's header bits where the mask is 1.
 * NOTE(review): the u16 casts assume 2-byte alignment of pkt+offset —
 * presumably guaranteed by the rule buffer layout; confirm upstream.
 */
5829 for (j = 0; j < len / sizeof(u16); j++)
5830 if (((u16 *)&lkups[i].m_u)[j])
5831 ((u16 *)(pkt + offset))[j] =
5832 (((u16 *)(pkt + offset))[j] &
5833 ~((u16 *)&lkups[i].m_u)[j]) |
5834 (((u16 *)&lkups[i].h_u)[j] &
5835 ((u16 *)&lkups[i].m_u)[j]);
5838 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5844 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5845 * @hw: pointer to the hardware structure
5846 * @tun_type: tunnel type
5847 * @pkt: dummy packet to fill in
5848 * @offsets: offset info for the dummy packet
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * For UDP-based tunnel types, looks up the currently open VXLAN or GENEVE
 * tunnel port and patches it into the dummy packet's outer UDP destination
 * port (the ICE_UDP_OF entry in the offsets table).
 */
5850 static enum ice_status
5851 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5852 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5857 case ICE_SW_TUN_AND_NON_TUN:
5858 case ICE_SW_TUN_VXLAN_GPE:
5859 case ICE_SW_TUN_VXLAN:
5860 case ICE_SW_TUN_UDP:
5861 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5865 case ICE_SW_TUN_GENEVE:
5866 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5871 /* Nothing needs to be done for this tunnel type */
5875 /* Find the outer UDP protocol header and insert the port number */
5876 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5877 if (offsets[i].type == ICE_UDP_OF) {
5878 struct ice_l4_hdr *hdr;
5881 offset = offsets[i].offset;
5882 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Manual byte swap — presumably converting the host-order port to the
 * big-endian on-wire format; confirm open_port is u16 (shift by 8 assumes
 * a 16-bit value).
 */
5883 hdr->dst_port = open_port << 8 | open_port >> 8;
5893 * ice_find_adv_rule_entry - Search a rule entry
5894 * @hw: pointer to the hardware structure
5895 * @lkups: lookup elements or match criteria for the advanced recipe, one
5896 * structure per protocol header
5897 * @lkups_cnt: number of protocols
5898 * @recp_id: recipe ID for which we are finding the rule
5899 * @rinfo: other information regarding the rule e.g. priority and action info
5901 * Helper function to search for a given advance rule entry
5902 * Returns pointer to entry storing the rule if found
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Walks the filter-rule list of the given recipe and returns the management
 * entry whose lookup array (element-wise memcmp) and rule info (sw_act.flag,
 * tun_type, plus conditions on elided lines) match the caller's criteria.
 */
5904 static struct ice_adv_fltr_mgmt_list_entry *
5905 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5906 u16 lkups_cnt, u8 recp_id,
5907 struct ice_adv_rule_info *rinfo)
5909 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5910 struct ice_switch_info *sw = hw->switch_info;
5913 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5914 ice_adv_fltr_mgmt_list_entry, list_entry) {
5915 bool lkups_matched = true;
/* A count mismatch disqualifies the entry up front; otherwise compare each
 * lookup element byte-for-byte.
 */
5917 if (lkups_cnt != list_itr->lkups_cnt)
5919 for (i = 0; i < list_itr->lkups_cnt; i++)
5920 if (memcmp(&list_itr->lkups[i], &lkups[i],
5922 lkups_matched = false;
5925 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5926 rinfo->tun_type == list_itr->rule_info.tun_type &&
5934 * ice_adv_add_update_vsi_list
5935 * @hw: pointer to the hardware structure
5936 * @m_entry: pointer to current adv filter management list entry
5937 * @cur_fltr: filter information from the book keeping entry
5938 * @new_fltr: filter information with the new VSI to be added
5940 * Call AQ command to add or update previously created VSI list with new VSI.
5942 * Helper function to do book keeping associated with adding filter information
5943 * The algorithm to do the book keeping is described below:
5944 * When a VSI needs to subscribe to a given advanced filter
5945 * if only one VSI has been added till now
5946 * Allocate a new VSI list and add two VSIs
5947 * to this list using switch rule command
5948 * Update the previously created switch rule with the
5949 * newly created VSI list ID
5950 * if a VSI list was previously created
5951 * Add the new VSI to the previously created VSI list set
5952 * using the update switch rule command
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Subscribes a new VSI to an existing advanced rule: on the second VSI it
 * converts the single-VSI forward rule into a VSI-list rule (creating the
 * list and its map); thereafter it just adds the VSI to the existing list.
 * Queue/queue-group combinations are rejected as not implemented.
 */
5954 static enum ice_status
5955 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5956 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5957 struct ice_adv_rule_info *cur_fltr,
5958 struct ice_adv_rule_info *new_fltr)
5960 enum ice_status status;
5961 u16 vsi_list_id = 0;
5963 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5964 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5965 return ICE_ERR_NOT_IMPL;
/* Two drop rules for the same match are redundant — treat as duplicate. */
5967 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5968 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5969 return ICE_ERR_ALREADY_EXISTS;
5971 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5972 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5973 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5974 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5975 return ICE_ERR_NOT_IMPL;
5977 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5978 /* Only one entry existed in the mapping and it was not already
5979 * a part of a VSI list. So, create a VSI list with the old and
5982 struct ice_fltr_info tmp_fltr;
5983 u16 vsi_handle_arr[2];
5985 /* A rule already exists with the new VSI being added */
5986 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5987 new_fltr->sw_act.fwd_id.hw_vsi_id)
5988 return ICE_ERR_ALREADY_EXISTS;
5990 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5991 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5992 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
/* Re-point the original switch rule at the new VSI list. */
5998 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5999 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6000 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6001 /* Update the previous switch rule of "forward to VSI" to
6004 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6008 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6009 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6010 m_entry->vsi_list_info =
6011 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
/* else branch: a VSI list already exists — just extend it. */
6014 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6016 if (!m_entry->vsi_list_info)
6019 /* A rule already exists with the new VSI being added */
6020 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6023 /* Update the previously created VSI list set with
6024 * the new VSI ID passed in
6026 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6028 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6030 ice_aqc_opc_update_sw_rules,
6032 /* update VSI list mapping info with new VSI ID */
6034 ice_set_bit(vsi_handle,
6035 m_entry->vsi_list_info->vsi_map);
6038 m_entry->vsi_count++;
6043 * ice_add_adv_rule - helper function to create an advanced switch rule
6044 * @hw: pointer to the hardware structure
6045 * @lkups: information on the words that needs to be looked up. All words
6046 * together makes one recipe
6047 * @lkups_cnt: num of entries in the lkups array
6048 * @rinfo: other information related to the rule that needs to be programmed
6049 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6050 * ignored in case of error.
6052 * This function can program only 1 rule at a time. The lkups is used to
6053 * describe the all the words that forms the "lookup" portion of the recipe.
6054 * These words can span multiple protocols. Callers to this function need to
6055 * pass in a list of protocol headers with lookup information along and mask
6056 * that determines which words are valid from the given protocol header.
6057 * rinfo describes other information related to this rule such as forwarding
6058 * IDs, priority of this rule, etc.
/* NOTE(review): listing has elided lines (declarations, some braces and
 * conditions); code kept verbatim, only comments added.
 * Programs one advanced switch rule: validates the lookups, finds/creates
 * the recipe, builds the single-action rule with a dummy packet, sends it
 * via admin queue, and records it in the software bookkeeping list. If the
 * same rule already exists, it instead adds this VSI to the rule's VSI list.
 */
6061 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6062 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6063 struct ice_rule_query_data *added_entry)
6065 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6066 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6067 const struct ice_dummy_pkt_offsets *pkt_offsets;
6068 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6069 struct LIST_HEAD_TYPE *rule_head;
6070 struct ice_switch_info *sw;
6071 enum ice_status status;
6072 const u8 *pkt = NULL;
6077 /* Initialize profile to result index bitmap */
6078 if (!hw->switch_info->prof_res_bm_init) {
6079 hw->switch_info->prof_res_bm_init = 1;
6080 ice_init_prof_result_bm(hw);
6084 return ICE_ERR_PARAM;
/* Phase 1: validation — count non-zero mask words and bound-check them. */
6086 /* get # of words we need to match */
6088 for (i = 0; i < lkups_cnt; i++) {
6091 ptr = (u16 *)&lkups[i].m_u;
6092 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6096 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6097 return ICE_ERR_PARAM;
6099 /* make sure that we can locate a dummy packet */
6100 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6103 status = ICE_ERR_PARAM;
6104 goto err_ice_add_adv_rule;
6107 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6108 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6109 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6110 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6113 vsi_handle = rinfo->sw_act.vsi_handle;
6114 if (!ice_is_vsi_valid(hw, vsi_handle))
6115 return ICE_ERR_PARAM;
6117 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6118 rinfo->sw_act.fwd_id.hw_vsi_id =
6119 ice_get_hw_vsi_num(hw, vsi_handle);
6120 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6121 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Phase 2: obtain the recipe and check for an existing identical rule; if
 * found, only the VSI list is updated and the existing IDs are returned.
 */
6123 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6126 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6128 /* we have to add VSI to VSI_LIST and increment vsi_count.
6129 * Also Update VSI list so that we can change forwarding rule
6130 * if the rule already exists, we will check if it exists with
6131 * same vsi_id, if not then add it to the VSI list if it already
6132 * exists if not then create a VSI list and add the existing VSI
6133 * ID and the new VSI ID to the list
6134 * We will add that VSI to the list
6136 status = ice_adv_add_update_vsi_list(hw, m_entry,
6137 &m_entry->rule_info,
6140 added_entry->rid = rid;
6141 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6142 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Phase 3: build the rule buffer and encode the single action word. */
6146 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6147 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6149 return ICE_ERR_NO_MEMORY;
6150 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6151 switch (rinfo->sw_act.fltr_act) {
6152 case ICE_FWD_TO_VSI:
6153 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6154 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6155 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6158 act |= ICE_SINGLE_ACT_TO_Q;
6159 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6160 ICE_SINGLE_ACT_Q_INDEX_M;
6162 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as a power-of-two region exponent. */
6163 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6164 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6165 act |= ICE_SINGLE_ACT_TO_Q;
6166 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6167 ICE_SINGLE_ACT_Q_INDEX_M;
6168 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6169 ICE_SINGLE_ACT_Q_REGION_M;
6171 case ICE_DROP_PACKET:
6172 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6173 ICE_SINGLE_ACT_VALID_BIT;
6176 status = ICE_ERR_CFG;
6177 goto err_ice_add_adv_rule;
6180 /* set the rule LOOKUP type based on caller specified 'RX'
6181 * instead of hardcoding it to be either LOOKUP_TX/RX
6183 * for 'RX' set the source to be the port number
6184 * for 'TX' set the source to be the source HW VSI number (determined
6188 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6189 s_rule->pdata.lkup_tx_rx.src =
6190 CPU_TO_LE16(hw->port_info->lport);
6192 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6193 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6196 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6197 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Phase 4: fill the packet header, patch tunnel port if needed, and
 * program the rule through the admin queue.
 */
6199 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6200 pkt_len, pkt_offsets);
6202 goto err_ice_add_adv_rule;
6204 if (rinfo->tun_type != ICE_NON_TUN) {
6205 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6206 s_rule->pdata.lkup_tx_rx.hdr,
6209 goto err_ice_add_adv_rule;
6212 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6213 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6216 goto err_ice_add_adv_rule;
/* Phase 5: software bookkeeping — duplicate the lookups and record the
 * rule ID returned by firmware in the recipe's filter list.
 */
6217 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6218 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6220 status = ICE_ERR_NO_MEMORY;
6221 goto err_ice_add_adv_rule;
6224 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6225 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6226 ICE_NONDMA_TO_NONDMA);
6227 if (!adv_fltr->lkups) {
6228 status = ICE_ERR_NO_MEMORY;
6229 goto err_ice_add_adv_rule;
6232 adv_fltr->lkups_cnt = lkups_cnt;
6233 adv_fltr->rule_info = *rinfo;
6234 adv_fltr->rule_info.fltr_rule_id =
6235 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6236 sw = hw->switch_info;
6237 sw->recp_list[rid].adv_rule = true;
6238 rule_head = &sw->recp_list[rid].filt_rules;
6240 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6241 struct ice_fltr_info tmp_fltr;
6243 tmp_fltr.fltr_rule_id =
6244 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6245 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6246 tmp_fltr.fwd_id.hw_vsi_id =
6247 ice_get_hw_vsi_num(hw, vsi_handle);
6248 tmp_fltr.vsi_handle = vsi_handle;
6249 /* Update the previous switch rule of "forward to VSI" to
6252 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6254 goto err_ice_add_adv_rule;
6255 adv_fltr->vsi_count = 1;
6258 /* Add rule entry to book keeping list */
6259 LIST_ADD(&adv_fltr->list_entry, rule_head);
6261 added_entry->rid = rid;
6262 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6263 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Unified error exit: free partially built bookkeeping and rule buffer. */
6265 err_ice_add_adv_rule:
6266 if (status && adv_fltr) {
6267 ice_free(hw, adv_fltr->lkups);
6268 ice_free(hw, adv_fltr);
6271 ice_free(hw, s_rule);
6277 * ice_adv_rem_update_vsi_list
6278 * @hw: pointer to the hardware structure
6279 * @vsi_handle: VSI handle of the VSI to remove
6280 * @fm_list: filter management entry for which the VSI list management needs to
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Unsubscribes a VSI from an advanced VSI-list rule: removes the VSI from
 * the list, and when exactly one subscriber remains, converts the rule back
 * to a plain "forward to VSI" rule and deletes the now-unused VSI list.
 */
6283 static enum ice_status
6284 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6285 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6287 struct ice_vsi_list_map_info *vsi_list_info;
6288 enum ice_sw_lkup_type lkup_type;
6289 enum ice_status status;
6292 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6293 fm_list->vsi_count == 0)
6294 return ICE_ERR_PARAM;
6296 /* A rule with the VSI being removed does not exist */
6297 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6298 return ICE_ERR_DOES_NOT_EXIST;
6300 lkup_type = ICE_SW_LKUP_LAST;
6301 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* 'true' presumably selects removal in ice_update_vsi_list_rule — confirm
 * against that function's signature (remove flag).
 */
6302 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6303 ice_aqc_opc_update_sw_rules,
6308 fm_list->vsi_count--;
6309 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6310 vsi_list_info = fm_list->vsi_list_info;
6311 if (fm_list->vsi_count == 1) {
6312 struct ice_fltr_info tmp_fltr;
/* Find the sole remaining subscriber so the rule can target it directly. */
6315 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6317 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6318 return ICE_ERR_OUT_OF_RANGE;
6320 /* Make sure VSI list is empty before removing it below */
6321 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6323 ice_aqc_opc_update_sw_rules,
6327 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6328 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6329 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6330 tmp_fltr.fwd_id.hw_vsi_id =
6331 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6332 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6333 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6335 /* Update the previous switch rule of "MAC forward to VSI" to
6336 * "MAC fwd to VSI list"
6338 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6340 ice_debug(hw, ICE_DBG_SW,
6341 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6342 tmp_fltr.fwd_id.hw_vsi_id, status);
6346 /* Remove the VSI list since it is no longer used */
6347 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6349 ice_debug(hw, ICE_DBG_SW,
6350 "Failed to remove VSI list %d, error %d\n",
6351 vsi_list_id, status);
/* Drop the software map entry for the deleted VSI list. */
6355 LIST_DEL(&vsi_list_info->list_entry);
6356 ice_free(hw, vsi_list_info);
6357 fm_list->vsi_list_info = NULL;
6364 * ice_rem_adv_rule - removes existing advanced switch rule
6365 * @hw: pointer to the hardware structure
6366 * @lkups: information on the words that needs to be looked up. All words
6367 * together makes one recipe
6368 * @lkups_cnt: num of entries in the lkups array
6369 * @rinfo: the pointer to the rule information for the rule
6371 * This function can be used to remove 1 rule at a time. The lkups is
6372 * used to describe all the words that forms the "lookup" portion of the
6373 * rule. These words can span multiple protocols. Callers to this function
6374 * need to pass in a list of protocol headers with lookup information along
6375 * and mask that determines which words are valid from the given protocol
6376 * header. rinfo describes other information related to this rule such as
6377 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Removes one advanced rule: rebuilds the lookup-extraction words to locate
 * the recipe, finds the bookkeeping entry, updates or tears down the VSI
 * list as needed, and when the rule has no subscribers left sends a
 * remove-switch-rules admin command and frees the bookkeeping entry.
 */
6380 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6381 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6383 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6384 struct ice_prot_lkup_ext lkup_exts;
6385 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6386 enum ice_status status = ICE_SUCCESS;
6387 bool remove_rule = false;
6388 u16 i, rid, vsi_handle;
6390 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6391 for (i = 0; i < lkups_cnt; i++) {
6394 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6397 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6402 /* Create any special protocol/offset pairs, such as looking at tunnel
6403 * bits by extracting metadata
6405 status = ice_add_special_words(rinfo, &lkup_exts);
6409 rid = ice_find_recp(hw, &lkup_exts);
6410 /* If did not find a recipe that match the existing criteria */
6411 if (rid == ICE_MAX_NUM_RECIPES)
6412 return ICE_ERR_PARAM;
6414 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6415 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6416 /* the rule is already removed */
6419 ice_acquire_lock(rule_lock);
/* Non-VSI-list rules are removed outright (flag set on an elided line);
 * VSI-list rules first drop this VSI from the list.
 */
6420 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6422 } else if (list_elem->vsi_count > 1) {
6423 list_elem->vsi_list_info->ref_cnt--;
6424 remove_rule = false;
6425 vsi_handle = rinfo->sw_act.vsi_handle;
6426 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6428 vsi_handle = rinfo->sw_act.vsi_handle;
6429 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6431 ice_release_lock(rule_lock);
6434 if (list_elem->vsi_count == 0)
6437 ice_release_lock(rule_lock);
6439 struct ice_aqc_sw_rules_elem *s_rule;
/* Hardware removal: a header-less rule element carrying only the index of
 * the rule to delete.
 */
6442 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6444 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6447 return ICE_ERR_NO_MEMORY;
6448 s_rule->pdata.lkup_tx_rx.act = 0;
6449 s_rule->pdata.lkup_tx_rx.index =
6450 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6451 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6452 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6454 ice_aqc_opc_remove_sw_rules, NULL);
6455 if (status == ICE_SUCCESS) {
6456 ice_acquire_lock(rule_lock);
6457 LIST_DEL(&list_elem->list_entry);
6458 ice_free(hw, list_elem->lkups);
6459 ice_free(hw, list_elem);
6460 ice_release_lock(rule_lock);
6462 ice_free(hw, s_rule);
6468 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6469 * @hw: pointer to the hardware structure
6470 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6472 * This function is used to remove 1 rule at a time. The removal is based on
6473 * the remove_entry parameter. This function will remove rule for a given
6474 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Looks up the advanced rule with the given fltr_rule_id in the recipe's
 * filter list and delegates removal to ice_rem_adv_rule, substituting the
 * caller's VSI handle into a copy of the stored rule info.
 */
6477 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6478 struct ice_rule_query_data *remove_entry)
6480 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6481 struct LIST_HEAD_TYPE *list_head;
6482 struct ice_adv_rule_info rinfo;
6483 struct ice_switch_info *sw;
6485 sw = hw->switch_info;
/* An uncreated recipe cannot own the rule — invalid request. */
6486 if (!sw->recp_list[remove_entry->rid].recp_created)
6487 return ICE_ERR_PARAM;
6488 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6489 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6491 if (list_itr->rule_info.fltr_rule_id ==
6492 remove_entry->rule_id) {
6493 rinfo = list_itr->rule_info;
6494 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6495 return ice_rem_adv_rule(hw, list_itr->lkups,
6496 list_itr->lkups_cnt, &rinfo);
/* Fall-through: rule ID not found in this recipe's list. */
6499 return ICE_ERR_PARAM;
6503 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6505 * @hw: pointer to the hardware structure
6506 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6508 * This function is used to remove all the rules for a given VSI and as soon
6509 * as removing a rule fails, it will return immediately with the error code,
6510 * else it will return ICE_SUCCESS
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Iterates every created recipe that carries advanced rules and removes,
 * for the given VSI, each rule whose VSI list contains that VSI. Stops and
 * returns on the first removal failure.
 */
6513 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6515 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6516 struct ice_vsi_list_map_info *map_info;
6517 struct LIST_HEAD_TYPE *list_head;
6518 struct ice_adv_rule_info rinfo;
6519 struct ice_switch_info *sw;
6520 enum ice_status status;
6521 u16 vsi_list_id = 0;
6524 sw = hw->switch_info;
6525 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6526 if (!sw->recp_list[rid].recp_created)
6528 if (!sw->recp_list[rid].adv_rule)
6530 list_head = &sw->recp_list[rid].filt_rules;
6532 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6533 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Only rules whose VSI-list map includes this VSI are removed (the skip
 * condition when map_info is NULL is on an elided line).
 */
6534 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6538 rinfo = list_itr->rule_info;
6539 rinfo.sw_act.vsi_handle = vsi_handle;
6540 status = ice_rem_adv_rule(hw, list_itr->lkups,
6541 list_itr->lkups_cnt, &rinfo);
6551 * ice_replay_fltr - Replay all the filters stored by a specific list head
6552 * @hw: pointer to the hardware structure
6553 * @list_head: list for which filters needs to be replayed
6554 * @recp_id: Recipe ID for which rules need to be replayed
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Replays all filters of one recipe: moves the bookkeeping list aside into
 * a temporary head (so re-adding does not hit "already exists"), re-adds
 * each filter — per-VSI for VSI-list entries — then frees the moved list.
 */
6556 static enum ice_status
6557 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6559 struct ice_fltr_mgmt_list_entry *itr;
6560 struct LIST_HEAD_TYPE l_head;
6561 enum ice_status status = ICE_SUCCESS;
6563 if (LIST_EMPTY(list_head))
6566 /* Move entries from the given list_head to a temporary l_head so that
6567 * they can be replayed. Otherwise when trying to re-add the same
6568 * filter, the function will return already exists
6570 LIST_REPLACE_INIT(list_head, &l_head)
6572 /* Mark the given list_head empty by reinitializing it so filters
6573 * could be added again by *handler
6575 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6577 struct ice_fltr_list_entry f_entry;
6579 f_entry.fltr_info = itr->fltr_info;
6580 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6581 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6582 if (status != ICE_SUCCESS)
6587 /* Add a filter per VSI separately */
/* Loop over the VSI map (loop construct on elided lines): pull each VSI
 * out of the map and re-add the filter targeted at that VSI.
 */
6592 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6594 if (!ice_is_vsi_valid(hw, vsi_handle))
6597 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6598 f_entry.fltr_info.vsi_handle = vsi_handle;
6599 f_entry.fltr_info.fwd_id.hw_vsi_id =
6600 ice_get_hw_vsi_num(hw, vsi_handle);
6601 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6602 if (recp_id == ICE_SW_LKUP_VLAN)
6603 status = ice_add_vlan_internal(hw, &f_entry);
6605 status = ice_add_rule_internal(hw, recp_id,
6607 if (status != ICE_SUCCESS)
6612 /* Clear the filter management list */
6613 ice_rem_sw_rule_info(hw, &l_head);
6618 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6619 * @hw: pointer to the hardware structure
6621 * NOTE: This function does not clean up partially added filters on error.
6622 * It is up to caller of the function to issue a reset or fail early.
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Replays the filters of every recipe's filt_rules list in recipe order,
 * aborting on the first failure (partial replay is left as-is per the
 * kernel-doc note above).
 */
6624 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6626 struct ice_switch_info *sw = hw->switch_info;
6627 enum ice_status status = ICE_SUCCESS;
6630 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6631 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6633 status = ice_replay_fltr(hw, i, head);
6634 if (status != ICE_SUCCESS)
6641 * ice_replay_vsi_fltr - Replay filters for requested VSI
6642 * @hw: pointer to the hardware structure
6643 * @vsi_handle: driver VSI handle
6644 * @recp_id: Recipe ID for which rules need to be replayed
6645 * @list_head: list for which filters need to be replayed
6647 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6648 * It is required to pass valid VSI handle.
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Replays one recipe's filters for a single VSI: direct single-VSI filters
 * whose handle matches are re-added as-is; VSI-list filters that include
 * the VSI are re-added as per-VSI "forward to VSI" filters after clearing
 * the VSI's bit so the add path can set it again.
 */
6650 static enum ice_status
6651 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6652 struct LIST_HEAD_TYPE *list_head)
6654 struct ice_fltr_mgmt_list_entry *itr;
6655 enum ice_status status = ICE_SUCCESS;
6658 if (LIST_EMPTY(list_head))
6660 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6662 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6664 struct ice_fltr_list_entry f_entry;
6666 f_entry.fltr_info = itr->fltr_info;
6667 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6668 itr->fltr_info.vsi_handle == vsi_handle) {
6669 /* update the src in case it is VSI num */
6670 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6671 f_entry.fltr_info.src = hw_vsi_id;
6672 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6673 if (status != ICE_SUCCESS)
/* VSI-list path: skip entries that do not reference this VSI. */
6677 if (!itr->vsi_list_info ||
6678 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6680 /* Clearing it so that the logic can add it back */
6681 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6682 f_entry.fltr_info.vsi_handle = vsi_handle;
6683 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6684 /* update the src in case it is VSI num */
6685 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6686 f_entry.fltr_info.src = hw_vsi_id;
6687 if (recp_id == ICE_SW_LKUP_VLAN)
6688 status = ice_add_vlan_internal(hw, &f_entry);
6690 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6691 if (status != ICE_SUCCESS)
6699 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6700 * @hw: pointer to the hardware structure
6701 * @vsi_handle: driver VSI handle
6702 * @list_head: list for which filters need to be replayed
6704 * Replay the advanced rule for the given VSI.
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Replays every advanced rule in the list whose stored sw_act targets the
 * given VSI by re-issuing ice_add_adv_rule with the saved lookups/info.
 */
6706 static enum ice_status
6707 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6708 struct LIST_HEAD_TYPE *list_head)
6710 struct ice_rule_query_data added_entry = { 0 };
6711 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6712 enum ice_status status = ICE_SUCCESS;
6714 if (LIST_EMPTY(list_head))
6716 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6718 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6719 u16 lk_cnt = adv_fltr->lkups_cnt;
6721 if (vsi_handle != rinfo->sw_act.vsi_handle)
6723 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6732 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6733 * @hw: pointer to the hardware structure
6734 * @vsi_handle: driver VSI handle
6736 * Replays filters for requested VSI via vsi_handle.
/* NOTE(review): listing has elided lines; code kept verbatim, comments only.
 * Per-VSI replay entry point: for each recipe, replays its filt_replay_rules
 * via the regular path, or via the advanced-rule path when the recipe is
 * flagged adv_rule. Stops on the first failure.
 */
6738 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6740 struct ice_switch_info *sw = hw->switch_info;
6741 enum ice_status status;
6744 /* Update the recipes that were created */
6745 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6746 struct LIST_HEAD_TYPE *head;
6748 head = &sw->recp_list[i].filt_replay_rules;
6749 if (!sw->recp_list[i].adv_rule)
6750 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6752 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6753 if (status != ICE_SUCCESS)
6761 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6762 * @hw: pointer to the HW struct
6764 * Deletes the filter replay rules.
6766 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6768 struct ice_switch_info *sw = hw->switch_info;
6774 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6775 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6776 struct LIST_HEAD_TYPE *l_head;
6778 l_head = &sw->recp_list[i].filt_replay_rules;
6779 if (!sw->recp_list[i].adv_rule)
6780 ice_rem_sw_rule_info(hw, l_head);
6782 ice_rem_adv_rule_info(hw, l_head);