1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Buffer-size helpers for variable-length switch-rule admin-queue commands.
 * Each starts from sizeof(struct ice_aqc_sw_rules_elem), removes the union'd
 * payload ("pdata"), then adds back only the payload layout actually used.
 * NOTE(review): the trailing "- 1" and the "- sizeof(...->act)" /
 * "- sizeof(...->vsi)" terms presumably compensate for one-element
 * placeholder members declared inside those payload structs -- confirm
 * against the struct definitions in the admin-queue header.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Rx/Tx lookup rule with no dummy Ethernet header appended */
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* large-action rule sized for n action entries */
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule sized for n VSI entries */
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
74 u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
109 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
111 { ICE_ETYPE_OL, 12 },
112 { ICE_IPV4_OFOS, 14 },
116 { ICE_UDP_ILOS, 76 },
117 { ICE_PROTOCOL_LAST, 0 },
121 u8 dummy_gre_udp_packet[] = {
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_OL 12 */
128 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x2F, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
135 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x00, 0x00, 0x00,
142 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
149 0x00, 0x08, 0x00, 0x00,
153 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
155 { ICE_ETYPE_OL, 12 },
156 { ICE_IPV4_OFOS, 14 },
163 { ICE_PROTOCOL_LAST, 0 },
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00,
201 0x00, 0x00, 0x00, 0x00
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
214 { ICE_UDP_ILOS, 84 },
215 { ICE_PROTOCOL_LAST, 0 },
219 u8 dummy_udp_tun_udp_packet[] = {
220 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x08, 0x00, /* ICE_ETYPE_OL 12 */
226 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
227 0x00, 0x01, 0x00, 0x00,
228 0x00, 0x11, 0x00, 0x00,
229 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00,
232 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
233 0x00, 0x3a, 0x00, 0x00,
235 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
236 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
243 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
244 0x00, 0x01, 0x00, 0x00,
245 0x00, 0x11, 0x00, 0x00,
246 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
250 0x00, 0x08, 0x00, 0x00,
254 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
256 { ICE_ETYPE_OL, 12 },
257 { ICE_IPV4_OFOS, 14 },
258 { ICE_UDP_ILOS, 34 },
259 { ICE_PROTOCOL_LAST, 0 },
263 dummy_udp_packet[] = {
264 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
265 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00,
268 0x08, 0x00, /* ICE_ETYPE_OL 12 */
270 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
271 0x00, 0x01, 0x00, 0x00,
272 0x00, 0x11, 0x00, 0x00,
273 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
276 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
277 0x00, 0x08, 0x00, 0x00,
279 0x00, 0x00, /* 2 bytes for 4 byte alignment */
283 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
285 { ICE_ETYPE_OL, 12 },
286 { ICE_IPV4_OFOS, 14 },
288 { ICE_PROTOCOL_LAST, 0 },
292 dummy_tcp_packet[] = {
293 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
294 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00,
297 0x08, 0x00, /* ICE_ETYPE_OL 12 */
299 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
300 0x00, 0x01, 0x00, 0x00,
301 0x00, 0x06, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
306 0x00, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
308 0x50, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, /* 2 bytes for 4 byte alignment */
315 struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
317 { ICE_ETYPE_OL, 12 },
318 { ICE_IPV6_OFOS, 14 },
320 { ICE_PROTOCOL_LAST, 0 },
324 dummy_tcp_ipv6_packet[] = {
325 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
326 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
329 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
331 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
332 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
343 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x50, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, /* 2 bytes for 4 byte alignment */
352 struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
354 { ICE_ETYPE_OL, 12 },
355 { ICE_IPV6_OFOS, 14 },
356 { ICE_UDP_ILOS, 54 },
357 { ICE_PROTOCOL_LAST, 0 },
361 dummy_udp_ipv6_packet[] = {
362 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
363 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00,
366 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
368 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
369 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
380 0x00, 0x08, 0x00, 0x00,
382 0x00, 0x00, /* 2 bytes for 4 byte alignment */
386 struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
388 { ICE_IPV4_OFOS, 14 },
391 { ICE_PROTOCOL_LAST, 0 },
395 dummy_udp_gtp_packet[] = {
396 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
401 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
402 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x11, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
408 0x00, 0x1c, 0x00, 0x00,
410 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
411 0x00, 0x00, 0x00, 0x00,
412 0x00, 0x00, 0x00, 0x85,
414 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
415 0x00, 0x00, 0x00, 0x00,
419 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
422 { ICE_PROTOCOL_LAST, 0 },
426 dummy_pppoe_packet[] = {
427 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
428 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, 0x00, 0x00,
432 0x11, 0x00, 0x00, 0x01, /* ICE_PPPOE 14 */
433 0x00, 0x4e, 0x00, 0x21,
435 0x45, 0x00, 0x00, 0x30, /* PDU */
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x11, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
/* File-scope caches of the recipe<->profile association, refreshed from FW by
 * ice_get_recp_to_prof_map(). NOTE(review): these are shared across all
 * ice_hw instances rather than stored per-hw -- confirm the single-adapter
 * assumption if multiple devices are driven from one process.
 */
442 /* this is a recipe to profile association bitmap */
443 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
444 ICE_MAX_NUM_PROFILES);
446 /* this is a profile to recipe association bitmap */
447 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
448 ICE_MAX_NUM_RECIPES);
/* forward declaration: defined below, but called by ice_get_recp_frm_fw() */
450 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
453 * ice_collect_result_idx - copy result index values
454 * @buf: buffer that contains the result index
455 * @recp: the recipe struct to copy data into
457 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
458 struct ice_sw_recipe *recp)
/* Record the result index only when FW marked it valid; the enable bit is
 * masked off so the bare index lands in the recipe's result-index bitmap.
 */
460 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
461 ice_set_bit(buf->content.result_indx &
462 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
466 * ice_init_possible_res_bm - initialize possible result bitmap
467 * @pos_result_bm: pointer to the bitmap to initialize
469 static void ice_init_possible_res_bm(ice_bitmap_t *pos_result_bm)
473 ice_zero_bitmap(pos_result_bm, ICE_MAX_FV_WORDS);
/* Set a bit for every FV word index flagged in the ICE_POSSIBLE_RES_IDX
 * constant, so callers can test result-index candidacy with a single
 * ice_is_bit_set() call.
 */
475 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
476 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
477 ice_set_bit(bit, pos_result_bm);
481 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
482 * @hw: pointer to hardware structure
483 * @recps: struct that we need to populate
484 * @rid: recipe ID that we are populating
485 * @refresh_required: true if we should get recipe to profile mapping from FW
487 * This function is used to populate all the necessary entries into our
488 * bookkeeping so that we have a current list of all the recipes that are
489 * programmed in the firmware.
491 static enum ice_status
492 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
493 bool *refresh_required)
495 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
496 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
497 struct ice_aqc_recipe_data_elem *tmp;
498 u16 num_recps = ICE_MAX_NUM_RECIPES;
499 struct ice_prot_lkup_ext *lkup_exts;
500 u16 i, sub_recps, fv_word_idx = 0;
501 enum ice_status status;
/* result_bm accumulates the result indexes found across every sub-recipe;
 * it is copied into recps[rid].res_idxs once the walk completes.
 */
503 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
504 ice_init_possible_res_bm(possible_idx);
506 /* we need a buffer big enough to accommodate all the recipes */
507 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
508 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
510 return ICE_ERR_NO_MEMORY;
/* seed the query with the requested recipe ID; num_recps is presumably
 * in/out (capacity in, returned count out) -- confirm against
 * ice_aq_get_recipe().
 */
512 tmp[0].recipe_indx = rid;
513 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
514 /* non-zero status meaning recipe doesn't exist */
518 /* Get recipe to profile map so that we can get the fv from lkups that
519 * we read for a recipe from FW. Since we want to minimize the number of
520 * times we make this FW call, just make one call and cache the copy
521 * until a new recipe is added. This operation is only required the
522 * first time to get the changes from FW. Then to search existing
523 * entries we don't need to update the cache again until another recipe
526 if (*refresh_required) {
527 ice_get_recp_to_prof_map(hw);
528 *refresh_required = false;
531 /* Start populating all the entries for recps[rid] based on lkups from
532 * firmware. Note that we are only creating the root recipe in our
535 lkup_exts = &recps[rid].lkup_exts;
/* walk every sub-recipe FW returned for this (possibly chained) recipe */
537 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
538 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
539 struct ice_recp_grp_entry *rg_entry;
540 u8 prof_id, idx, prot = 0;
544 rg_entry = (struct ice_recp_grp_entry *)
545 ice_malloc(hw, sizeof(*rg_entry));
547 status = ICE_ERR_NO_MEMORY;
551 idx = root_bufs.recipe_indx;
552 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
554 /* Mark all result indices in this chain */
555 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
556 ice_set_bit(root_bufs.content.result_indx &
557 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
559 /* get the first profile that is associated with rid */
560 prof_id = ice_find_first_bit(recipe_to_profile[idx],
561 ICE_MAX_NUM_PROFILES);
562 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
563 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
565 rg_entry->fv_idx[i] = lkup_indx;
566 rg_entry->fv_mask[i] =
567 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
569 /* If the recipe is a chained recipe then all its
570 * child recipe's result will have a result index.
571 * To fill fv_words we should not use those result
572 * index, we only need the protocol ids and offsets.
573 * We will skip all the fv_idx which stores result
574 * index in them. We also need to skip any fv_idx which
575 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
576 * valid offset value.
578 if (ice_is_bit_set(possible_idx, rg_entry->fv_idx[i]) ||
579 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
580 rg_entry->fv_idx[i] == 0)
/* translate the FV index into a (protocol ID, offset) pair */
583 ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
584 rg_entry->fv_idx[i], &prot, &off);
585 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
586 lkup_exts->fv_words[fv_word_idx].off = off;
589 /* populate rg_list with the data from the child entry of this
592 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
594 /* Propagate some data to the recipe database */
595 recps[idx].is_root = is_root;
596 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
597 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
598 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
599 recps[idx].chain_idx = root_bufs.content.result_indx &
600 ~ICE_AQ_RECIPE_RESULT_EN;
601 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
603 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
609 /* Only do the following for root recipes entries */
610 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
611 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
612 recps[idx].root_rid = root_bufs.content.rid &
613 ~ICE_AQ_RECIPE_ID_IS_ROOT;
614 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
617 /* Complete initialization of the root recipe entry */
618 lkup_exts->n_val_words = fv_word_idx;
619 recps[rid].big_recp = (num_recps > 1);
620 recps[rid].n_grp_count = num_recps;
/* keep a copy of the raw FW buffer so the recipe can be re-programmed */
621 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
622 ice_calloc(hw, recps[rid].n_grp_count,
623 sizeof(struct ice_aqc_recipe_data_elem));
624 if (!recps[rid].root_buf)
627 ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
628 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
630 /* Copy result indexes */
631 ice_memcpy(recps[rid].res_idxs, result_bm, sizeof(recps[rid].res_idxs),
632 ICE_NONDMA_TO_NONDMA);
633 recps[rid].recp_created = true;
641 * ice_get_recp_to_prof_map - updates recipe to profile mapping
642 * @hw: pointer to hardware structure
644 * This function is used to populate recipe_to_profile matrix where index to
645 * this array is the recipe ID and the element is the mapping of which profiles
646 * is this recipe mapped to.
649 ice_get_recp_to_prof_map(struct ice_hw *hw)
651 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
654 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
657 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
658 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* query FW for the recipes associated with profile i; on failure the
 * profile's row is presumably skipped and left zeroed (body elided in
 * this listing) -- confirm.
 */
659 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
661 ice_memcpy(profile_to_recipe[i], r_bitmap,
662 sizeof(profile_to_recipe[i]), ICE_NONDMA_TO_NONDMA);
/* also populate the transpose: recipe -> profiles */
663 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
664 if (ice_is_bit_set(r_bitmap, j))
665 ice_set_bit(i, recipe_to_profile[j]);
670 * ice_init_def_sw_recp - initialize the recipe book keeping tables
671 * @hw: pointer to the HW struct
673 * Allocate memory for the entire recipe table and initialize the structures/
674 * entries corresponding to basic recipes.
676 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
678 struct ice_sw_recipe *recps;
681 recps = (struct ice_sw_recipe *)
682 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
684 return ICE_ERR_NO_MEMORY;
/* each entry starts as its own root with empty rule lists and a fresh lock */
686 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
687 recps[i].root_rid = i;
688 INIT_LIST_HEAD(&recps[i].filt_rules);
689 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
690 INIT_LIST_HEAD(&recps[i].rg_list);
691 ice_init_lock(&recps[i].filt_rule_lock);
694 hw->switch_info->recp_list = recps;
700 * ice_aq_get_sw_cfg - get switch configuration
701 * @hw: pointer to the hardware structure
702 * @buf: pointer to the result buffer
703 * @buf_size: length of the buffer available for response
704 * @req_desc: pointer to requested descriptor
705 * @num_elems: pointer to number of elements
706 * @cd: pointer to command details structure or NULL
708 * Get switch configuration (0x0200) to be placed in 'buff'.
709 * This admin command returns information such as initial VSI/port number
710 * and switch ID it belongs to.
712 * NOTE: *req_desc is both an input/output parameter.
713 * The caller of this function first calls this function with *request_desc set
714 * to 0. If the response from f/w has *req_desc set to 0, all the switch
715 * configuration information has been returned; if non-zero (meaning not all
716 * the information was returned), the caller should call this function again
717 * with *req_desc set to the previous value returned by f/w to get the
718 * next block of switch configuration information.
720 * *num_elems is output only parameter. This reflects the number of elements
721 * in response buffer. The caller of this function to use *num_elems while
722 * parsing the response buffer.
724 static enum ice_status
725 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
726 u16 buf_size, u16 *req_desc, u16 *num_elems,
727 struct ice_sq_cd *cd)
729 struct ice_aqc_get_sw_cfg *cmd;
730 enum ice_status status;
731 struct ice_aq_desc desc;
733 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
734 cmd = &desc.params.get_sw_conf;
/* cmd->element doubles as the pagination cursor described above */
735 cmd->element = CPU_TO_LE16(*req_desc);
737 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* echo FW's continuation cursor and element count back to the caller
 * (presumably only on success; the guard is elided in this listing)
 */
739 *req_desc = LE16_TO_CPU(cmd->element);
740 *num_elems = LE16_TO_CPU(cmd->num_elems);
748 * ice_alloc_sw - allocate resources specific to switch
749 * @hw: pointer to the HW struct
750 * @ena_stats: true to turn on VEB stats
751 * @shared_res: true for shared resource, false for dedicated resource
752 * @sw_id: switch ID returned
753 * @counter_id: VEB counter ID returned
755 * allocates switch resources (SWID and VEB counter) (0x0208)
758 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
761 struct ice_aqc_alloc_free_res_elem *sw_buf;
762 struct ice_aqc_res_elem *sw_ele;
763 enum ice_status status;
766 buf_len = sizeof(*sw_buf);
767 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
768 ice_malloc(hw, buf_len);
770 return ICE_ERR_NO_MEMORY;
772 /* Prepare buffer for switch ID.
773 * The number of resource entries in buffer is passed as 1 since only a
774 * single switch/VEB instance is allocated, and hence a single sw_id
777 sw_buf->num_elems = CPU_TO_LE16(1);
779 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
780 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
781 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
783 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
784 ice_aqc_opc_alloc_res, NULL);
787 goto ice_alloc_sw_exit;
789 sw_ele = &sw_buf->elem[0];
790 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* NOTE(review): the VEB-counter section below presumably runs only when
 * ena_stats is set (the guard is elided in this listing) -- confirm.
 */
793 /* Prepare buffer for VEB Counter */
794 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
795 struct ice_aqc_alloc_free_res_elem *counter_buf;
796 struct ice_aqc_res_elem *counter_ele;
798 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
799 ice_malloc(hw, buf_len);
801 status = ICE_ERR_NO_MEMORY;
802 goto ice_alloc_sw_exit;
805 /* The number of resource entries in buffer is passed as 1 since
806 * only a single switch/VEB instance is allocated, and hence a
807 * single VEB counter is requested.
809 counter_buf->num_elems = CPU_TO_LE16(1);
810 counter_buf->res_type =
811 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
812 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
813 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* on counter-alloc failure, release the local buffer before bailing out */
817 ice_free(hw, counter_buf);
818 goto ice_alloc_sw_exit;
820 counter_ele = &counter_buf->elem[0];
821 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
822 ice_free(hw, counter_buf);
826 ice_free(hw, sw_buf);
831 * ice_free_sw - free resources specific to switch
832 * @hw: pointer to the HW struct
833 * @sw_id: switch ID returned
834 * @counter_id: VEB counter ID returned
836 * free switch resources (SWID and VEB counter) (0x0209)
838 * NOTE: This function frees multiple resources. It continues
839 * releasing other resources even after it encounters error.
840 * The error code returned is the last error it encountered.
842 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
844 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
845 enum ice_status status, ret_status;
848 buf_len = sizeof(*sw_buf);
849 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
850 ice_malloc(hw, buf_len);
852 return ICE_ERR_NO_MEMORY;
854 /* Prepare buffer to free for switch ID res.
855 * The number of resource entries in buffer is passed as 1 since only a
856 * single switch/VEB instance is freed, and hence a single sw_id
859 sw_buf->num_elems = CPU_TO_LE16(1);
860 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
861 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* best-effort: keep the SWID-free result and continue to the counter */
863 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
864 ice_aqc_opc_free_res, NULL);
867 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
869 /* Prepare buffer to free for VEB Counter resource */
870 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
871 ice_malloc(hw, buf_len);
873 ice_free(hw, sw_buf);
874 return ICE_ERR_NO_MEMORY;
877 /* The number of resource entries in buffer is passed as 1 since only a
878 * single switch/VEB instance is freed, and hence a single VEB counter
881 counter_buf->num_elems = CPU_TO_LE16(1);
882 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
883 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
/* a VEB-counter free failure is logged; per the header comment it is
 * presumably also folded into ret_status (statement elided) -- confirm
 */
885 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
886 ice_aqc_opc_free_res, NULL);
888 ice_debug(hw, ICE_DBG_SW,
889 "VEB counter resource could not be freed\n");
893 ice_free(hw, counter_buf);
894 ice_free(hw, sw_buf);
900 * @hw: pointer to the HW struct
901 * @vsi_ctx: pointer to a VSI context struct
902 * @cd: pointer to command details structure or NULL
904 * Add a VSI context to the hardware (0x0210)
907 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
908 struct ice_sq_cd *cd)
910 struct ice_aqc_add_update_free_vsi_resp *res;
911 struct ice_aqc_add_get_update_free_vsi *cmd;
912 struct ice_aq_desc desc;
913 enum ice_status status;
915 cmd = &desc.params.vsi_cmd;
916 res = &desc.params.add_update_free_vsi_res;
918 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* caller-supplied VSI number: pass it to FW with the VALID flag set */
920 if (!vsi_ctx->alloc_from_pool)
921 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
922 ICE_AQ_VSI_IS_VALID);
924 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* descriptor references an external buffer (vsi_ctx->info) */
926 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
928 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
929 sizeof(vsi_ctx->info), cd);
/* capture the FW-assigned VSI number and pool accounting (success check
 * presumably elided in this listing) */
932 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
933 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
934 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
942 * @hw: pointer to the HW struct
943 * @vsi_ctx: pointer to a VSI context struct
944 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
945 * @cd: pointer to command details structure or NULL
947 * Free VSI context info from hardware (0x0213)
950 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
951 bool keep_vsi_alloc, struct ice_sq_cd *cd)
953 struct ice_aqc_add_update_free_vsi_resp *resp;
954 struct ice_aqc_add_get_update_free_vsi *cmd;
955 struct ice_aq_desc desc;
956 enum ice_status status;
958 cmd = &desc.params.vsi_cmd;
959 resp = &desc.params.add_update_free_vsi_res;
961 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
963 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): this flag set is presumably guarded by
 * "if (keep_vsi_alloc)" (guard elided in this listing) -- confirm.
 */
965 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
967 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* refresh pool accounting from the response (success check elided) */
969 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
970 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
978 * @hw: pointer to the HW struct
979 * @vsi_ctx: pointer to a VSI context struct
980 * @cd: pointer to command details structure or NULL
982 * Update VSI context in the hardware (0x0211)
985 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
986 struct ice_sq_cd *cd)
988 struct ice_aqc_add_update_free_vsi_resp *resp;
989 struct ice_aqc_add_get_update_free_vsi *cmd;
990 struct ice_aq_desc desc;
991 enum ice_status status;
993 cmd = &desc.params.vsi_cmd;
994 resp = &desc.params.add_update_free_vsi_res;
996 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
998 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* descriptor references an external buffer (vsi_ctx->info) */
1000 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1002 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1003 sizeof(vsi_ctx->info), cd);
/* refresh pool accounting from the response (success check elided) */
1006 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1007 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1014 * ice_is_vsi_valid - check whether the VSI is valid or not
1015 * @hw: pointer to the HW struct
1016 * @vsi_handle: VSI handle
1018 * check whether the VSI is valid or not
1020 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* valid == handle in range AND a context has been saved for that slot */
1022 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1026 * ice_get_hw_vsi_num - return the HW VSI number
1027 * @hw: pointer to the HW struct
1028 * @vsi_handle: VSI handle
1030 * return the HW VSI number
1031 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1033 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* unchecked dereference by design; see the Caution in the header above */
1035 return hw->vsi_ctx[vsi_handle]->vsi_num;
1039 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1040 * @hw: pointer to the HW struct
1041 * @vsi_handle: VSI handle
1043 * return the VSI context entry for a given VSI handle
1045 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* out-of-range handles yield NULL; an in-range slot may itself be NULL
 * if no context was ever saved for it */
1047 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1051 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1052 * @hw: pointer to the HW struct
1053 * @vsi_handle: VSI handle
1054 * @vsi: VSI context pointer
1056 * save the VSI context entry for a given VSI handle
1059 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* NOTE(review): stores the caller's pointer directly with no copy and no
 * bounds check on vsi_handle; callers appear responsible for validating
 * the handle first (ice_add_vsi does) -- confirm.
 */
1061 hw->vsi_ctx[vsi_handle] = vsi;
1065 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1066 * @hw: pointer to the HW struct
1067 * @vsi_handle: VSI handle
1069 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1071 struct ice_vsi_ctx *vsi;
1074 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1077 ice_for_each_traffic_class(i) {
1078 if (vsi->lan_q_ctx[i]) {
/* free and NULL each per-TC LAN queue context to prevent double-free */
1079 ice_free(hw, vsi->lan_q_ctx[i]);
1080 vsi->lan_q_ctx[i] = NULL;
1086 * ice_clear_vsi_ctx - clear the VSI context entry
1087 * @hw: pointer to the HW struct
1088 * @vsi_handle: VSI handle
1090 * clear the VSI context entry
1092 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1094 struct ice_vsi_ctx *vsi;
1096 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* queue contexts first, then the entry itself; the free of the context
 * struct is elided in this listing -- presumably ice_free(hw, vsi)
 * precedes the NULL assignment. Confirm against the full source.
 */
1098 ice_clear_vsi_q_ctx(hw, vsi_handle);
1100 hw->vsi_ctx[vsi_handle] = NULL;
1105 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1106 * @hw: pointer to the HW struct
1108 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* iterate every possible handle; ice_clear_vsi_ctx tolerates empty slots */
1112 for (i = 0; i < ICE_MAX_VSI; i++)
1113 ice_clear_vsi_ctx(hw, i);
1117 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1118 * @hw: pointer to the HW struct
1119 * @vsi_handle: unique VSI handle provided by drivers
1120 * @vsi_ctx: pointer to a VSI context struct
1121 * @cd: pointer to command details structure or NULL
1123 * Add a VSI context to the hardware also add it into the VSI handle list.
1124 * If this function gets called after reset for existing VSIs then update
1125 * with the new HW VSI number in the corresponding VSI handle list entry.
1128 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1129 struct ice_sq_cd *cd)
1131 struct ice_vsi_ctx *tmp_vsi_ctx;
1132 enum ice_status status;
1134 if (vsi_handle >= ICE_MAX_VSI)
1135 return ICE_ERR_PARAM;
1136 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1139 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1141 /* Create a new VSI context */
1142 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1143 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* bookkeeping alloc failed: roll back the VSI just added to FW */
1145 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1146 return ICE_ERR_NO_MEMORY;
/* shallow copy of the caller's context into our own allocation */
1148 *tmp_vsi_ctx = *vsi_ctx;
1150 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1152 /* update with new HW VSI num */
1153 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1160 * ice_free_vsi- free VSI context from hardware and VSI handle list
1161 * @hw: pointer to the HW struct
1162 * @vsi_handle: unique VSI handle
1163 * @vsi_ctx: pointer to a VSI context struct
1164 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1165 * @cd: pointer to command details structure or NULL
1167 * Free VSI context info from hardware as well as from VSI handle list
1170 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1171 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1173 enum ice_status status;
1175 if (!ice_is_vsi_valid(hw, vsi_handle))
1176 return ICE_ERR_PARAM;
/* resolve the handle to the HW VSI number FW expects in the command */
1177 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1178 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* drop local bookkeeping, presumably only when the FW free succeeded
 * (guard elided in this listing) -- confirm */
1180 ice_clear_vsi_ctx(hw, vsi_handle);
1186 * @hw: pointer to the HW struct
1187 * @vsi_handle: unique VSI handle
1188 * @vsi_ctx: pointer to a VSI context struct
1189 * @cd: pointer to command details structure or NULL
1191 * Update VSI context in the hardware
1194 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1195 struct ice_sq_cd *cd)
1197 if (!ice_is_vsi_valid(hw, vsi_handle))
1198 return ICE_ERR_PARAM;
/* resolve handle to the HW VSI number, then delegate to the AQ wrapper */
1199 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1200 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1204 * ice_aq_get_vsi_params
1205 * @hw: pointer to the HW struct
1206 * @vsi_ctx: pointer to a VSI context struct
1207 * @cd: pointer to command details structure or NULL
1209 * Get VSI context info from hardware (0x0212)
1212 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1213 struct ice_sq_cd *cd)
1215 struct ice_aqc_add_get_update_free_vsi *cmd;
1216 struct ice_aqc_get_vsi_resp *resp;
1217 struct ice_aq_desc desc;
1218 enum ice_status status;
/* cmd and resp alias the same descriptor parameter area: cmd view is used
 * on submit, resp view on completion.
 */
1220 cmd = &desc.params.vsi_cmd;
1221 resp = &desc.params.get_vsi_resp;
1223 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
/* IS_VALID marks the vsi_num field as populated for firmware */
1225 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* The VSI properties are returned directly into the caller's info struct */
1227 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1228 sizeof(vsi_ctx->info), cd);
1230 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1232 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1233 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1240 * ice_aq_add_update_mir_rule - add/update a mirror rule
1241 * @hw: pointer to the HW struct
1242 * @rule_type: Rule Type
1243 * @dest_vsi: VSI number to which packets will be mirrored
1244 * @count: length of the list
1245 * @mr_buf: buffer for list of mirrored VSI numbers
1246 * @cd: pointer to command details structure or NULL
1249 * Add/Update Mirror Rule (0x260).
1252 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1253 u16 count, struct ice_mir_rule_buf *mr_buf,
1254 struct ice_sq_cd *cd, u16 *rule_id)
1256 struct ice_aqc_add_update_mir_rule *cmd;
1257 struct ice_aq_desc desc;
1258 enum ice_status status;
1259 __le16 *mr_list = NULL;
1262 switch (rule_type) {
1263 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1264 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1265 /* Make sure count and mr_buf are set for these rule_types */
1266 if (!(count && mr_buf))
1267 return ICE_ERR_PARAM;
/* Indirect buffer holding one little-endian VSI entry per mirrored VSI */
1269 buf_size = count * sizeof(__le16);
1270 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1272 return ICE_ERR_NO_MEMORY;
1274 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1275 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1276 /* Make sure count and mr_buf are not set for these
1279 if (count || mr_buf)
1280 return ICE_ERR_PARAM;
1283 ice_debug(hw, ICE_DBG_SW,
1284 "Error due to unsupported rule_type %u\n", rule_type);
1285 return ICE_ERR_OUT_OF_RANGE;
1288 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1290 /* Pre-process 'mr_buf' items for add/update of virtual port
1291 * ingress/egress mirroring (but not physical port ingress/egress
1297 for (i = 0; i < count; i++) {
1300 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1302 /* Validate specified VSI number, make sure it is less
1303 * than ICE_MAX_VSI, if not return with error.
1305 if (id >= ICE_MAX_VSI) {
1306 ice_debug(hw, ICE_DBG_SW,
1307 "Error VSI index (%u) out-of-range\n",
/* Bail out without leaking the list buffer allocated above */
1309 ice_free(hw, mr_list);
1310 return ICE_ERR_OUT_OF_RANGE;
1313 /* add VSI to mirror rule */
1316 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1317 else /* remove VSI from mirror rule */
1318 mr_list[i] = CPU_TO_LE16(id);
1322 cmd = &desc.params.add_update_rule;
/* A valid incoming *rule_id means "update this existing rule" */
1323 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1324 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1325 ICE_AQC_RULE_ID_VALID_M);
1326 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1327 cmd->num_entries = CPU_TO_LE16(count);
1328 cmd->dest = CPU_TO_LE16(dest_vsi);
1330 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* Return the (possibly firmware-assigned) rule ID to the caller */
1332 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1334 ice_free(hw, mr_list);
1340 * ice_aq_delete_mir_rule - delete a mirror rule
1341 * @hw: pointer to the HW struct
1342 * @rule_id: Mirror rule ID (to be deleted)
1343 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1344 * otherwise it is returned to the shared pool
1345 * @cd: pointer to command details structure or NULL
1347 * Delete Mirror Rule (0x261).
1350 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1351 struct ice_sq_cd *cd)
1353 struct ice_aqc_delete_mir_rule *cmd;
1354 struct ice_aq_desc desc;
1356 /* rule_id should be in the range 0...63 */
1357 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1358 return ICE_ERR_OUT_OF_RANGE;
1360 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1362 cmd = &desc.params.del_rule;
/* Firmware requires the VALID bit alongside the rule ID */
1363 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1364 cmd->rule_id = CPU_TO_LE16(rule_id);
/* KEEP_ALLOCD retains the resource in this PF instead of the shared pool */
1367 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1369 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1373 * ice_aq_alloc_free_vsi_list
1374 * @hw: pointer to the HW struct
1375 * @vsi_list_id: VSI list ID returned or used for lookup
1376 * @lkup_type: switch rule filter lookup type
1377 * @opc: switch rules population command type - pass in the command opcode
1379 * allocates or free a VSI list resource
1381 static enum ice_status
1382 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1383 enum ice_sw_lkup_type lkup_type,
1384 enum ice_adminq_opc opc)
1386 struct ice_aqc_alloc_free_res_elem *sw_buf;
1387 struct ice_aqc_res_elem *vsi_ele;
1388 enum ice_status status;
1391 buf_len = sizeof(*sw_buf);
1392 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1393 ice_malloc(hw, buf_len);
1395 return ICE_ERR_NO_MEMORY;
/* Exactly one VSI list resource is allocated/freed per call */
1396 sw_buf->num_elems = CPU_TO_LE16(1);
/* Lookup type selects the resource pool: replication lists for forwarding
 * lookups, prune lists for VLAN.
 */
1398 if (lkup_type == ICE_SW_LKUP_MAC ||
1399 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1400 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1401 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1402 lkup_type == ICE_SW_LKUP_PROMISC ||
1403 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1404 lkup_type == ICE_SW_LKUP_LAST) {
1405 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1406 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1408 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1410 status = ICE_ERR_PARAM;
1411 goto ice_aq_alloc_free_vsi_list_exit;
/* On free, the caller supplies the list ID being released */
1414 if (opc == ice_aqc_opc_free_res)
1415 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1417 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1419 goto ice_aq_alloc_free_vsi_list_exit;
/* On alloc, firmware returns the new list ID in the response element */
1421 if (opc == ice_aqc_opc_alloc_res) {
1422 vsi_ele = &sw_buf->elem[0];
1423 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1426 ice_aq_alloc_free_vsi_list_exit:
1427 ice_free(hw, sw_buf);
1432 * ice_aq_set_storm_ctrl - Sets storm control configuration
1433 * @hw: pointer to the HW struct
1434 * @bcast_thresh: represents the upper threshold for broadcast storm control
1435 * @mcast_thresh: represents the upper threshold for multicast storm control
1436 * @ctl_bitmask: storm control control knobs
1438 * Sets the storm control configuration (0x0280)
1441 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1444 struct ice_aqc_storm_cfg *cmd;
1445 struct ice_aq_desc desc;
1447 cmd = &desc.params.storm_conf;
1449 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the field width firmware accepts */
1451 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1452 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1453 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1455 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1459 * ice_aq_get_storm_ctrl - gets storm control configuration
1460 * @hw: pointer to the HW struct
1461 * @bcast_thresh: represents the upper threshold for broadcast storm control
1462 * @mcast_thresh: represents the upper threshold for multicast storm control
1463 * @ctl_bitmask: storm control control knobs
1465 * Gets the storm control configuration (0x0281)
1468 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1471 enum ice_status status;
1472 struct ice_aq_desc desc;
1474 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1476 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* On success the descriptor's parameter area holds the current config */
1478 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
/* Output pointers are optional; each is filled only when non-NULL
 * (the guards are the usual pattern here).
 */
1481 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1484 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1487 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1494 * ice_aq_sw_rules - add/update/remove switch rules
1495 * @hw: pointer to the HW struct
1496 * @rule_list: pointer to switch rule population list
1497 * @rule_list_sz: total size of the rule list in bytes
1498 * @num_rules: number of switch rules in the rule_list
1499 * @opc: switch rules population command type - pass in the command opcode
1500 * @cd: pointer to command details structure or NULL
1502 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1504 static enum ice_status
1505 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1506 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1508 struct ice_aq_desc desc;
1510 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three switch-rule opcodes are legal through this path */
1512 if (opc != ice_aqc_opc_add_sw_rules &&
1513 opc != ice_aqc_opc_update_sw_rules &&
1514 opc != ice_aqc_opc_remove_sw_rules)
1515 return ICE_ERR_PARAM;
1517 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag: the command carries an indirect buffer firmware must read */
1519 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1520 desc.params.sw_rules.num_rules_fltr_entry_index =
1521 CPU_TO_LE16(num_rules);
1522 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1526 * ice_aq_add_recipe - add switch recipe
1527 * @hw: pointer to the HW struct
1528 * @s_recipe_list: pointer to switch rule population list
1529 * @num_recipes: number of switch recipes in the list
1530 * @cd: pointer to command details structure or NULL
1535 ice_aq_add_recipe(struct ice_hw *hw,
1536 struct ice_aqc_recipe_data_elem *s_recipe_list,
1537 u16 num_recipes, struct ice_sq_cd *cd)
1539 struct ice_aqc_add_get_recipe *cmd;
1540 struct ice_aq_desc desc;
1543 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1544 cmd = &desc.params.add_get_recipe;
1545 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1547 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: recipe data is passed as an indirect buffer firmware reads */
1548 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1550 buf_size = num_recipes * sizeof(*s_recipe_list);
1552 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1556 * ice_aq_get_recipe - get switch recipe
1557 * @hw: pointer to the HW struct
1558 * @s_recipe_list: pointer to switch rule population list
1559 * @num_recipes: pointer to the number of recipes (input and output)
1560 * @recipe_root: root recipe number of recipe(s) to retrieve
1561 * @cd: pointer to command details structure or NULL
1565 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1566 * On output, *num_recipes will equal the number of entries returned in
1569 * The caller must supply enough space in s_recipe_list to hold all possible
1570 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1573 ice_aq_get_recipe(struct ice_hw *hw,
1574 struct ice_aqc_recipe_data_elem *s_recipe_list,
1575 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1577 struct ice_aqc_add_get_recipe *cmd;
1578 struct ice_aq_desc desc;
1579 enum ice_status status;
/* Enforce the documented contract: buffer must cover every recipe */
1582 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1583 return ICE_ERR_PARAM;
1585 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1586 cmd = &desc.params.add_get_recipe;
1587 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1589 cmd->return_index = CPU_TO_LE16(recipe_root);
1590 cmd->num_sub_recipes = 0;
1592 buf_size = *num_recipes * sizeof(*s_recipe_list);
1594 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1595 /* cppcheck-suppress constArgument */
/* Firmware writes the actual recipe count back into the descriptor */
1596 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1602 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1603 * @hw: pointer to the HW struct
1604 * @profile_id: package profile ID to associate the recipe with
1605 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1606 * @cd: pointer to command details structure or NULL
1607 * Recipe to profile association (0x0291)
1610 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1611 struct ice_sq_cd *cd)
1613 struct ice_aqc_recipe_to_profile *cmd;
1614 struct ice_aq_desc desc;
1616 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1617 cmd = &desc.params.recipe_to_profile;
1618 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
/* profile_id is truncated to 16 bits for the wire format */
1619 cmd->profile_id = CPU_TO_LE16(profile_id);
1620 /* Set the recipe ID bit in the bitmask to let the device know which
1621 * profile we are associating the recipe to
1623 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1624 ICE_NONDMA_TO_NONDMA);
1626 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1630 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1631 * @hw: pointer to the HW struct
1632 * @profile_id: package profile ID to associate the recipe with
1633 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1634 * @cd: pointer to command details structure or NULL
1635 * Associate profile ID with given recipe (0x0293)
1638 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1639 struct ice_sq_cd *cd)
1641 struct ice_aqc_recipe_to_profile *cmd;
1642 struct ice_aq_desc desc;
1643 enum ice_status status;
1645 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1646 cmd = &desc.params.recipe_to_profile;
1647 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1648 cmd->profile_id = CPU_TO_LE16(profile_id);
1650 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Copy the recipe association bitmap firmware returned to the caller */
1652 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1653 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1659 * ice_alloc_recipe - add recipe resource
1660 * @hw: pointer to the hardware structure
1661 * @rid: recipe ID returned as response to AQ call
1663 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1665 struct ice_aqc_alloc_free_res_elem *sw_buf;
1666 enum ice_status status;
1669 buf_len = sizeof(*sw_buf);
1670 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1672 return ICE_ERR_NO_MEMORY;
/* Request a single shared recipe resource from the global pool */
1674 sw_buf->num_elems = CPU_TO_LE16(1);
1675 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1676 ICE_AQC_RES_TYPE_S) |
1677 ICE_AQC_RES_TYPE_FLAG_SHARED);
1678 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1679 ice_aqc_opc_alloc_res, NULL);
/* Firmware returns the allocated recipe ID in the response element */
1681 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1682 ice_free(hw, sw_buf);
1687 /* ice_init_port_info - Initialize port_info with switch configuration data
1688 * @pi: pointer to port_info
1689 * @vsi_port_num: VSI number or port number
1690 * @type: Type of switch element (port or VSI)
1691 * @swid: switch ID of the switch the element is attached to
1692 * @pf_vf_num: PF or VF number
1693 * @is_vf: true if the element is a VF, false otherwise
1696 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1697 u16 swid, u16 pf_vf_num, bool is_vf)
1700 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
/* Physical port: record logical port number and ownership info */
1701 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1703 pi->pf_vf_num = pf_vf_num;
/* No default Tx/Rx VSI is configured yet */
1705 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1706 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1709 ice_debug(pi->hw, ICE_DBG_SW,
1710 "incorrect VSI/port type received\n");
1715 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1716 * @hw: pointer to the hardware structure
1718 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1720 struct ice_aqc_get_sw_cfg_resp *rbuf;
1721 enum ice_status status;
1722 u16 num_total_ports;
/* This function currently supports a single physical port */
1728 num_total_ports = 1;
1730 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1731 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1734 return ICE_ERR_NO_MEMORY;
1736 /* Multiple calls to ice_aq_get_sw_cfg may be required
1737 * to get all the switch configuration information. The need
1738 * for additional calls is indicated by ice_aq_get_sw_cfg
1739 * writing a non-zero value in req_desc
1742 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1743 &req_desc, &num_elems, NULL);
1748 for (i = 0; i < num_elems; i++) {
1749 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1750 u16 pf_vf_num, swid, vsi_port_num;
1754 ele = rbuf[i].elements;
/* Decode the packed response element: port/VSI number, owner, SWID */
1755 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1756 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1758 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1759 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1761 swid = LE16_TO_CPU(ele->swid);
1763 if (LE16_TO_CPU(ele->pf_vf_num) &
1764 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
/* Element type lives in the high bits of vsi_port_num */
1767 type = LE16_TO_CPU(ele->vsi_port_num) >>
1768 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1771 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1772 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1773 if (j == num_total_ports) {
1774 ice_debug(hw, ICE_DBG_SW,
1775 "more ports than expected\n");
1776 status = ICE_ERR_CFG;
1779 ice_init_port_info(hw->port_info,
1780 vsi_port_num, type, swid,
/* Loop until firmware stops requesting continuation (req_desc == 0) */
1788 } while (req_desc && !status);
1792 ice_free(hw, (void *)rbuf);
1798 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1799 * @hw: pointer to the hardware structure
1800 * @fi: filter info structure to fill/update
1802 * This helper function populates the lb_en and lan_en elements of the provided
1803 * ice_fltr_info struct using the switch's type and characteristics of the
1804 * switch rule being configured.
1806 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Only Tx-direction forwarding filters need the LB/LAN enable tuning */
1810 if ((fi->flag & ICE_FLTR_TX) &&
1811 (fi->fltr_act == ICE_FWD_TO_VSI ||
1812 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1813 fi->fltr_act == ICE_FWD_TO_Q ||
1814 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1815 /* Setting LB for prune actions will result in replicated
1816 * packets to the internal switch that will be dropped.
1818 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1821 /* Set lan_en to TRUE if
1822 * 1. The switch is a VEB AND
1824 * 2.1 The lookup is a directional lookup like ethertype,
1825 * promiscuous, ethertype-MAC, promiscuous-VLAN
1826 * and default-port OR
1827 * 2.2 The lookup is VLAN, OR
1828 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1829 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1833 * The switch is a VEPA.
1835 * In all other cases, the LAN enable has to be set to false.
1838 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1839 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1840 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1841 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1842 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1843 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1844 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1845 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1846 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1847 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1856 * ice_fill_sw_rule - Helper function to fill switch rule structure
1857 * @hw: pointer to the hardware structure
1858 * @f_info: entry containing packet forwarding information
1859 * @s_rule: switch rule structure to be filled in based on mac_entry
1860 * @opc: switch rules population command type - pass in the command opcode
1863 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1864 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* Sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
1866 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* Remove needs only the rule index; no header or action is programmed */
1874 if (opc == ice_aqc_opc_remove_sw_rules) {
1875 s_rule->pdata.lkup_tx_rx.act = 0;
1876 s_rule->pdata.lkup_tx_rx.index =
1877 CPU_TO_LE16(f_info->fltr_rule_id);
1878 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1882 eth_hdr_sz = sizeof(dummy_eth_header);
1883 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1885 /* initialize the ether header with a dummy header */
1886 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1887 ice_fill_sw_info(hw, f_info);
/* Translate the forwarding action into the single-action bitfield */
1889 switch (f_info->fltr_act) {
1890 case ICE_FWD_TO_VSI:
1891 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1892 ICE_SINGLE_ACT_VSI_ID_M;
1893 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1894 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1895 ICE_SINGLE_ACT_VALID_BIT;
1897 case ICE_FWD_TO_VSI_LIST:
1898 act |= ICE_SINGLE_ACT_VSI_LIST;
1899 act |= (f_info->fwd_id.vsi_list_id <<
1900 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1901 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1902 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1903 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1904 ICE_SINGLE_ACT_VALID_BIT;
1907 act |= ICE_SINGLE_ACT_TO_Q;
1908 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1909 ICE_SINGLE_ACT_Q_INDEX_M;
1911 case ICE_DROP_PACKET:
1912 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1913 ICE_SINGLE_ACT_VALID_BIT;
1915 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the group size */
1916 q_rgn = f_info->qgrp_size > 0 ?
1917 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1918 act |= ICE_SINGLE_ACT_TO_Q;
1919 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1920 ICE_SINGLE_ACT_Q_INDEX_M;
1921 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1922 ICE_SINGLE_ACT_Q_REGION_M;
1929 act |= ICE_SINGLE_ACT_LB_ENABLE;
1931 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick which lookup fields (DA, VLAN, ethertype) to program into the
 * dummy header based on the filter's lookup type.
 */
1933 switch (f_info->lkup_type) {
1934 case ICE_SW_LKUP_MAC:
1935 daddr = f_info->l_data.mac.mac_addr;
1937 case ICE_SW_LKUP_VLAN:
1938 vlan_id = f_info->l_data.vlan.vlan_id;
1939 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1940 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1941 act |= ICE_SINGLE_ACT_PRUNE;
1942 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1945 case ICE_SW_LKUP_ETHERTYPE_MAC:
1946 daddr = f_info->l_data.ethertype_mac.mac_addr;
1948 case ICE_SW_LKUP_ETHERTYPE:
/* Ethertype is written big-endian at offset 12 of the dummy header */
1949 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1950 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1952 case ICE_SW_LKUP_MAC_VLAN:
1953 daddr = f_info->l_data.mac_vlan.mac_addr;
1954 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1956 case ICE_SW_LKUP_PROMISC_VLAN:
1957 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1959 case ICE_SW_LKUP_PROMISC:
1960 daddr = f_info->l_data.mac_vlan.mac_addr;
1966 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1967 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1968 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1970 /* Recipe set depending on lookup type */
1971 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1972 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1973 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Program destination MAC (when set) at offset 0 of the dummy header */
1976 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1977 ICE_NONDMA_TO_NONDMA);
/* A valid (in-range) VLAN ID is written into the TCI field */
1979 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1980 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1981 *off = CPU_TO_BE16(vlan_id);
1984 /* Create the switch rule with the final dummy Ethernet header */
1985 if (opc != ice_aqc_opc_update_sw_rules)
1986 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1990 * ice_add_marker_act
1991 * @hw: pointer to the hardware structure
1992 * @m_ent: the management entry for which sw marker needs to be added
1993 * @sw_marker: sw marker to tag the Rx descriptor with
1994 * @l_id: large action resource ID
1996 * Create a large action to hold software marker and update the switch rule
1997 * entry pointed by m_ent with newly created large action
1999 static enum ice_status
2000 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2001 u16 sw_marker, u16 l_id)
2003 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2004 /* For software marker we need 3 large actions
2005 * 1. FWD action: FWD TO VSI or VSI LIST
2006 * 2. GENERIC VALUE action to hold the profile ID
2007 * 3. GENERIC VALUE action to hold the software marker ID
2009 const u16 num_lg_acts = 3;
2010 enum ice_status status;
/* Markers are only supported on MAC lookup rules */
2016 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2017 return ICE_ERR_PARAM;
2019 /* Create two back-to-back switch rules and submit them to the HW using
2020 * one memory buffer:
2024 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2025 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2026 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2028 return ICE_ERR_NO_MEMORY;
/* Second rule (lookup) follows the large action in the same buffer */
2030 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2032 /* Fill in the first switch rule i.e. large action */
2033 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2034 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2035 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2037 /* First action VSI forwarding or VSI list forwarding depending on how
2040 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2041 m_ent->fltr_info.fwd_id.hw_vsi_id;
2043 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2044 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2045 ICE_LG_ACT_VSI_LIST_ID_M;
2046 if (m_ent->vsi_count > 1)
2047 act |= ICE_LG_ACT_VSI_LIST;
2048 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2050 /* Second action descriptor type */
2051 act = ICE_LG_ACT_GENERIC;
2053 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2054 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* Marker is placed at the Rx descriptor profile-index offset */
2056 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2057 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2059 /* Third action Marker value */
2060 act |= ICE_LG_ACT_GENERIC;
2061 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2062 ICE_LG_ACT_GENERIC_VALUE_M;
2064 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2066 /* call the fill switch rule to fill the lookup Tx Rx structure */
2067 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2068 ice_aqc_opc_update_sw_rules);
2070 /* Update the action to point to the large action ID */
2071 rx_tx->pdata.lkup_tx_rx.act =
2072 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2073 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2074 ICE_SINGLE_ACT_PTR_VAL_M));
2076 /* Use the filter rule ID of the previously created rule with single
2077 * act. Once the update happens, hardware will treat this as large
2080 rx_tx->pdata.lkup_tx_rx.index =
2081 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2083 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2084 ice_aqc_opc_update_sw_rules, NULL);
/* Record the large action and marker on the management entry */
2086 m_ent->lg_act_idx = l_id;
2087 m_ent->sw_marker_id = sw_marker;
2090 ice_free(hw, lg_act);
2095 * ice_add_counter_act - add/update filter rule with counter action
2096 * @hw: pointer to the hardware structure
2097 * @m_ent: the management entry for which counter needs to be added
2098 * @counter_id: VLAN counter ID returned as part of allocate resource
2099 * @l_id: large action resource ID
2101 static enum ice_status
2102 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2103 u16 counter_id, u16 l_id)
2105 struct ice_aqc_sw_rules_elem *lg_act;
2106 struct ice_aqc_sw_rules_elem *rx_tx;
2107 enum ice_status status;
2108 /* 2 actions will be added while adding a large action counter */
2109 const int num_acts = 2;
/* Counter actions are only supported on MAC lookup rules */
2116 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2117 return ICE_ERR_PARAM;
2119 /* Create two back-to-back switch rules and submit them to the HW using
2120 * one memory buffer:
2124 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2125 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2126 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2129 return ICE_ERR_NO_MEMORY;
/* Second rule (lookup) follows the large action in the same buffer */
2131 rx_tx = (struct ice_aqc_sw_rules_elem *)
2132 ((u8 *)lg_act + lg_act_size);
2134 /* Fill in the first switch rule i.e. large action */
2135 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2136 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2137 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2139 /* First action VSI forwarding or VSI list forwarding depending on how
2142 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2143 m_ent->fltr_info.fwd_id.hw_vsi_id;
2145 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2146 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2147 ICE_LG_ACT_VSI_LIST_ID_M;
2148 if (m_ent->vsi_count > 1)
2149 act |= ICE_LG_ACT_VSI_LIST;
2150 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2152 /* Second action counter ID */
2153 act = ICE_LG_ACT_STAT_COUNT;
2154 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2155 ICE_LG_ACT_STAT_COUNT_M;
2156 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2158 /* call the fill switch rule to fill the lookup Tx Rx structure */
2159 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2160 ice_aqc_opc_update_sw_rules);
/* Point the lookup rule at the large action just built */
2162 act = ICE_SINGLE_ACT_PTR;
2163 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2164 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2166 /* Use the filter rule ID of the previously created rule with single
2167 * act. Once the update happens, hardware will treat this as large
2170 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2171 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2173 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2174 ice_aqc_opc_update_sw_rules, NULL);
/* Record the large action and counter on the management entry */
2176 m_ent->lg_act_idx = l_id;
2177 m_ent->counter_index = counter_id;
2180 ice_free(hw, lg_act);
2185 * ice_create_vsi_list_map
2186 * @hw: pointer to the hardware structure
2187 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2188 * @num_vsi: number of VSI handles in the array
2189 * @vsi_list_id: VSI list ID generated as part of allocate resource
2191 * Helper function to create a new entry of VSI list ID to VSI mapping
2192 * using the given VSI list ID
2194 static struct ice_vsi_list_map_info *
2195 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2198 struct ice_switch_info *sw = hw->switch_info;
2199 struct ice_vsi_list_map_info *v_map;
2202 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2207 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI handle in the map's bitmap */
2209 for (i = 0; i < num_vsi; i++)
2210 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch-wide list of VSI list maps */
2212 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2217 * ice_update_vsi_list_rule
2218 * @hw: pointer to the hardware structure
2219 * @vsi_handle_arr: array of VSI handles to form a VSI list
2220 * @num_vsi: number of VSI handles in the array
2221 * @vsi_list_id: VSI list ID generated as part of allocate resource
2222 * @remove: Boolean value to indicate if this is a remove action
2223 * @opc: switch rules population command type - pass in the command opcode
2224 * @lkup_type: lookup type of the filter
2226 * Call AQ command to add a new switch rule or update existing switch rule
2227 * using the given VSI list ID
2229 static enum ice_status
2230 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2231 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2232 enum ice_sw_lkup_type lkup_type)
2234 struct ice_aqc_sw_rules_elem *s_rule;
2235 enum ice_status status;
2241 return ICE_ERR_PARAM;
/* Rule type mirrors the resource pool split in ice_aq_alloc_free_vsi_list:
 * replication set/clear for forwarding lookups, prune set/clear for VLAN.
 */
2243 if (lkup_type == ICE_SW_LKUP_MAC ||
2244 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2245 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2246 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2247 lkup_type == ICE_SW_LKUP_PROMISC ||
2248 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2249 lkup_type == ICE_SW_LKUP_LAST)
2250 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2251 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2252 else if (lkup_type == ICE_SW_LKUP_VLAN)
2253 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2254 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2256 return ICE_ERR_PARAM;
2258 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2259 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2261 return ICE_ERR_NO_MEMORY;
2262 for (i = 0; i < num_vsi; i++) {
2263 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2264 status = ICE_ERR_PARAM;
2267 /* AQ call requires hw_vsi_id(s) */
2268 s_rule->pdata.vsi_list.vsi[i] =
2269 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2272 s_rule->type = CPU_TO_LE16(type);
2273 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2274 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2276 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2279 ice_free(hw, s_rule);
2284 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2285 * @hw: pointer to the HW struct
2286 * @vsi_handle_arr: array of VSI handles to form a VSI list
2287 * @num_vsi: number of VSI handles in the array
2288 * @vsi_list_id: stores the ID of the VSI list to be created
2289 * @lkup_type: switch rule filter's lookup type
/* Allocate a VSI list resource from FW, then populate it with the given
 * VSI handles.  On success *vsi_list_id holds the newly allocated list ID.
 * NOTE(review): the status check between the alloc call and the update call
 * (lines 2299-2301) is not visible in this listing.
 */
2291 static enum ice_status
2292 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2293 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2295 enum ice_status status;
2297 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2298 ice_aqc_opc_alloc_res);
2302 /* Update the newly created VSI list to include the specified VSIs */
2303 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2304 *vsi_list_id, false,
2305 ice_aqc_opc_add_sw_rules, lkup_type);
2309 * ice_create_pkt_fwd_rule
2310 * @hw: pointer to the hardware structure
2311 * @f_entry: entry containing packet forwarding information
2313 * Create switch rule with given filter information and add an entry
2314 * to the corresponding filter management list to track this switch rule
/* Create one packet-forwarding switch rule in HW (add_sw_rules AQ) and a
 * matching book-keeping entry on the recipe's filter list.  The rule ID
 * returned by FW is written back into both f_entry and the new list entry.
 * NOTE(review): NULL checks after both allocations (lines 2329, 2333) and
 * the post-AQ status check (2351) were dropped from this listing.
 */
2317 static enum ice_status
2318 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2319 struct ice_fltr_list_entry *f_entry)
2321 struct ice_fltr_mgmt_list_entry *fm_entry;
2322 struct ice_aqc_sw_rules_elem *s_rule;
2323 enum ice_sw_lkup_type l_type;
2324 struct ice_sw_recipe *recp;
2325 enum ice_status status;
2327 s_rule = (struct ice_aqc_sw_rules_elem *)
2328 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2330 return ICE_ERR_NO_MEMORY;
2331 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2332 ice_malloc(hw, sizeof(*fm_entry));
2334 status = ICE_ERR_NO_MEMORY;
2335 goto ice_create_pkt_fwd_rule_exit;
2338 fm_entry->fltr_info = f_entry->fltr_info;
2340 /* Initialize all the fields for the management entry */
2341 fm_entry->vsi_count = 1;
2342 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2343 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2344 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2346 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2347 ice_aqc_opc_add_sw_rules);
2349 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2350 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is freed; the caller's f_entry is
 * left untouched.
 */
2352 ice_free(hw, fm_entry);
2353 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the rule index it assigned; record it in both copies */
2356 f_entry->fltr_info.fltr_rule_id =
2357 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2358 fm_entry->fltr_info.fltr_rule_id =
2359 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2361 /* The book keeping entries will get removed when base driver
2362 * calls remove filter AQ command
2364 l_type = fm_entry->fltr_info.lkup_type;
2365 recp = &hw->switch_info->recp_list[l_type];
2366 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
/* s_rule is a scratch AQ buffer on every path — always freed here */
2368 ice_create_pkt_fwd_rule_exit:
2369 ice_free(hw, s_rule);
2374 * ice_update_pkt_fwd_rule
2375 * @hw: pointer to the hardware structure
2376 * @f_info: filter information for switch rule
2378 * Call AQ command to update a previously created switch rule with a
/* Re-program an existing switch rule (identified by f_info->fltr_rule_id)
 * via the update_sw_rules AQ command, e.g. to retarget it at a VSI list.
 * NOTE(review): the s_rule NULL check (line 2389) is missing in this view.
 */
2381 static enum ice_status
2382 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2384 struct ice_aqc_sw_rules_elem *s_rule;
2385 enum ice_status status;
2387 s_rule = (struct ice_aqc_sw_rules_elem *)
2388 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2390 return ICE_ERR_NO_MEMORY;
2392 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Updates address an existing rule, so the FW-assigned index must be set */
2394 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2396 /* Update switch rule with new rule set to forward VSI list */
2397 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2398 ice_aqc_opc_update_sw_rules, NULL);
2400 ice_free(hw, s_rule);
2405 * ice_update_sw_rule_bridge_mode
2406 * @hw: pointer to the HW struct
2408 * Updates unicast switch filter rules based on VEB/VEPA mode
/* Walk the MAC-lookup filter list under its lock and re-program every
 * unicast Tx forwarding rule, so existing rules pick up the current
 * VEB/VEPA bridge mode.  Stops at the first failing update (status check
 * after the update call is in a line dropped from this listing).
 */
2410 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2412 struct ice_switch_info *sw = hw->switch_info;
2413 struct ice_fltr_mgmt_list_entry *fm_entry;
2414 enum ice_status status = ICE_SUCCESS;
2415 struct LIST_HEAD_TYPE *rule_head;
2416 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2418 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2419 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2421 ice_acquire_lock(rule_lock);
2422 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2424 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2425 u8 *addr = fi->l_data.mac.mac_addr;
2427 /* Update unicast Tx rules to reflect the selected
/* Only Tx-flagged, unicast-MAC rules with a forwarding action qualify */
2430 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2431 (fi->fltr_act == ICE_FWD_TO_VSI ||
2432 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2433 fi->fltr_act == ICE_FWD_TO_Q ||
2434 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2435 status = ice_update_pkt_fwd_rule(hw, fi);
2441 ice_release_lock(rule_lock);
2447 * ice_add_update_vsi_list
2448 * @hw: pointer to the hardware structure
2449 * @m_entry: pointer to current filter management list entry
2450 * @cur_fltr: filter information from the book keeping entry
2451 * @new_fltr: filter information with the new VSI to be added
2453 * Call AQ command to add or update previously created VSI list with new VSI.
2455 * Helper function to do book keeping associated with adding filter information
2456 * The algorithm to do the book keeping is described below :
2457 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2458 * if only one VSI has been added till now
2459 * Allocate a new VSI list and add two VSIs
2460 * to this list using switch rule command
2461 * Update the previously created switch rule with the
2462 * newly created VSI list ID
2463 * if a VSI list was previously created
2464 * Add the new VSI to the previously created VSI list set
2465 * using the update switch rule command
/* Subscribe one more VSI to an existing filter.  Two paths:
 *  - first additional VSI: build a 2-entry VSI list (old VSI + new VSI),
 *    repoint the existing rule at that list, and record the list map;
 *  - list already exists: append the new VSI to it via update_sw_rules.
 * Queue/queue-group actions are not supported for list conversion.
 */
2467 static enum ice_status
2468 ice_add_update_vsi_list(struct ice_hw *hw,
2469 struct ice_fltr_mgmt_list_entry *m_entry,
2470 struct ice_fltr_info *cur_fltr,
2471 struct ice_fltr_info *new_fltr)
2473 enum ice_status status = ICE_SUCCESS;
2474 u16 vsi_list_id = 0;
2476 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2477 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2478 return ICE_ERR_NOT_IMPL;
2480 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2481 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2482 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2483 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2484 return ICE_ERR_NOT_IMPL;
2486 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2487 /* Only one entry existed in the mapping and it was not already
2488 * a part of a VSI list. So, create a VSI list with the old and
2491 struct ice_fltr_info tmp_fltr;
2492 u16 vsi_handle_arr[2];
2494 /* A rule already exists with the new VSI being added */
2495 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2496 return ICE_ERR_ALREADY_EXISTS;
2498 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2499 vsi_handle_arr[1] = new_fltr->vsi_handle;
2500 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2502 new_fltr->lkup_type);
/* tmp_fltr reuses the existing rule's ID but retargets it at the list */
2506 tmp_fltr = *new_fltr;
2507 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2508 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2509 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2510 /* Update the previous switch rule of "MAC forward to VSI" to
2511 * "MAC fwd to VSI list"
2513 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping now mirrors HW: the filter forwards to the new list */
2517 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2518 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2519 m_entry->vsi_list_info =
2520 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2523 /* If this entry was large action then the large action needs
2524 * to be updated to point to FWD to VSI list
2526 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2528 ice_add_marker_act(hw, m_entry,
2529 m_entry->sw_marker_id,
2530 m_entry->lg_act_idx)
2532 u16 vsi_handle = new_fltr->vsi_handle;
2533 enum ice_adminq_opc opcode;
2535 if (!m_entry->vsi_list_info)
2538 /* A rule already exists with the new VSI being added */
2539 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2542 /* Update the previously created VSI list set with
2543 * the new VSI ID passed in
2545 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2546 opcode = ice_aqc_opc_update_sw_rules;
2548 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2549 vsi_list_id, false, opcode,
2550 new_fltr->lkup_type);
2551 /* update VSI list mapping info with new VSI ID */
2553 ice_set_bit(vsi_handle,
2554 m_entry->vsi_list_info->vsi_map);
2557 m_entry->vsi_count++;
2562 * ice_find_rule_entry - Search a rule entry
2563 * @hw: pointer to the hardware structure
2564 * @recp_id: lookup type for which the specified rule needs to be searched
2565 * @f_info: rule information
2567 * Helper function to search for a given rule entry
2568 * Returns pointer to entry storing the rule if found
/* Linear search of the recipe's filter list for an entry whose lookup data
 * and flag match f_info.  Returns the entry or NULL (the match-capture and
 * return lines fall in a numbering gap after 2582 in this listing).
 * Caller is expected to hold the recipe's filt_rule_lock.
 */
2570 static struct ice_fltr_mgmt_list_entry *
2571 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2573 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2574 struct ice_switch_info *sw = hw->switch_info;
2575 struct LIST_HEAD_TYPE *list_head;
2577 list_head = &sw->recp_list[recp_id].filt_rules;
2578 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on the raw lookup data (MAC/VLAN/ethertype union) plus Rx/Tx flag */
2580 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2581 sizeof(f_info->l_data)) &&
2582 f_info->flag == list_itr->fltr_info.flag) {
2591 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2592 * @hw: pointer to the hardware structure
2593 * @recp_id: lookup type for which VSI lists needs to be searched
2594 * @vsi_handle: VSI handle to be found in VSI list
2595 * @vsi_list_id: VSI list ID found containing vsi_handle
2597 * Helper function to search a VSI list with single entry containing given VSI
2598 * handle element. This can be extended further to search VSI list with more
2599 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* Search a recipe's rules for a VSI list that already contains vsi_handle.
 * Advanced-rule recipes iterate ice_adv_fltr_mgmt_list_entry; legacy
 * recipes iterate ice_fltr_mgmt_list_entry and only consider entries with
 * vsi_count == 1 (single-VSI lists are the only ones reused).  On a hit,
 * *vsi_list_id is filled and the map info is returned; otherwise NULL.
 */
2601 static struct ice_vsi_list_map_info *
2602 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2605 struct ice_vsi_list_map_info *map_info = NULL;
2606 struct ice_switch_info *sw = hw->switch_info;
2607 struct LIST_HEAD_TYPE *list_head;
2609 list_head = &sw->recp_list[recp_id].filt_rules;
2610 if (sw->recp_list[recp_id].adv_rule) {
2611 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2613 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2614 ice_adv_fltr_mgmt_list_entry,
2616 if (list_itr->vsi_list_info) {
2617 map_info = list_itr->vsi_list_info;
2618 if (ice_is_bit_set(map_info->vsi_map,
2620 *vsi_list_id = map_info->vsi_list_id;
2626 struct ice_fltr_mgmt_list_entry *list_itr;
2628 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2629 ice_fltr_mgmt_list_entry,
2631 if (list_itr->vsi_count == 1 &&
2632 list_itr->vsi_list_info) {
2633 map_info = list_itr->vsi_list_info;
2634 if (ice_is_bit_set(map_info->vsi_map,
2636 *vsi_list_id = map_info->vsi_list_id;
2646 * ice_add_rule_internal - add rule for a given lookup type
2647 * @hw: pointer to the hardware structure
2648 * @recp_id: lookup type (recipe ID) for which rule has to be added
2649 * @f_entry: structure containing MAC forwarding information
2651 * Adds or updates the rule lists for a given recipe
/* Add (or extend) a filter rule for one recipe.  If no matching rule exists
 * a new packet-forwarding rule is created; if one exists, the VSI is folded
 * into its VSI list via ice_add_update_vsi_list.  All list access happens
 * under the recipe's filt_rule_lock.
 */
2653 static enum ice_status
2654 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2655 struct ice_fltr_list_entry *f_entry)
2657 struct ice_switch_info *sw = hw->switch_info;
2658 struct ice_fltr_info *new_fltr, *cur_fltr;
2659 struct ice_fltr_mgmt_list_entry *m_entry;
2660 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2661 enum ice_status status = ICE_SUCCESS;
2663 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2664 return ICE_ERR_PARAM;
2666 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2667 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2668 f_entry->fltr_info.fwd_id.hw_vsi_id =
2669 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2671 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2673 ice_acquire_lock(rule_lock);
2674 new_fltr = &f_entry->fltr_info;
/* Rule source: the physical port for Rx filters, the HW VSI for Tx */
2675 if (new_fltr->flag & ICE_FLTR_RX)
2676 new_fltr->src = hw->port_info->lport;
2677 else if (new_fltr->flag & ICE_FLTR_TX)
2679 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2681 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
/* No existing rule with this lookup data: create a brand new one */
2683 status = ice_create_pkt_fwd_rule(hw, f_entry);
2684 goto exit_add_rule_internal;
2687 cur_fltr = &m_entry->fltr_info;
2688 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2690 exit_add_rule_internal:
2691 ice_release_lock(rule_lock);
2696 * ice_remove_vsi_list_rule
2697 * @hw: pointer to the hardware structure
2698 * @vsi_list_id: VSI list ID generated as part of allocate resource
2699 * @lkup_type: switch rule filter lookup type
2701 * The VSI list should be emptied before this function is called to remove the
/* Free a (now empty) VSI list resource back to FW.
 * NOTE(review): an s_rule buffer is built with type VSI_LIST_CLEAR but the
 * visible code only calls ice_aq_alloc_free_vsi_list — the listing's gap
 * between 2718 and 2720 hides whether/why the clear command is (not) sent;
 * verify against the full source before changing.
 */
2704 static enum ice_status
2705 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2706 enum ice_sw_lkup_type lkup_type)
2708 struct ice_aqc_sw_rules_elem *s_rule;
2709 enum ice_status status;
2712 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2713 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2715 return ICE_ERR_NO_MEMORY;
2717 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2718 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2720 /* Free the vsi_list resource that we allocated. It is assumed that the
2721 * list is empty at this point.
2723 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2724 ice_aqc_opc_free_res);
2726 ice_free(hw, s_rule);
2731 * ice_rem_update_vsi_list
2732 * @hw: pointer to the hardware structure
2733 * @vsi_handle: VSI handle of the VSI to remove
2734 * @fm_list: filter management entry for which the VSI list management needs to
/* Remove one VSI from a rule's VSI list and keep HW and book-keeping in
 * sync.  If a non-VLAN list drops to a single remaining VSI, the rule is
 * converted back to plain FWD_TO_VSI; once the list is no longer used
 * (count 1 for non-VLAN, count 0 for VLAN) the list resource is freed and
 * its map entry deleted.
 */
2737 static enum ice_status
2738 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2739 struct ice_fltr_mgmt_list_entry *fm_list)
2741 enum ice_sw_lkup_type lkup_type;
2742 enum ice_status status = ICE_SUCCESS;
2745 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2746 fm_list->vsi_count == 0)
2747 return ICE_ERR_PARAM;
2749 /* A rule with the VSI being removed does not exist */
2750 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2751 return ICE_ERR_DOES_NOT_EXIST;
2753 lkup_type = fm_list->fltr_info.lkup_type;
2754 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove=true pulls this VSI out of the list in HW */
2755 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2756 ice_aqc_opc_update_sw_rules,
2761 fm_list->vsi_count--;
2762 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2764 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2765 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2766 struct ice_vsi_list_map_info *vsi_list_info =
2767 fm_list->vsi_list_info;
/* Identify the single VSI left on the list */
2770 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2772 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2773 return ICE_ERR_OUT_OF_RANGE;
2775 /* Make sure VSI list is empty before removing it below */
2776 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2778 ice_aqc_opc_update_sw_rules,
/* Retarget the rule straight at the one remaining VSI */
2783 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2784 tmp_fltr_info.fwd_id.hw_vsi_id =
2785 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2786 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2787 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2789 ice_debug(hw, ICE_DBG_SW,
2790 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2791 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2795 fm_list->fltr_info = tmp_fltr_info;
2798 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2799 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2800 struct ice_vsi_list_map_info *vsi_list_info =
2801 fm_list->vsi_list_info;
2803 /* Remove the VSI list since it is no longer used */
2804 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2806 ice_debug(hw, ICE_DBG_SW,
2807 "Failed to remove VSI list %d, error %d\n",
2808 vsi_list_id, status);
2812 LIST_DEL(&vsi_list_info->list_entry);
2813 ice_free(hw, vsi_list_info);
2814 fm_list->vsi_list_info = NULL;
2821 * ice_remove_rule_internal - Remove a filter rule of a given type
2823 * @hw: pointer to the hardware structure
2824 * @recp_id: recipe ID for which the rule needs to removed
2825 * @f_entry: rule entry containing filter information
/* Remove one filter rule of the given recipe.  Decides between three
 * cases under the recipe lock: non-list rules are removed directly;
 * shared VSI lists (ref_cnt > 1) just drop a reference; single-owner
 * lists first shrink via ice_rem_update_vsi_list and only delete the HW
 * rule once vsi_count reaches zero.
 */
2827 static enum ice_status
2828 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2829 struct ice_fltr_list_entry *f_entry)
2831 struct ice_switch_info *sw = hw->switch_info;
2832 struct ice_fltr_mgmt_list_entry *list_elem;
2833 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2834 enum ice_status status = ICE_SUCCESS;
2835 bool remove_rule = false;
2838 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2839 return ICE_ERR_PARAM;
2840 f_entry->fltr_info.fwd_id.hw_vsi_id =
2841 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2843 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2844 ice_acquire_lock(rule_lock);
2845 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2847 status = ICE_ERR_DOES_NOT_EXIST;
2851 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2853 } else if (!list_elem->vsi_list_info) {
2854 status = ICE_ERR_DOES_NOT_EXIST;
2856 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2857 /* a ref_cnt > 1 indicates that the vsi_list is being
2858 * shared by multiple rules. Decrement the ref_cnt and
2859 * remove this rule, but do not modify the list, as it
2860 * is in-use by other rules.
2862 list_elem->vsi_list_info->ref_cnt--;
2865 /* a ref_cnt of 1 indicates the vsi_list is only used
2866 * by one rule. However, the original removal request is only
2867 * for a single VSI. Update the vsi_list first, and only
2868 * remove the rule if there are no further VSIs in this list.
2870 vsi_handle = f_entry->fltr_info.vsi_handle;
2871 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2874 /* if VSI count goes to zero after updating the VSI list */
2875 if (list_elem->vsi_count == 0)
2880 /* Remove the lookup rule */
2881 struct ice_aqc_sw_rules_elem *s_rule;
2883 s_rule = (struct ice_aqc_sw_rules_elem *)
2884 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2886 status = ICE_ERR_NO_MEMORY;
2890 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2891 ice_aqc_opc_remove_sw_rules);
2893 status = ice_aq_sw_rules(hw, s_rule,
2894 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2895 ice_aqc_opc_remove_sw_rules, NULL);
2897 /* Remove a book keeping from the list */
2898 ice_free(hw, s_rule);
2903 LIST_DEL(&list_elem->list_entry);
2904 ice_free(hw, list_elem);
2907 ice_release_lock(rule_lock);
2912 * ice_aq_get_res_alloc - get allocated resources
2913 * @hw: pointer to the HW struct
2914 * @num_entries: pointer to u16 to store the number of resource entries returned
2915 * @buf: pointer to user-supplied buffer
2916 * @buf_size: size of buff
2917 * @cd: pointer to command details structure or NULL
2919 * The user-supplied buffer must be large enough to store the resource
2920 * information for all resource types. Each resource type is an
2921 * ice_aqc_get_res_resp_data_elem structure.
/* Issue the get_res_alloc AQ command; on success, optionally report the
 * number of resource entries FW returned in the response descriptor.
 * NOTE(review): the buf NULL check preceding line 2932 is hidden by a gap
 * in this listing.
 */
2924 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2925 u16 buf_size, struct ice_sq_cd *cd)
2927 struct ice_aqc_get_res_alloc *resp;
2928 enum ice_status status;
2929 struct ice_aq_desc desc;
2932 return ICE_ERR_BAD_PTR;
2934 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2935 return ICE_ERR_INVAL_SIZE;
2937 resp = &desc.params.get_res;
2939 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2940 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional — only filled on success when caller wants it */
2942 if (!status && num_entries)
2943 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2949 * ice_aq_get_res_descs - get allocated resource descriptors
2950 * @hw: pointer to the hardware structure
2951 * @num_entries: number of resource entries in buffer
2952 * @buf: Indirect buffer to hold data parameters and response
2953 * @buf_size: size of buffer for indirect commands
2954 * @res_type: resource type
2955 * @res_shared: is resource shared
2956 * @desc_id: input - first desc ID to start; output - next desc ID
2957 * @cd: pointer to command details structure or NULL
/* Fetch descriptors of allocated resources of one type via the
 * get_allocd_res_desc AQ command.  *desc_id is in/out: the first
 * descriptor to fetch on entry, the next descriptor to continue from on
 * return.  buf_size must match num_entries response elements exactly.
 */
2960 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2961 struct ice_aqc_get_allocd_res_desc_resp *buf,
2962 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2963 struct ice_sq_cd *cd)
2965 struct ice_aqc_get_allocd_res_desc *cmd;
2966 struct ice_aq_desc desc;
2967 enum ice_status status;
2969 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2971 cmd = &desc.params.get_res_desc;
2974 return ICE_ERR_PARAM;
2976 if (buf_size != (num_entries * sizeof(*buf)))
2977 return ICE_ERR_PARAM;
2979 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared/dedicated flag in one field */
2981 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2982 ICE_AQC_RES_TYPE_M) | (res_shared ?
2983 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2984 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2986 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2988 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2994 * ice_add_mac - Add a MAC address based filter rule
2995 * @hw: pointer to the hardware structure
2996 * @m_list: list of MAC addresses and forwarding information
2998 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2999 * multiple unicast addresses, the function assumes that all the
3000 * addresses are unique in a given add_mac call. It doesn't
3001 * check for duplicates in this case, removing duplicates from a given
3002 * list should be taken care of in the caller of this function.
/* Add all MAC filters on m_list.  Multicast (and shared unicast) entries
 * go through ice_add_rule_internal one by one; exclusive unicast entries
 * are batched: rules are packed into one buffer, sent in AQ-sized chunks,
 * then the FW-assigned rule IDs are copied back and book-keeping entries
 * are appended to the MAC recipe's list, all under the recipe lock.
 */
3005 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3007 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3008 struct ice_fltr_list_entry *m_list_itr;
3009 struct LIST_HEAD_TYPE *rule_head;
3010 u16 elem_sent, total_elem_left;
3011 struct ice_switch_info *sw;
3012 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3013 enum ice_status status = ICE_SUCCESS;
3014 u16 num_unicast = 0;
3018 return ICE_ERR_PARAM;
3020 sw = hw->switch_info;
3021 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* First pass: validate every entry and route non-batched ones */
3022 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3024 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3028 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3029 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3030 if (!ice_is_vsi_valid(hw, vsi_handle))
3031 return ICE_ERR_PARAM;
3032 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3033 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3034 /* update the src in case it is VSI num */
3035 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3036 return ICE_ERR_PARAM;
3037 m_list_itr->fltr_info.src = hw_vsi_id;
3038 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3039 IS_ZERO_ETHER_ADDR(add))
3040 return ICE_ERR_PARAM;
3041 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3042 /* Don't overwrite the unicast address */
3043 ice_acquire_lock(rule_lock);
3044 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3045 &m_list_itr->fltr_info)) {
3046 ice_release_lock(rule_lock);
3047 return ICE_ERR_ALREADY_EXISTS;
3049 ice_release_lock(rule_lock);
3051 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3052 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
/* Shared/multicast filters are added individually, not batched */
3053 m_list_itr->status =
3054 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3056 if (m_list_itr->status)
3057 return m_list_itr->status;
3061 ice_acquire_lock(rule_lock);
3062 /* Exit if no suitable entries were found for adding bulk switch rule */
3064 status = ICE_SUCCESS;
3065 goto ice_add_mac_exit;
3068 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3070 /* Allocate switch rule buffer for the bulk update for unicast */
3071 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3072 s_rule = (struct ice_aqc_sw_rules_elem *)
3073 ice_calloc(hw, num_unicast, s_rule_size);
3075 status = ICE_ERR_NO_MEMORY;
3076 goto ice_add_mac_exit;
/* Second pass: serialize one rule per exclusive-unicast entry */
3080 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3082 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3083 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3085 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3086 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3087 ice_aqc_opc_add_sw_rules);
3088 r_iter = (struct ice_aqc_sw_rules_elem *)
3089 ((u8 *)r_iter + s_rule_size);
3093 /* Call AQ bulk switch rule update for all unicast addresses */
3095 /* Call AQ switch rule in AQ_MAX chunk */
3096 for (total_elem_left = num_unicast; total_elem_left > 0;
3097 total_elem_left -= elem_sent) {
3098 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size is limited by the AQ's max buffer length */
3100 elem_sent = min(total_elem_left,
3101 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3102 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3103 elem_sent, ice_aqc_opc_add_sw_rules,
3106 goto ice_add_mac_exit;
3107 r_iter = (struct ice_aqc_sw_rules_elem *)
3108 ((u8 *)r_iter + (elem_sent * s_rule_size));
3111 /* Fill up rule ID based on the value returned from FW */
3113 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3115 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3116 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3117 struct ice_fltr_mgmt_list_entry *fm_entry;
3119 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3120 f_info->fltr_rule_id =
3121 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3122 f_info->fltr_act = ICE_FWD_TO_VSI;
3123 /* Create an entry to track this MAC address */
3124 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3125 ice_malloc(hw, sizeof(*fm_entry));
3127 status = ICE_ERR_NO_MEMORY;
3128 goto ice_add_mac_exit;
3130 fm_entry->fltr_info = *f_info;
3131 fm_entry->vsi_count = 1;
3132 /* The book keeping entries will get removed when
3133 * base driver calls remove filter AQ command
3136 LIST_ADD(&fm_entry->list_entry, rule_head);
3137 r_iter = (struct ice_aqc_sw_rules_elem *)
3138 ((u8 *)r_iter + s_rule_size);
3143 ice_release_lock(rule_lock);
3145 ice_free(hw, s_rule);
3150 * ice_add_vlan_internal - Add one VLAN based filter rule
3151 * @hw: pointer to the hardware structure
3152 * @f_entry: filter entry containing one VLAN information
/* Add a single VLAN filter rule.  All VLAN rules forward to a VSI list
 * (pruning requirement), so this either: creates/reuses a VSI list for a
 * brand-new rule; appends to an existing single-reference list; or, when
 * the current list is shared (ref_cnt > 1), builds a fresh two-VSI list
 * and repoints the existing rule at it.  Runs under the VLAN recipe lock.
 */
3154 static enum ice_status
3155 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3157 struct ice_switch_info *sw = hw->switch_info;
3158 struct ice_fltr_mgmt_list_entry *v_list_itr;
3159 struct ice_fltr_info *new_fltr, *cur_fltr;
3160 enum ice_sw_lkup_type lkup_type;
3161 u16 vsi_list_id = 0, vsi_handle;
3162 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3163 enum ice_status status = ICE_SUCCESS;
3165 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3166 return ICE_ERR_PARAM;
3168 f_entry->fltr_info.fwd_id.hw_vsi_id =
3169 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3170 new_fltr = &f_entry->fltr_info;
3172 /* VLAN ID should only be 12 bits */
3173 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3174 return ICE_ERR_PARAM;
3176 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3177 return ICE_ERR_PARAM;
3179 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3180 lkup_type = new_fltr->lkup_type;
3181 vsi_handle = new_fltr->vsi_handle;
3182 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3183 ice_acquire_lock(rule_lock);
3184 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
/* No rule yet for this VLAN ID: set up (or reuse) a VSI list first */
3186 struct ice_vsi_list_map_info *map_info = NULL;
3188 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3189 /* All VLAN pruning rules use a VSI list. Check if
3190 * there is already a VSI list containing VSI that we
3191 * want to add. If found, use the same vsi_list_id for
3192 * this new VLAN rule or else create a new list.
3194 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3198 status = ice_create_vsi_list_rule(hw,
3206 /* Convert the action to forwarding to a VSI list. */
3207 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3208 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3211 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-find the entry that ice_create_pkt_fwd_rule just book-kept */
3213 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3216 status = ICE_ERR_DOES_NOT_EXIST;
3219 /* reuse VSI list for new rule and increment ref_cnt */
3221 v_list_itr->vsi_list_info = map_info;
3222 map_info->ref_cnt++;
3224 v_list_itr->vsi_list_info =
3225 ice_create_vsi_list_map(hw, &vsi_handle,
3229 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3230 /* Update existing VSI list to add new VSI ID only if it used
3233 cur_fltr = &v_list_itr->fltr_info;
3234 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3237 /* If VLAN rule exists and VSI list being used by this rule is
3238 * referenced by more than 1 VLAN rule. Then create a new VSI
3239 * list appending previous VSI with new VSI and update existing
3240 * VLAN rule to point to new VSI list ID
3242 struct ice_fltr_info tmp_fltr;
3243 u16 vsi_handle_arr[2];
3246 /* Current implementation only supports reusing VSI list with
3247 * one VSI count. We should never hit below condition
3249 if (v_list_itr->vsi_count > 1 &&
3250 v_list_itr->vsi_list_info->ref_cnt > 1) {
3251 ice_debug(hw, ICE_DBG_SW,
3252 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3253 status = ICE_ERR_CFG;
/* The shared list holds exactly one VSI — pull out its handle */
3258 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3261 /* A rule already exists with the new VSI being added */
3262 if (cur_handle == vsi_handle) {
3263 status = ICE_ERR_ALREADY_EXISTS;
3267 vsi_handle_arr[0] = cur_handle;
3268 vsi_handle_arr[1] = vsi_handle;
3269 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3270 &vsi_list_id, lkup_type);
3274 tmp_fltr = v_list_itr->fltr_info;
3275 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3276 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3277 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3278 /* Update the previous switch rule to a new VSI list which
3279 * includes current VSI that is requested
3281 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3285 /* before overriding VSI list map info. decrement ref_cnt of
3288 v_list_itr->vsi_list_info->ref_cnt--;
3290 /* now update to newly created list */
3291 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3292 v_list_itr->vsi_list_info =
3293 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3295 v_list_itr->vsi_count++;
3299 ice_release_lock(rule_lock);
3304 * ice_add_vlan - Add VLAN based filter rule
3305 * @hw: pointer to the hardware structure
3306 * @v_list: list of VLAN entries and forwarding information
/* Add every VLAN filter on v_list via ice_add_vlan_internal.  Each entry
 * must be a VLAN lookup; the flag is forced to Tx.  Stops and returns the
 * first per-entry failure (per-entry status is also stored on the entry).
 */
3309 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3311 struct ice_fltr_list_entry *v_list_itr;
3314 return ICE_ERR_PARAM;
3316 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3318 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3319 return ICE_ERR_PARAM;
3320 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3321 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3322 if (v_list_itr->status)
3323 return v_list_itr->status;
3329 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3330 * @hw: pointer to the hardware structure
3331 * @mv_list: list of MAC and VLAN filters
3333 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3334 * pruning bits enabled, then it is the responsibility of the caller to make
3335 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3336 * VLAN won't be received on that VSI otherwise.
/* Add every MAC+VLAN pair filter on mv_list through the generic
 * ice_add_rule_internal path (MAC_VLAN recipe, flag forced to Tx).
 * Stops at the first per-entry failure.
 */
3339 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3341 struct ice_fltr_list_entry *mv_list_itr;
3343 if (!mv_list || !hw)
3344 return ICE_ERR_PARAM;
3346 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3348 enum ice_sw_lkup_type l_type =
3349 mv_list_itr->fltr_info.lkup_type;
3351 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3352 return ICE_ERR_PARAM;
3353 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3354 mv_list_itr->status =
3355 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3357 if (mv_list_itr->status)
3358 return mv_list_itr->status;
3364 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3365 * @hw: pointer to the hardware structure
3366 * @em_list: list of ether type MAC filter, MAC is optional
3368 * This function requires the caller to populate the entries in
3369 * the filter list with the necessary fields (including flags to
3370 * indicate Tx or Rx rules).
/* Add every ethertype / ethertype+MAC filter on em_list via the generic
 * rule path.  Unlike ice_add_vlan/ice_add_mac_vlan, the caller supplies
 * the Rx/Tx flag — it is not forced here.  Stops at the first failure.
 */
3373 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3375 struct ice_fltr_list_entry *em_list_itr;
3377 if (!em_list || !hw)
3378 return ICE_ERR_PARAM;
3380 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3382 enum ice_sw_lkup_type l_type =
3383 em_list_itr->fltr_info.lkup_type;
3385 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3386 l_type != ICE_SW_LKUP_ETHERTYPE)
3387 return ICE_ERR_PARAM;
3389 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3391 if (em_list_itr->status)
3392 return em_list_itr->status;
3398 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3399 * @hw: pointer to the hardware structure
3400 * @em_list: list of ethertype or ethertype MAC entries
3403 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3405 struct ice_fltr_list_entry *em_list_itr, *tmp;
3407 if (!em_list || !hw)
3408 return ICE_ERR_PARAM;
3410 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3412 enum ice_sw_lkup_type l_type =
3413 em_list_itr->fltr_info.lkup_type;
3415 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3416 l_type != ICE_SW_LKUP_ETHERTYPE)
3417 return ICE_ERR_PARAM;
3419 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3421 if (em_list_itr->status)
3422 return em_list_itr->status;
3429 * ice_rem_sw_rule_info
3430 * @hw: pointer to the hardware structure
3431 * @rule_head: pointer to the switch list structure that we want to delete
3434 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3436 if (!LIST_EMPTY(rule_head)) {
3437 struct ice_fltr_mgmt_list_entry *entry;
3438 struct ice_fltr_mgmt_list_entry *tmp;
3440 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3441 ice_fltr_mgmt_list_entry, list_entry) {
3442 LIST_DEL(&entry->list_entry);
3443 ice_free(hw, entry);
3449 * ice_rem_adv_rule_info
3450 * @hw: pointer to the hardware structure
3451 * @rule_head: pointer to the switch list structure that we want to delete
3454 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3456 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3457 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3459 if (LIST_EMPTY(rule_head))
3462 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3463 ice_adv_fltr_mgmt_list_entry, list_entry) {
3464 LIST_DEL(&lst_itr->list_entry);
3465 ice_free(hw, lst_itr->lkups);
3466 ice_free(hw, lst_itr);
3471 * ice_rem_all_sw_rules_info
3472 * @hw: pointer to the hardware structure
3474 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3476 struct ice_switch_info *sw = hw->switch_info;
3479 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3480 struct LIST_HEAD_TYPE *rule_head;
3482 rule_head = &sw->recp_list[i].filt_rules;
3483 if (!sw->recp_list[i].adv_rule)
3484 ice_rem_sw_rule_info(hw, rule_head);
3486 ice_rem_adv_rule_info(hw, rule_head);
3491 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3492 * @pi: pointer to the port_info structure
3493 * @vsi_handle: VSI handle to set as default
3494 * @set: true to add the above mentioned switch rule, false to remove it
3495 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3497 * add filter rule to set/unset given VSI as default VSI for the switch
3498 * (represented by swid)
3501 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3504 struct ice_aqc_sw_rules_elem *s_rule;
3505 struct ice_fltr_info f_info;
3506 struct ice_hw *hw = pi->hw;
3507 enum ice_adminq_opc opcode;
3508 enum ice_status status;
3512 if (!ice_is_vsi_valid(hw, vsi_handle))
3513 return ICE_ERR_PARAM;
3514 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3516 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3517 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3518 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3520 return ICE_ERR_NO_MEMORY;
3522 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3524 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3525 f_info.flag = direction;
3526 f_info.fltr_act = ICE_FWD_TO_VSI;
3527 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3529 if (f_info.flag & ICE_FLTR_RX) {
3530 f_info.src = pi->lport;
3531 f_info.src_id = ICE_SRC_ID_LPORT;
3533 f_info.fltr_rule_id =
3534 pi->dflt_rx_vsi_rule_id;
3535 } else if (f_info.flag & ICE_FLTR_TX) {
3536 f_info.src_id = ICE_SRC_ID_VSI;
3537 f_info.src = hw_vsi_id;
3539 f_info.fltr_rule_id =
3540 pi->dflt_tx_vsi_rule_id;
3544 opcode = ice_aqc_opc_add_sw_rules;
3546 opcode = ice_aqc_opc_remove_sw_rules;
3548 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3550 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3551 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3554 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3556 if (f_info.flag & ICE_FLTR_TX) {
3557 pi->dflt_tx_vsi_num = hw_vsi_id;
3558 pi->dflt_tx_vsi_rule_id = index;
3559 } else if (f_info.flag & ICE_FLTR_RX) {
3560 pi->dflt_rx_vsi_num = hw_vsi_id;
3561 pi->dflt_rx_vsi_rule_id = index;
3564 if (f_info.flag & ICE_FLTR_TX) {
3565 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3566 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3567 } else if (f_info.flag & ICE_FLTR_RX) {
3568 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3569 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3574 ice_free(hw, s_rule);
3579 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3580 * @hw: pointer to the hardware structure
3581 * @recp_id: lookup type for which the specified rule needs to be searched
3582 * @f_info: rule information
3584 * Helper function to search for a unicast rule entry - this is to be used
3585 * to remove unicast MAC filter that is not shared with other VSIs on the
3588 * Returns pointer to entry storing the rule if found
3590 static struct ice_fltr_mgmt_list_entry *
3591 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3592 struct ice_fltr_info *f_info)
3594 struct ice_switch_info *sw = hw->switch_info;
3595 struct ice_fltr_mgmt_list_entry *list_itr;
3596 struct LIST_HEAD_TYPE *list_head;
3598 list_head = &sw->recp_list[recp_id].filt_rules;
3599 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3601 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3602 sizeof(f_info->l_data)) &&
3603 f_info->fwd_id.hw_vsi_id ==
3604 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3605 f_info->flag == list_itr->fltr_info.flag)
3612 * ice_remove_mac - remove a MAC address based filter rule
3613 * @hw: pointer to the hardware structure
3614 * @m_list: list of MAC addresses and forwarding information
3616 * This function removes either a MAC filter rule or a specific VSI from a
3617 * VSI list for a multicast MAC address.
3619 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3620 * ice_add_mac. Caller should be aware that this call will only work if all
3621 * the entries passed into m_list were added previously. It will not attempt to
3622 * do a partial remove of entries that were found.
3625 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3627 struct ice_fltr_list_entry *list_itr, *tmp;
3628 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3631 return ICE_ERR_PARAM;
3633 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3634 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3636 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3637 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3640 if (l_type != ICE_SW_LKUP_MAC)
3641 return ICE_ERR_PARAM;
3643 vsi_handle = list_itr->fltr_info.vsi_handle;
3644 if (!ice_is_vsi_valid(hw, vsi_handle))
3645 return ICE_ERR_PARAM;
3647 list_itr->fltr_info.fwd_id.hw_vsi_id =
3648 ice_get_hw_vsi_num(hw, vsi_handle);
3649 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3650 /* Don't remove the unicast address that belongs to
3651 * another VSI on the switch, since it is not being
3654 ice_acquire_lock(rule_lock);
3655 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3656 &list_itr->fltr_info)) {
3657 ice_release_lock(rule_lock);
3658 return ICE_ERR_DOES_NOT_EXIST;
3660 ice_release_lock(rule_lock);
3662 list_itr->status = ice_remove_rule_internal(hw,
3665 if (list_itr->status)
3666 return list_itr->status;
3672 * ice_remove_vlan - Remove VLAN based filter rule
3673 * @hw: pointer to the hardware structure
3674 * @v_list: list of VLAN entries and forwarding information
3677 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3679 struct ice_fltr_list_entry *v_list_itr, *tmp;
3682 return ICE_ERR_PARAM;
3684 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3686 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3688 if (l_type != ICE_SW_LKUP_VLAN)
3689 return ICE_ERR_PARAM;
3690 v_list_itr->status = ice_remove_rule_internal(hw,
3693 if (v_list_itr->status)
3694 return v_list_itr->status;
3700 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3701 * @hw: pointer to the hardware structure
3702 * @v_list: list of MAC VLAN entries and forwarding information
3705 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3707 struct ice_fltr_list_entry *v_list_itr, *tmp;
3710 return ICE_ERR_PARAM;
3712 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3714 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3716 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3717 return ICE_ERR_PARAM;
3718 v_list_itr->status =
3719 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3721 if (v_list_itr->status)
3722 return v_list_itr->status;
3728 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3729 * @fm_entry: filter entry to inspect
3730 * @vsi_handle: VSI handle to compare with filter info
3733 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3735 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3736 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3737 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3738 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3743 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3744 * @hw: pointer to the hardware structure
3745 * @vsi_handle: VSI handle to remove filters from
3746 * @vsi_list_head: pointer to the list to add entry to
3747 * @fi: pointer to fltr_info of filter entry to copy & add
3749 * Helper function, used when creating a list of filters to remove from
3750 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3751 * original filter entry, with the exception of fltr_info.fltr_act and
3752 * fltr_info.fwd_id fields. These are set such that later logic can
3753 * extract which VSI to remove the fltr from, and pass on that information.
3755 static enum ice_status
3756 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3757 struct LIST_HEAD_TYPE *vsi_list_head,
3758 struct ice_fltr_info *fi)
3760 struct ice_fltr_list_entry *tmp;
3762 /* this memory is freed up in the caller function
3763 * once filters for this VSI are removed
3765 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3767 return ICE_ERR_NO_MEMORY;
3769 tmp->fltr_info = *fi;
3771 /* Overwrite these fields to indicate which VSI to remove filter from,
3772 * so find and remove logic can extract the information from the
3773 * list entries. Note that original entries will still have proper
3776 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3777 tmp->fltr_info.vsi_handle = vsi_handle;
3778 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3780 LIST_ADD(&tmp->list_entry, vsi_list_head);
3786 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3787 * @hw: pointer to the hardware structure
3788 * @vsi_handle: VSI handle to remove filters from
3789 * @lkup_list_head: pointer to the list that has certain lookup type filters
3790 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3792 * Locates all filters in lkup_list_head that are used by the given VSI,
3793 * and adds COPIES of those entries to vsi_list_head (intended to be used
3794 * to remove the listed filters).
3795 * Note that this means all entries in vsi_list_head must be explicitly
3796 * deallocated by the caller when done with list.
3798 static enum ice_status
3799 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3800 struct LIST_HEAD_TYPE *lkup_list_head,
3801 struct LIST_HEAD_TYPE *vsi_list_head)
3803 struct ice_fltr_mgmt_list_entry *fm_entry;
3804 enum ice_status status = ICE_SUCCESS;
3806 /* check to make sure VSI ID is valid and within boundary */
3807 if (!ice_is_vsi_valid(hw, vsi_handle))
3808 return ICE_ERR_PARAM;
3810 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3811 ice_fltr_mgmt_list_entry, list_entry) {
3812 struct ice_fltr_info *fi;
3814 fi = &fm_entry->fltr_info;
3815 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3818 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3828 * ice_determine_promisc_mask
3829 * @fi: filter info to parse
3831 * Helper function to determine which ICE_PROMISC_ mask corresponds
3832 * to given filter into.
3834 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3836 u16 vid = fi->l_data.mac_vlan.vlan_id;
3837 u8 *macaddr = fi->l_data.mac.mac_addr;
3838 bool is_tx_fltr = false;
3839 u8 promisc_mask = 0;
3841 if (fi->flag == ICE_FLTR_TX)
3844 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3845 promisc_mask |= is_tx_fltr ?
3846 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3847 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3848 promisc_mask |= is_tx_fltr ?
3849 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3850 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3851 promisc_mask |= is_tx_fltr ?
3852 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3854 promisc_mask |= is_tx_fltr ?
3855 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3857 return promisc_mask;
3861 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3862 * @hw: pointer to the hardware structure
3863 * @vsi_handle: VSI handle to retrieve info from
3864 * @promisc_mask: pointer to mask to be filled in
3865 * @vid: VLAN ID of promisc VLAN VSI
3868 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3871 struct ice_switch_info *sw = hw->switch_info;
3872 struct ice_fltr_mgmt_list_entry *itr;
3873 struct LIST_HEAD_TYPE *rule_head;
3874 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3876 if (!ice_is_vsi_valid(hw, vsi_handle))
3877 return ICE_ERR_PARAM;
3881 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3882 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3884 ice_acquire_lock(rule_lock);
3885 LIST_FOR_EACH_ENTRY(itr, rule_head,
3886 ice_fltr_mgmt_list_entry, list_entry) {
3887 /* Continue if this filter doesn't apply to this VSI or the
3888 * VSI ID is not in the VSI map for this filter
3890 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3893 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3895 ice_release_lock(rule_lock);
3901 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3902 * @hw: pointer to the hardware structure
3903 * @vsi_handle: VSI handle to retrieve info from
3904 * @promisc_mask: pointer to mask to be filled in
3905 * @vid: VLAN ID of promisc VLAN VSI
3908 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3911 struct ice_switch_info *sw = hw->switch_info;
3912 struct ice_fltr_mgmt_list_entry *itr;
3913 struct LIST_HEAD_TYPE *rule_head;
3914 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3916 if (!ice_is_vsi_valid(hw, vsi_handle))
3917 return ICE_ERR_PARAM;
3921 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3922 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3924 ice_acquire_lock(rule_lock);
3925 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3927 /* Continue if this filter doesn't apply to this VSI or the
3928 * VSI ID is not in the VSI map for this filter
3930 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3933 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3935 ice_release_lock(rule_lock);
3941 * ice_remove_promisc - Remove promisc based filter rules
3942 * @hw: pointer to the hardware structure
3943 * @recp_id: recipe ID for which the rule needs to removed
3944 * @v_list: list of promisc entries
3946 static enum ice_status
3947 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3948 struct LIST_HEAD_TYPE *v_list)
3950 struct ice_fltr_list_entry *v_list_itr, *tmp;
3952 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3954 v_list_itr->status =
3955 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3956 if (v_list_itr->status)
3957 return v_list_itr->status;
3963 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3964 * @hw: pointer to the hardware structure
3965 * @vsi_handle: VSI handle to clear mode
3966 * @promisc_mask: mask of promiscuous config bits to clear
3967 * @vid: VLAN ID to clear VLAN promiscuous
3970 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3973 struct ice_switch_info *sw = hw->switch_info;
3974 struct ice_fltr_list_entry *fm_entry, *tmp;
3975 struct LIST_HEAD_TYPE remove_list_head;
3976 struct ice_fltr_mgmt_list_entry *itr;
3977 struct LIST_HEAD_TYPE *rule_head;
3978 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3979 enum ice_status status = ICE_SUCCESS;
3982 if (!ice_is_vsi_valid(hw, vsi_handle))
3983 return ICE_ERR_PARAM;
3985 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
3986 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3988 recipe_id = ICE_SW_LKUP_PROMISC;
3990 rule_head = &sw->recp_list[recipe_id].filt_rules;
3991 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3993 INIT_LIST_HEAD(&remove_list_head);
3995 ice_acquire_lock(rule_lock);
3996 LIST_FOR_EACH_ENTRY(itr, rule_head,
3997 ice_fltr_mgmt_list_entry, list_entry) {
3998 struct ice_fltr_info *fltr_info;
3999 u8 fltr_promisc_mask = 0;
4001 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4003 fltr_info = &itr->fltr_info;
4005 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4006 vid != fltr_info->l_data.mac_vlan.vlan_id)
4009 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4011 /* Skip if filter is not completely specified by given mask */
4012 if (fltr_promisc_mask & ~promisc_mask)
4015 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4019 ice_release_lock(rule_lock);
4020 goto free_fltr_list;
4023 ice_release_lock(rule_lock);
4025 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4028 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4029 ice_fltr_list_entry, list_entry) {
4030 LIST_DEL(&fm_entry->list_entry);
4031 ice_free(hw, fm_entry);
4038 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4039 * @hw: pointer to the hardware structure
4040 * @vsi_handle: VSI handle to configure
4041 * @promisc_mask: mask of promiscuous config bits
4042 * @vid: VLAN ID to set VLAN promiscuous
4045 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4047 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4048 struct ice_fltr_list_entry f_list_entry;
4049 struct ice_fltr_info new_fltr;
4050 enum ice_status status = ICE_SUCCESS;
4056 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4058 if (!ice_is_vsi_valid(hw, vsi_handle))
4059 return ICE_ERR_PARAM;
4060 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4062 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4064 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4065 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4066 new_fltr.l_data.mac_vlan.vlan_id = vid;
4067 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4069 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4070 recipe_id = ICE_SW_LKUP_PROMISC;
4073 /* Separate filters must be set for each direction/packet type
4074 * combination, so we will loop over the mask value, store the
4075 * individual type, and clear it out in the input mask as it
4078 while (promisc_mask) {
4084 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4085 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4086 pkt_type = UCAST_FLTR;
4087 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4088 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4089 pkt_type = UCAST_FLTR;
4091 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4092 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4093 pkt_type = MCAST_FLTR;
4094 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4095 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4096 pkt_type = MCAST_FLTR;
4098 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4099 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4100 pkt_type = BCAST_FLTR;
4101 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4102 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4103 pkt_type = BCAST_FLTR;
4107 /* Check for VLAN promiscuous flag */
4108 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4109 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4110 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4111 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4115 /* Set filter DA based on packet type */
4116 mac_addr = new_fltr.l_data.mac.mac_addr;
4117 if (pkt_type == BCAST_FLTR) {
4118 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4119 } else if (pkt_type == MCAST_FLTR ||
4120 pkt_type == UCAST_FLTR) {
4121 /* Use the dummy ether header DA */
4122 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4123 ICE_NONDMA_TO_NONDMA);
4124 if (pkt_type == MCAST_FLTR)
4125 mac_addr[0] |= 0x1; /* Set multicast bit */
4128 /* Need to reset this to zero for all iterations */
4131 new_fltr.flag |= ICE_FLTR_TX;
4132 new_fltr.src = hw_vsi_id;
4134 new_fltr.flag |= ICE_FLTR_RX;
4135 new_fltr.src = hw->port_info->lport;
4138 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4139 new_fltr.vsi_handle = vsi_handle;
4140 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4141 f_list_entry.fltr_info = new_fltr;
4143 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4144 if (status != ICE_SUCCESS)
4145 goto set_promisc_exit;
4153 * ice_set_vlan_vsi_promisc
4154 * @hw: pointer to the hardware structure
4155 * @vsi_handle: VSI handle to configure
4156 * @promisc_mask: mask of promiscuous config bits
4157 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4159 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4162 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4163 bool rm_vlan_promisc)
4165 struct ice_switch_info *sw = hw->switch_info;
4166 struct ice_fltr_list_entry *list_itr, *tmp;
4167 struct LIST_HEAD_TYPE vsi_list_head;
4168 struct LIST_HEAD_TYPE *vlan_head;
4169 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4170 enum ice_status status;
4173 INIT_LIST_HEAD(&vsi_list_head);
4174 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4175 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4176 ice_acquire_lock(vlan_lock);
4177 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4179 ice_release_lock(vlan_lock);
4181 goto free_fltr_list;
4183 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4185 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4186 if (rm_vlan_promisc)
4187 status = ice_clear_vsi_promisc(hw, vsi_handle,
4188 promisc_mask, vlan_id);
4190 status = ice_set_vsi_promisc(hw, vsi_handle,
4191 promisc_mask, vlan_id);
4197 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4198 ice_fltr_list_entry, list_entry) {
4199 LIST_DEL(&list_itr->list_entry);
4200 ice_free(hw, list_itr);
4206 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4207 * @hw: pointer to the hardware structure
4208 * @vsi_handle: VSI handle to remove filters from
4209 * @lkup: switch rule filter lookup type
4212 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4213 enum ice_sw_lkup_type lkup)
4215 struct ice_switch_info *sw = hw->switch_info;
4216 struct ice_fltr_list_entry *fm_entry;
4217 struct LIST_HEAD_TYPE remove_list_head;
4218 struct LIST_HEAD_TYPE *rule_head;
4219 struct ice_fltr_list_entry *tmp;
4220 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4221 enum ice_status status;
4223 INIT_LIST_HEAD(&remove_list_head);
4224 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4225 rule_head = &sw->recp_list[lkup].filt_rules;
4226 ice_acquire_lock(rule_lock);
4227 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4229 ice_release_lock(rule_lock);
4234 case ICE_SW_LKUP_MAC:
4235 ice_remove_mac(hw, &remove_list_head);
4237 case ICE_SW_LKUP_VLAN:
4238 ice_remove_vlan(hw, &remove_list_head);
4240 case ICE_SW_LKUP_PROMISC:
4241 case ICE_SW_LKUP_PROMISC_VLAN:
4242 ice_remove_promisc(hw, lkup, &remove_list_head);
4244 case ICE_SW_LKUP_MAC_VLAN:
4245 ice_remove_mac_vlan(hw, &remove_list_head);
4247 case ICE_SW_LKUP_ETHERTYPE:
4248 case ICE_SW_LKUP_ETHERTYPE_MAC:
4249 ice_remove_eth_mac(hw, &remove_list_head);
4251 case ICE_SW_LKUP_DFLT:
4252 ice_debug(hw, ICE_DBG_SW,
4253 "Remove filters for this lookup type hasn't been implemented yet\n");
4255 case ICE_SW_LKUP_LAST:
4256 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4260 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4261 ice_fltr_list_entry, list_entry) {
4262 LIST_DEL(&fm_entry->list_entry);
4263 ice_free(hw, fm_entry);
4268 * ice_remove_vsi_fltr - Remove all filters for a VSI
4269 * @hw: pointer to the hardware structure
4270 * @vsi_handle: VSI handle to remove filters from
4272 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4274 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4276 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4277 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4278 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4279 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4280 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4281 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4282 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4283 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4287 * ice_alloc_res_cntr - allocating resource counter
4288 * @hw: pointer to the hardware structure
4289 * @type: type of resource
4290 * @alloc_shared: if set it is shared else dedicated
4291 * @num_items: number of entries requested for FD resource type
4292 * @counter_id: counter index returned by AQ call
4295 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4298 struct ice_aqc_alloc_free_res_elem *buf;
4299 enum ice_status status;
4302 /* Allocate resource */
4303 buf_len = sizeof(*buf);
4304 buf = (struct ice_aqc_alloc_free_res_elem *)
4305 ice_malloc(hw, buf_len);
4307 return ICE_ERR_NO_MEMORY;
4309 buf->num_elems = CPU_TO_LE16(num_items);
4310 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4311 ICE_AQC_RES_TYPE_M) | alloc_shared);
4313 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4314 ice_aqc_opc_alloc_res, NULL);
4318 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4326 * ice_free_res_cntr - free resource counter
4327 * @hw: pointer to the hardware structure
4328 * @type: type of resource
4329 * @alloc_shared: if set it is shared else dedicated
4330 * @num_items: number of entries to be freed for FD resource type
4331 * @counter_id: counter ID resource which needs to be freed
4334 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4337 struct ice_aqc_alloc_free_res_elem *buf;
4338 enum ice_status status;
4342 buf_len = sizeof(*buf);
4343 buf = (struct ice_aqc_alloc_free_res_elem *)
4344 ice_malloc(hw, buf_len);
4346 return ICE_ERR_NO_MEMORY;
4348 buf->num_elems = CPU_TO_LE16(num_items);
4349 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4350 ICE_AQC_RES_TYPE_M) | alloc_shared);
4351 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4353 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4354 ice_aqc_opc_free_res, NULL);
4356 ice_debug(hw, ICE_DBG_SW,
4357 "counter resource could not be freed\n");
4364 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4365 * @hw: pointer to the hardware structure
4366 * @counter_id: returns counter index
4368 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4370 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4371 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4376 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4377 * @hw: pointer to the hardware structure
4378 * @counter_id: counter index to be freed
4380 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4382 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4383 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4388 * ice_alloc_res_lg_act - add large action resource
4389 * @hw: pointer to the hardware structure
4390 * @l_id: large action ID to fill it in
4391 * @num_acts: number of actions to hold with a large action entry
4393 static enum ice_status
4394 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4396 struct ice_aqc_alloc_free_res_elem *sw_buf;
4397 enum ice_status status;
4400 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4401 return ICE_ERR_PARAM;
4403 /* Allocate resource for large action */
4404 buf_len = sizeof(*sw_buf);
4405 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4406 ice_malloc(hw, buf_len);
4408 return ICE_ERR_NO_MEMORY;
4410 sw_buf->num_elems = CPU_TO_LE16(1);
4412 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4413 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4414 * If num_acts is greater than 2, then use
4415 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4416 * The num_acts cannot exceed 4. This was ensured at the
4417 * beginning of the function.
4420 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4421 else if (num_acts == 2)
4422 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4424 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4426 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4427 ice_aqc_opc_alloc_res, NULL);
4429 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4431 ice_free(hw, sw_buf);
4436 * ice_add_mac_with_sw_marker - add filter with sw marker
4437 * @hw: pointer to the hardware structure
4438 * @f_info: filter info structure containing the MAC filter information
4439 * @sw_marker: sw marker to tag the Rx descriptor with
4442 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4445 struct ice_switch_info *sw = hw->switch_info;
4446 struct ice_fltr_mgmt_list_entry *m_entry;
4447 struct ice_fltr_list_entry fl_info;
4448 struct LIST_HEAD_TYPE l_head;
4449 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4450 enum ice_status ret;
4454 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4455 return ICE_ERR_PARAM;
4457 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4458 return ICE_ERR_PARAM;
4460 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4461 return ICE_ERR_PARAM;
4463 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4464 return ICE_ERR_PARAM;
4465 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4467 /* Add filter if it doesn't exist so then the adding of large
4468 * action always results in update
4471 INIT_LIST_HEAD(&l_head);
4472 fl_info.fltr_info = *f_info;
4473 LIST_ADD(&fl_info.list_entry, &l_head);
4475 entry_exists = false;
4476 ret = ice_add_mac(hw, &l_head);
4477 if (ret == ICE_ERR_ALREADY_EXISTS)
4478 entry_exists = true;
4482 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4483 ice_acquire_lock(rule_lock);
4484 /* Get the book keeping entry for the filter */
4485 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4489 /* If counter action was enabled for this rule then don't enable
4490 * sw marker large action
4492 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4493 ret = ICE_ERR_PARAM;
4497 /* if same marker was added before */
4498 if (m_entry->sw_marker_id == sw_marker) {
4499 ret = ICE_ERR_ALREADY_EXISTS;
4503 /* Allocate a hardware table entry to hold large act. Three actions
4504 * for marker based large action
4506 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4510 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4513 /* Update the switch rule to add the marker action */
4514 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4516 ice_release_lock(rule_lock);
4521 ice_release_lock(rule_lock);
4522 /* only remove entry if it did not exist previously */
4524 ret = ice_remove_mac(hw, &l_head);
4530 * ice_add_mac_with_counter - add filter with counter enabled
4531 * @hw: pointer to the hardware structure
4532 * @f_info: pointer to filter info structure containing the MAC filter
/* NOTE(review): structure mirrors ice_add_mac_with_sw_marker() above, but
 * attaches a VLAN counter (two-action large act) instead of a sw marker.
 * Several lines (counter_id/lg_act_id declarations, labels, braces) are
 * missing from this extraction -- confirm against upstream ice_switch.c.
 */
4536 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4538 struct ice_switch_info *sw = hw->switch_info;
4539 struct ice_fltr_mgmt_list_entry *m_entry;
4540 struct ice_fltr_list_entry fl_info;
4541 struct LIST_HEAD_TYPE l_head;
4542 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4543 enum ice_status ret;
/* Only VSI-forwarding MAC filters on a valid VSI qualify. */
4548 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4549 return ICE_ERR_PARAM;
4551 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4552 return ICE_ERR_PARAM;
4554 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4555 return ICE_ERR_PARAM;
4556 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4558 entry_exist = false;
4560 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4562 /* Add filter if it doesn't exist so then the adding of large
4563 * action always results in update
4565 INIT_LIST_HEAD(&l_head);
4567 fl_info.fltr_info = *f_info;
4568 LIST_ADD(&fl_info.list_entry, &l_head);
4570 ret = ice_add_mac(hw, &l_head);
4571 if (ret == ICE_ERR_ALREADY_EXISTS)
4576 ice_acquire_lock(rule_lock);
4577 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
/* Book-keeping entry must exist after a successful add. */
4579 ret = ICE_ERR_BAD_PTR;
4583 /* Don't enable counter for a filter for which sw marker was enabled */
4584 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4585 ret = ICE_ERR_PARAM;
4589 /* If a counter was already enabled then don't need to add again */
4590 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4591 ret = ICE_ERR_ALREADY_EXISTS;
4595 /* Allocate a hardware table entry to VLAN counter */
4596 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4600 /* Allocate a hardware table entry to hold large act. Two actions for
4601 * counter based large action
4603 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4607 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4610 /* Update the switch rule to add the counter action */
4611 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4613 ice_release_lock(rule_lock);
/* Error/exit path: release lock and undo the add if the filter was new. */
4618 ice_release_lock(rule_lock);
4619 /* only remove entry if it did not exist previously */
4621 ret = ice_remove_mac(hw, &l_head);
4626 /* This is mapping table entry that maps every word within a given protocol
4627 * structure to the real byte offset as per the specification of that
4629 * for example dst address is 3 words in ethertype header and corresponding
4630 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4631 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4632 * matching entry describing its field. This needs to be updated if new
4633 * structure is added to that union.
/* Each entry: protocol type -> per-16-bit-word byte offsets within that
 * protocol's header (tunnel headers like VXLAN/GENEVE start at offset 8,
 * i.e. past the UDP header words that precede them in the dummy packets).
 * Terminated by ICE_PROTOCOL_LAST.
 */
4635 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4636 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4637 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4638 { ICE_ETYPE_OL, { 0 } },
4639 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4640 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4641 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4642 26, 28, 30, 32, 34, 36, 38 } },
4643 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4644 26, 28, 30, 32, 34, 36, 38 } },
4645 { ICE_TCP_IL, { 0, 2 } },
4646 { ICE_UDP_OF, { 0, 2 } },
4647 { ICE_UDP_ILOS, { 0, 2 } },
4648 { ICE_SCTP_IL, { 0, 2 } },
4649 { ICE_VXLAN, { 8, 10, 12, 14 } },
4650 { ICE_GENEVE, { 8, 10, 12, 14 } },
4651 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4652 { ICE_NVGRE, { 0, 2, 4, 6 } },
4653 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4654 { ICE_PPPOE, { 0, 2, 4, 6 } },
4655 { ICE_PROTOCOL_LAST, { 0 } }
/* NOTE(review): closing "};" of this initializer fell on a line dropped by
 * the extraction.
 */
4658 /* The following table describes preferred grouping of recipes.
4659 * If a recipe that needs to be programmed is a superset or matches one of the
4660 * following combinations, then the recipe needs to be chained as per the
/* Maps software protocol types to hardware protocol IDs. Note that all
 * UDP-based tunnels (VXLAN, GENEVE, VXLAN-GPE, GTP) share ICE_UDP_OF_HW.
 * Terminated by ICE_PROTOCOL_LAST.
 */
4664 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4665 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4666 { ICE_MAC_IL, ICE_MAC_IL_HW },
4667 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4668 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4669 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4670 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4671 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4672 { ICE_TCP_IL, ICE_TCP_IL_HW },
4673 { ICE_UDP_OF, ICE_UDP_OF_HW },
4674 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4675 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4676 { ICE_VXLAN, ICE_UDP_OF_HW },
4677 { ICE_GENEVE, ICE_UDP_OF_HW },
4678 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4679 { ICE_NVGRE, ICE_GRE_OF_HW },
4680 { ICE_GTP, ICE_UDP_OF_HW },
4681 { ICE_PPPOE, ICE_PPPOE_HW },
4682 { ICE_PROTOCOL_LAST, 0 }
/* NOTE(review): closing "};" of this initializer fell on a dropped line. */
4686 * ice_find_recp - find a recipe
4687 * @hw: pointer to the hardware structure
4688 * @lkup_exts: extension sequence to match
4690 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4692 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4694 bool refresh_required = true;
4695 struct ice_sw_recipe *recp;
4698 /* Walk through existing recipes to find a match */
4699 recp = hw->switch_info->recp_list;
4700 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4701 /* If recipe was not created for this ID, in SW bookkeeping,
4702 * check if FW has an entry for this recipe. If the FW has an
4703 * entry update it in our SW bookkeeping and continue with the
4706 if (!recp[i].recp_created)
4707 if (ice_get_recp_frm_fw(hw,
4708 hw->switch_info->recp_list, i,
/* NOTE(review): the remaining arguments of ice_get_recp_frm_fw()
 * (presumably &refresh_required) are on dropped lines.
 */
4712 /* Skip inverse action recipes */
4713 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4714 ICE_AQ_RECIPE_ACT_INV_ACT)
4717 /* if number of words we are looking for match */
4718 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4719 struct ice_fv_word *a = lkup_exts->fv_words;
4720 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* O(p*q) unordered comparison: every requested (prot_id, off)
 * word must appear somewhere in the recipe's word list.
 */
4724 for (p = 0; p < lkup_exts->n_val_words; p++) {
4725 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4727 if (a[p].off == b[q].off &&
4728 a[p].prot_id == b[q].prot_id)
4729 /* Found the "p"th word in the
4734 /* After walking through all the words in the
4735 * "i"th recipe if "p"th word was not found then
4736 * this recipe is not what we are looking for.
4737 * So break out from this loop and try the next
4740 if (q >= recp[i].lkup_exts.n_val_words) {
4745 /* If for "i"th recipe the found was never set to false
4746 * then it means we found our match
4749 return i; /* Return the recipe ID */
4752 return ICE_MAX_NUM_RECIPES;
4756 * ice_prot_type_to_id - get protocol ID from protocol type
4757 * @type: protocol type
4758 * @id: pointer to variable that will receive the ID
4760 * Returns true if found, false otherwise
/* Linear search of ice_prot_id_tbl[]; on hit, writes the HW protocol ID
 * through @id. The "return true"/"return false" lines were dropped by the
 * extraction.
 */
4762 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4766 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4767 if (ice_prot_id_tbl[i].type == type) {
4768 *id = ice_prot_id_tbl[i].protocol_id;
4775 * ice_fill_valid_words - count valid words
4776 * @rule: advanced rule with lookup information
4777 * @lkup_exts: byte offset extractions of the words that are valid
4779 * calculate valid words in a lookup rule using mask value
4782 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4783 struct ice_prot_lkup_ext *lkup_exts)
/* Bail out if the rule's protocol type has no HW protocol ID mapping. */
4789 if (!ice_prot_type_to_id(rule->type, &prot_id))
4792 word = lkup_exts->n_val_words;
/* Scan the rule's mask union 16 bits at a time; every non-zero mask word
 * becomes one extraction entry (offset from ice_prot_ext, HW protocol ID,
 * and the mask itself).
 */
4794 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4795 if (((u16 *)&rule->m_u)[j] &&
4796 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4797 /* No more space to accommodate */
4798 if (word >= ICE_MAX_CHAIN_WORDS)
4800 lkup_exts->fv_words[word].off =
4801 ice_prot_ext[rule->type].offs[j];
/* NOTE(review): indexes ice_prot_id_tbl by rule->type rather than using
 * the prot_id fetched above -- valid only if table order matches the
 * enum; confirm against upstream.
 */
4802 lkup_exts->fv_words[word].prot_id =
4803 ice_prot_id_tbl[rule->type].protocol_id;
4804 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return the number of words added by this call. */
4808 ret_val = word - lkup_exts->n_val_words;
4809 lkup_exts->n_val_words = word;
4817 * ice_create_first_fit_recp_def - Create a recipe grouping
4818 * @hw: pointer to the hardware structure
4819 * @lkup_exts: an array of protocol header extractions
4820 * @rg_list: pointer to a list that stores new recipe groups
4821 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4823 * Using first fit algorithm, take all the words that are still not done
4824 * and start grouping them in 4-word groups. Each group makes up one
4827 static enum ice_status
4828 ice_create_first_fit_recp_def(struct ice_hw *hw,
4829 struct ice_prot_lkup_ext *lkup_exts,
4830 struct LIST_HEAD_TYPE *rg_list,
4833 struct ice_pref_recipe_group *grp = NULL;
4838 /* Walk through every word in the rule to check if it is not done. If so
4839 * then this word needs to be part of a new recipe.
4841 for (j = 0; j < lkup_exts->n_val_words; j++)
4842 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists yet or the current one is
 * full (ICE_NUM_WORDS_RECIPE pairs). Condition's first operand is
 * on a dropped line.
 */
4844 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4845 struct ice_recp_grp_entry *entry;
4847 entry = (struct ice_recp_grp_entry *)
4848 ice_malloc(hw, sizeof(*entry));
4850 return ICE_ERR_NO_MEMORY;
4851 LIST_ADD(&entry->l_entry, rg_list);
4852 grp = &entry->r_group;
/* NOTE(review): the (*recp_cnt)++ for the new group is presumably
 * on a dropped line -- confirm against upstream.
 */
/* Append this word's (prot_id, off, mask) triple to the group. */
4856 grp->pairs[grp->n_val_pairs].prot_id =
4857 lkup_exts->fv_words[j].prot_id;
4858 grp->pairs[grp->n_val_pairs].off =
4859 lkup_exts->fv_words[j].off;
4860 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4868 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4869 * @hw: pointer to the hardware structure
4870 * @fv_list: field vector with the extraction sequence information
4871 * @rg_list: recipe groupings with protocol-offset pairs
4873 * Helper function to fill in the field vector indices for protocol-offset
4874 * pairs. These indexes are then ultimately programmed into a recipe.
4876 static enum ice_status
4877 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4878 struct LIST_HEAD_TYPE *rg_list)
4880 struct ice_sw_fv_list_entry *fv;
4881 struct ice_recp_grp_entry *rg;
4882 struct ice_fv_word *fv_ext;
/* Empty fv_list: nothing to index (return value on a dropped line). */
4884 if (LIST_EMPTY(fv_list))
/* Only the FIRST field vector in the list is consulted; all groups are
 * indexed against its extraction words.
 */
4887 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4888 fv_ext = fv->fv_ptr->ew;
4890 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4893 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4894 struct ice_fv_word *pr;
4899 pr = &rg->r_group.pairs[i];
4900 mask = rg->r_group.mask[i];
/* Scan the FV's extraction words for a matching
 * (prot_id, off) pair; es.fvw is the FV width.
 */
4902 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4903 if (fv_ext[j].prot_id == pr->prot_id &&
4904 fv_ext[j].off == pr->off) {
4907 /* Store index of field vector */
4909 /* Mask is given by caller as big
4910 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask (BE -> LE). */
4913 rg->fv_mask[i] = mask << 8 | mask >> 8;
4917 /* Protocol/offset could not be found, caller gave an
4921 return ICE_ERR_PARAM;
4929 * ice_find_free_recp_res_idx - find free result indexes for recipe
4930 * @hw: pointer to hardware structure
4931 * @profiles: bitmap of profiles that will be associated with the new recipe
4932 * @free_idx: pointer to variable to receive the free index bitmap
4934 * The algorithm used here is:
4935 * 1. When creating a new recipe, create a set P which contains all
4936 * Profiles that will be associated with our new recipe
4938 * 2. For each Profile p in set P:
4939 * a. Add all recipes associated with Profile p into set R
4940 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4941 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4942 * i. Or just assume they all have the same possible indexes:
4944 * i.e., PossibleIndexes = 0x0000F00000000000
4946 * 3. For each Recipe r in set R:
4947 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4948 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4950 * FreeIndexes will contain the bits indicating the indexes free for use,
4951 * then the code needs to update the recipe[r].used_result_idx_bits to
4952 * indicate which indexes were selected for use by this recipe.
4955 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
4956 ice_bitmap_t *free_idx)
4958 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
4959 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
4960 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4964 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
4965 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
4966 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
4967 ice_init_possible_res_bm(possible_idx);
/* Restrict possible_idx to the bits allowed by ICE_POSSIBLE_RES_IDX. */
4969 for (bit = 0; bit < ICE_MAX_FV_WORDS; bit++)
4970 if (ICE_POSSIBLE_RES_IDX & BIT_ULL(bit))
4971 ice_set_bit(bit, possible_idx);
4973 /* For each profile we are going to associate the recipe with, add the
4974 * recipes that are associated with that profile. This will give us
4975 * the set of recipes that our recipe may collide with.
4978 while (ICE_MAX_NUM_PROFILES >
4979 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
4980 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
4981 ICE_MAX_NUM_RECIPES);
/* NOTE(review): the "bit++" advancing the scan is presumably on a
 * dropped line inside this loop body.
 */
4986 /* For each recipe that our new recipe may collide with, determine
4987 * which indexes have been used.
4989 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
4990 if (ice_is_bit_set(recipes, bit))
4991 ice_or_bitmap(used_idx, used_idx,
4992 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible). */
4995 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4997 /* return number of free indexes */
4999 while (ICE_MAX_FV_WORDS >
5000 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5009 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5010 * @hw: pointer to hardware structure
5011 * @rm: recipe management list entry
5012 * @match_tun: if field vector index for tunnel needs to be programmed
5013 * @profiles: bitmap of profiles that will be assocated.
/* NOTE(review): large function, heavily truncated by the extraction (error
 * labels, loop counters, several closing braces are missing). Flow: allocate
 * temp buffers -> read existing recipes from FW -> program one recipe per
 * group (plus a chaining "root" recipe when n_grp_count > 1) -> submit via
 * ice_aq_add_recipe() -> mirror results into SW bookkeeping.
 */
5015 static enum ice_status
5016 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5017 bool match_tun, ice_bitmap_t *profiles)
5019 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5020 struct ice_aqc_recipe_data_elem *tmp;
5021 struct ice_aqc_recipe_data_elem *buf;
5022 struct ice_recp_grp_entry *entry;
5023 enum ice_status status;
5029 /* When more than one recipe are required, another recipe is needed to
5030 * chain them together. Matching a tunnel metadata ID takes up one of
5031 * the match fields in the chaining recipe reducing the number of
5032 * chained recipes by one.
5034 /* check number of free result indices */
5035 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5036 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5038 if (rm->n_grp_count > 1) {
/* Chained recipes each need a free result index to pass data along. */
5039 if (rm->n_grp_count > free_res_idx)
5040 return ICE_ERR_MAX_LIMIT;
/* Scratch buffer for the FW's current recipe table. */
5045 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5046 ICE_MAX_NUM_RECIPES,
5049 return ICE_ERR_NO_MEMORY;
/* Output buffer: one AQ element per recipe group being created. */
5051 buf = (struct ice_aqc_recipe_data_elem *)
5052 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5054 status = ICE_ERR_NO_MEMORY;
5058 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5059 recipe_count = ICE_MAX_NUM_RECIPES;
5060 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5062 if (status || recipe_count == 0)
5065 /* Allocate the recipe resources, and configure them according to the
5066 * match fields from protocol headers and extracted field vectors.
5068 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5069 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5072 status = ice_alloc_recipe(hw, &entry->rid);
5076 /* Clear the result index of the located recipe, as this will be
5077 * updated, if needed, later in the recipe creation process.
5079 tmp[0].content.result_indx = 0;
5081 buf[recps] = tmp[0];
5082 buf[recps].recipe_indx = (u8)entry->rid;
5083 /* if the recipe is a non-root recipe RID should be programmed
5084 * as 0 for the rules to be applied correctly.
5086 buf[recps].content.rid = 0;
5087 ice_memset(&buf[recps].content.lkup_indx, 0,
5088 sizeof(buf[recps].content.lkup_indx),
5091 /* All recipes use look-up index 0 to match switch ID. */
5092 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5093 buf[recps].content.mask[0] =
5094 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5095 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5098 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5099 buf[recps].content.lkup_indx[i] = 0x80;
5100 buf[recps].content.mask[i] = 0;
/* Fill the real lookup indices/masks for this group's words. */
5103 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5104 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5105 buf[recps].content.mask[i + 1] =
5106 CPU_TO_LE16(entry->fv_mask[i]);
5109 if (rm->n_grp_count > 1) {
5110 /* Checks to see if there really is a valid result index
5113 if (chain_idx >= ICE_MAX_FV_WORDS) {
5114 ice_debug(hw, ICE_DBG_SW,
5115 "No chain index available\n");
5116 status = ICE_ERR_MAX_LIMIT;
/* Consume one result index for chaining and move to the next. */
5120 entry->chain_idx = chain_idx;
5121 buf[recps].content.result_indx =
5122 ICE_AQ_RECIPE_RESULT_EN |
5123 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5124 ICE_AQ_RECIPE_RESULT_DATA_M);
5125 ice_clear_bit(chain_idx, result_idx_bm);
5126 chain_idx = ice_find_first_bit(result_idx_bm,
5130 /* fill recipe dependencies */
5131 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5132 ICE_MAX_NUM_RECIPES);
5133 ice_set_bit(buf[recps].recipe_indx,
5134 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5135 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is itself the root recipe. */
5139 if (rm->n_grp_count == 1) {
5140 rm->root_rid = buf[0].recipe_indx;
5141 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5142 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5143 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5144 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5145 sizeof(buf[0].recipe_bitmap),
5146 ICE_NONDMA_TO_NONDMA);
5148 status = ICE_ERR_BAD_PTR;
5151 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5152 * the recipe which is getting created if specified
5153 * by user. Usually any advanced switch filter, which results
5154 * into new extraction sequence, ended up creating a new recipe
5155 * of type ROOT and usually recipes are associated with profiles
5156 * Switch rule referreing newly created recipe, needs to have
5157 * either/or 'fwd' or 'join' priority, otherwise switch rule
5158 * evaluation will not happen correctly. In other words, if
5159 * switch rule to be evaluated on priority basis, then recipe
5160 * needs to have priority, otherwise it will be evaluated last.
5162 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case (else branch): build the chaining root recipe. */
5164 struct ice_recp_grp_entry *last_chain_entry;
5167 /* Allocate the last recipe that will chain the outcomes of the
5168 * other recipes together
5170 status = ice_alloc_recipe(hw, &rid);
5174 buf[recps].recipe_indx = (u8)rid;
5175 buf[recps].content.rid = (u8)rid;
5176 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5177 /* the new entry created should also be part of rg_list to
5178 * make sure we have complete recipe
5180 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5181 sizeof(*last_chain_entry));
5182 if (!last_chain_entry) {
5183 status = ICE_ERR_NO_MEMORY;
5186 last_chain_entry->rid = rid;
5187 ice_memset(&buf[recps].content.lkup_indx, 0,
5188 sizeof(buf[recps].content.lkup_indx),
5190 /* All recipes use look-up index 0 to match switch ID. */
5191 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5192 buf[recps].content.mask[0] =
5193 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5194 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5195 buf[recps].content.lkup_indx[i] =
5196 ICE_AQ_RECIPE_LKUP_IGNORE;
5197 buf[recps].content.mask[i] = 0;
5201 /* update r_bitmap with the recp that is used for chaining */
5202 ice_set_bit(rid, rm->r_bitmap);
5203 /* this is the recipe that chains all the other recipes so it
5204 * should not have a chaining ID to indicate the same
5206 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Wire every sub-recipe's chain result index into the root's
 * lookup slots so the root matches their combined outcome.
 */
5207 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5209 last_chain_entry->fv_idx[i] = entry->chain_idx;
5210 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5211 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5212 ice_set_bit(entry->rid, rm->r_bitmap);
5214 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5215 if (sizeof(buf[recps].recipe_bitmap) >=
5216 sizeof(rm->r_bitmap)) {
5217 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5218 sizeof(buf[recps].recipe_bitmap),
5219 ICE_NONDMA_TO_NONDMA);
5221 status = ICE_ERR_BAD_PTR;
5224 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5226 /* To differentiate among different UDP tunnels, a meta data ID
/* match_tun: also match the tunnel-flag metadata word. */
5230 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5231 buf[recps].content.mask[i] =
5232 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5236 rm->root_rid = (u8)rid;
/* Submit all assembled recipes to FW under the change lock. */
5238 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5242 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5243 ice_release_change_lock(hw);
5247 /* Every recipe that just got created add it to the recipe
5250 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5251 struct ice_switch_info *sw = hw->switch_info;
5252 bool is_root, idx_found = false;
5253 struct ice_sw_recipe *recp;
5254 u16 idx, buf_idx = 0;
5256 /* find buffer index for copying some data */
5257 for (idx = 0; idx < rm->n_grp_count; idx++)
5258 if (buf[idx].recipe_indx == entry->rid) {
5264 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping. */
5268 recp = &sw->recp_list[entry->rid];
5269 is_root = (rm->root_rid == entry->rid);
5270 recp->is_root = is_root;
5272 recp->root_rid = entry->rid;
5273 recp->big_recp = (is_root && rm->n_grp_count > 1);
5275 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5276 entry->r_group.n_val_pairs *
5277 sizeof(struct ice_fv_word),
5278 ICE_NONDMA_TO_NONDMA);
5280 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5281 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5283 /* Copy non-result fv index values and masks to recipe. This
5284 * call will also update the result recipe bitmask.
5286 ice_collect_result_idx(&buf[buf_idx], recp);
5288 /* for non-root recipes, also copy to the root, this allows
5289 * easier matching of a complete chained recipe
5292 ice_collect_result_idx(&buf[buf_idx],
5293 &sw->recp_list[rm->root_rid]);
5295 recp->n_ext_words = entry->r_group.n_val_pairs;
5296 recp->chain_idx = entry->chain_idx;
5297 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5298 recp->n_grp_count = rm->n_grp_count;
5299 recp->tun_type = rm->tun_type;
5300 recp->recp_created = true;
5315 * ice_create_recipe_group - creates recipe group
5316 * @hw: pointer to hardware structure
5317 * @rm: recipe management list entry
5318 * @lkup_exts: lookup elements
/* Delegates grouping to ice_create_first_fit_recp_def(), then copies the
 * extraction words and masks from lkup_exts into the recipe management
 * entry. (Status check / return lines fell on dropped lines.)
 */
5320 static enum ice_status
5321 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5322 struct ice_prot_lkup_ext *lkup_exts)
5324 enum ice_status status;
5327 rm->n_grp_count = 0;
5329 /* Create recipes for words that are marked not done by packing them
5332 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5333 &rm->rg_list, &recp_count);
5335 rm->n_grp_count += recp_count;
5336 rm->n_ext_words = lkup_exts->n_val_words;
5337 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5338 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5339 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5340 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5347 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5348 * @hw: pointer to hardware structure
5349 * @lkups: lookup elements or match criteria for the advanced recipe, one
5350 * structure per protocol header
5351 * @lkups_cnt: number of protocols
5352 * @bm: bitmap of field vectors to consider
5353 * @fv_list: pointer to a list that holds the returned field vectors
5355 static enum ice_status
5356 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5357 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5359 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element. */
5363 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5365 return ICE_ERR_NO_MEMORY;
5367 for (i = 0; i < lkups_cnt; i++)
5368 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5369 status = ICE_ERR_CFG;
5373 /* Find field vectors that include all specified protocol types */
5374 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* Cleanup label (dropped line) frees prot_ids on all paths. */
5377 ice_free(hw, prot_ids);
5382 * ice_add_special_words - Add words that are not protocols, such as metadata
5383 * @rinfo: other information regarding the rule e.g. priority and action info
5384 * @lkup_exts: lookup word structure
5386 static enum ice_status
5387 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5388 struct ice_prot_lkup_ext *lkup_exts)
5390 /* If this is a tunneled packet, then add recipe index to match the
5391 * tunnel bit in the packet metadata flags.
5393 if (rinfo->tun_type != ICE_NON_TUN) {
5394 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one metadata word (tunnel-flag MDID) to the extraction list. */
5395 u8 word = lkup_exts->n_val_words++;
5397 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5398 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5400 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No room left for the metadata word. */
5402 return ICE_ERR_MAX_LIMIT;
5409 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5410 * @hw: pointer to hardware structure
5411 * @rinfo: other information regarding the rule e.g. priority and action info
5412 * @bm: pointer to memory for returning the bitmap of field vectors
5415 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5418 enum ice_prof_type type;
/* Map the rule's tunnel type to a profile class, then fetch the bitmap of
 * field vectors belonging to that class. (case labels for ICE_NON_TUN and
 * the break statements fell on dropped lines.)
 */
5420 switch (rinfo->tun_type) {
5422 type = ICE_PROF_NON_TUN;
5424 case ICE_ALL_TUNNELS:
5425 type = ICE_PROF_TUN_ALL;
5427 case ICE_SW_TUN_VXLAN_GPE:
5428 case ICE_SW_TUN_GENEVE:
5429 case ICE_SW_TUN_VXLAN:
5430 case ICE_SW_TUN_UDP:
5431 case ICE_SW_TUN_GTP:
5432 type = ICE_PROF_TUN_UDP;
5434 case ICE_SW_TUN_NVGRE:
5435 type = ICE_PROF_TUN_GRE;
5437 case ICE_SW_TUN_PPPOE:
5438 type = ICE_PROF_TUN_PPPOE;
5440 case ICE_SW_TUN_AND_NON_TUN:
/* default falls through to ICE_PROF_ALL as well. */
5442 type = ICE_PROF_ALL;
5446 ice_get_sw_fv_bitmap(hw, type, bm);
5450 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5451 * @hw: pointer to hardware structure
5452 * @lkups: lookup elements or match criteria for the advanced recipe, one
5453 * structure per protocol header
5454 * @lkups_cnt: number of protocols
5455 * @rinfo: other information regarding the rule e.g. priority and action info
5456 * @rid: return the recipe ID of the recipe created
/* NOTE(review): top-level driver for advanced recipe creation. Heavily
 * truncated extraction -- error labels and some cleanup code are missing.
 * Flow: validate lookups -> collect candidate field vectors -> group words
 * into recipes -> reuse an existing recipe if one matches, else program a
 * new one and associate it with all relevant profiles.
 */
5458 static enum ice_status
5459 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5460 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5462 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5463 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5464 struct ice_prot_lkup_ext *lkup_exts;
5465 struct ice_recp_grp_entry *r_entry;
5466 struct ice_sw_fv_list_entry *fvit;
5467 struct ice_recp_grp_entry *r_tmp;
5468 struct ice_sw_fv_list_entry *tmp;
5469 enum ice_status status = ICE_SUCCESS;
5470 struct ice_sw_recipe *rm;
5471 bool match_tun = false;
5475 return ICE_ERR_PARAM;
5477 lkup_exts = (struct ice_prot_lkup_ext *)
5478 ice_malloc(hw, sizeof(*lkup_exts));
5480 return ICE_ERR_NO_MEMORY;
5482 /* Determine the number of words to be matched and if it exceeds a
5483 * recipe's restrictions
5485 for (i = 0; i < lkups_cnt; i++) {
5488 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5489 status = ICE_ERR_CFG;
5490 goto err_free_lkup_exts;
/* count comes from ice_fill_valid_words(); a non-positive count
 * (check on a dropped line) is a configuration error.
 */
5493 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5495 status = ICE_ERR_CFG;
5496 goto err_free_lkup_exts;
5500 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5502 status = ICE_ERR_NO_MEMORY;
5503 goto err_free_lkup_exts;
5506 /* Get field vectors that contain fields extracted from all the protocol
5507 * headers being programmed.
5509 INIT_LIST_HEAD(&rm->fv_list);
5510 INIT_LIST_HEAD(&rm->rg_list);
5512 /* Get bitmap of field vectors (profiles) that are compatible with the
5513 * rule request; only these will be searched in the subsequent call to
5516 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5518 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5522 /* Group match words into recipes using preferred recipe grouping
5525 status = ice_create_recipe_group(hw, rm, lkup_exts);
5529 /* There is only profile for UDP tunnels. So, it is necessary to use a
5530 * metadata ID flag to differentiate different tunnel types. A separate
5531 * recipe needs to be used for the metadata.
5533 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5534 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5535 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
/* (match_tun = true assignment is on a dropped line.) */
5538 /* set the recipe priority if specified */
5539 rm->priority = rinfo->priority ? rinfo->priority : 0;
5541 /* Find offsets from the field vector. Pick the first one for all the
5544 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5548 /* get bitmap of all profiles the recipe will be associated with */
5549 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5550 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5552 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5553 ice_set_bit((u16)fvit->profile_id, profiles);
5556 /* Create any special protocol/offset pairs, such as looking at tunnel
5557 * bits by extracting metadata
5559 status = ice_add_special_words(rinfo, lkup_exts);
5561 goto err_free_lkup_exts;
5563 /* Look for a recipe which matches our requested fv / mask list */
5564 *rid = ice_find_recp(hw, lkup_exts);
5565 if (*rid < ICE_MAX_NUM_RECIPES)
5566 /* Success if found a recipe that match the existing criteria */
/* (goto to the unwind path is on a dropped line.) */
5569 /* Recipe we need does not exist, add a recipe */
5570 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5574 /* Associate all the recipes created with all the profiles in the
5575 * common field vector.
5577 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5579 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge existing recipe-to-profile associations with ours, then
 * write the union back, all under the change lock.
 */
5582 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5583 (u8 *)r_bitmap, NULL);
5587 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5588 ICE_MAX_NUM_RECIPES);
5589 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5593 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5596 ice_release_change_lock(hw);
5601 /* Update profile to recipe bitmap array */
5602 ice_memcpy(profile_to_recipe[fvit->profile_id], rm->r_bitmap,
5603 sizeof(rm->r_bitmap), ICE_NONDMA_TO_NONDMA);
5605 /* Update recipe to profile bitmap array */
5606 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5607 if (ice_is_bit_set(rm->r_bitmap, j))
5608 ice_set_bit((u16)fvit->profile_id,
5609 recipe_to_profile[j]);
5612 *rid = rm->root_rid;
5613 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5614 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Unwind path (label dropped): free group entries, fv list entries,
 * the recipe's root buffer, rm itself, and lkup_exts.
 */
5616 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5617 ice_recp_grp_entry, l_entry) {
5618 LIST_DEL(&r_entry->l_entry);
5619 ice_free(hw, r_entry);
5622 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5624 LIST_DEL(&fvit->list_entry);
5629 ice_free(hw, rm->root_buf);
5634 ice_free(hw, lkup_exts);
5640 * ice_find_dummy_packet - find dummy packet by tunnel type
5642 * @lkups: lookup elements or match criteria for the advanced recipe, one
5643 * structure per protocol header
5644 * @lkups_cnt: number of protocols
5645 * @tun_type: tunnel type from the match criteria
5646 * @pkt: dummy packet to fill according to filter match criteria
5647 * @pkt_len: packet length of dummy packet
5648 * @offsets: pointer to receive the pointer to the offsets for the packet
5651 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5652 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5654 const struct ice_dummy_pkt_offsets **offsets)
5656 bool tcp = false, udp = false, ipv6 = false;
/* GTP and PPPoE have dedicated templates; select them immediately
 * (the "return" after each template assignment is on a dropped line).
 */
5659 if (tun_type == ICE_SW_TUN_GTP) {
5660 *pkt = dummy_udp_gtp_packet;
5661 *pkt_len = sizeof(dummy_udp_gtp_packet);
5662 *offsets = dummy_udp_gtp_packet_offsets;
5665 if (tun_type == ICE_SW_TUN_PPPOE) {
5666 *pkt = dummy_pppoe_packet;
5667 *pkt_len = sizeof(dummy_pppoe_packet);
5668 *offsets = dummy_pppoe_packet_offsets;
/* Inspect lookups to learn which L3/L4 flavors the rule matches. */
5671 for (i = 0; i < lkups_cnt; i++) {
5672 if (lkups[i].type == ICE_UDP_ILOS)
5674 else if (lkups[i].type == ICE_TCP_IL)
5676 else if (lkups[i].type == ICE_IPV6_OFOS)
5680 if (tun_type == ICE_ALL_TUNNELS) {
5681 *pkt = dummy_gre_udp_packet;
5682 *pkt_len = sizeof(dummy_gre_udp_packet);
5683 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: inner TCP vs UDP template (TCP condition on a dropped line). */
5687 if (tun_type == ICE_SW_TUN_NVGRE) {
5689 *pkt = dummy_gre_tcp_packet;
5690 *pkt_len = sizeof(dummy_gre_tcp_packet);
5691 *offsets = dummy_gre_tcp_packet_offsets;
5695 *pkt = dummy_gre_udp_packet;
5696 *pkt_len = sizeof(dummy_gre_udp_packet);
5697 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels: pick inner-TCP or inner-UDP UDP-tunnel template. */
5701 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5702 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5704 *pkt = dummy_udp_tun_tcp_packet;
5705 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5706 *offsets = dummy_udp_tun_tcp_packet_offsets;
5710 *pkt = dummy_udp_tun_udp_packet;
5711 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5712 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled: choose among plain UDP/TCP over IPv4/IPv6 templates
 * based on the flags gathered above; TCP-over-IPv4 is the fallback.
 */
5717 *pkt = dummy_udp_packet;
5718 *pkt_len = sizeof(dummy_udp_packet);
5719 *offsets = dummy_udp_packet_offsets;
5721 } else if (udp && ipv6) {
5722 *pkt = dummy_udp_ipv6_packet;
5723 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5724 *offsets = dummy_udp_ipv6_packet_offsets;
5726 } else if ((tcp && ipv6) || ipv6) {
5727 *pkt = dummy_tcp_ipv6_packet;
5728 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5729 *offsets = dummy_tcp_ipv6_packet_offsets;
5733 *pkt = dummy_tcp_packet;
5734 *pkt_len = sizeof(dummy_tcp_packet);
5735 *offsets = dummy_tcp_packet_offsets;
5739 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5741 * @lkups: lookup elements or match criteria for the advanced recipe, one
5742 * structure per protocol header
5743 * @lkups_cnt: number of protocols
5744 * @s_rule: stores rule information from the match criteria
5745 * @dummy_pkt: dummy packet to fill according to filter match criteria
5746 * @pkt_len: packet length of dummy packet
5747 * @offsets: offset info for the dummy packet
/* Copies the dummy packet into the switch rule buffer and overlays the
 * caller's match values word-by-word, writing only the bits selected by
 * each lookup element's mask. Returns ICE_ERR_PARAM if a lookup type has
 * no entry in @offsets or is not a supported header type.
 */
5749 static enum ice_status
5750 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5751 struct ice_aqc_sw_rules_elem *s_rule,
5752 const u8 *dummy_pkt, u16 pkt_len,
5753 const struct ice_dummy_pkt_offsets *offsets)
5758 /* Start with a packet with a pre-defined/dummy content. Then, fill
5759 * in the header values to be looked up or matched.
5761 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5763 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5765 for (i = 0; i < lkups_cnt; i++) {
5766 enum ice_protocol_type type;
5767 u16 offset = 0, len = 0, j;
5770 /* find the start of this layer; it should be found since this
5771 * was already checked when searching for the dummy packet
5773 type = lkups[i].type;
5774 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5775 if (type == offsets[j].type) {
5776 offset = offsets[j].offset;
5781 /* this should never happen in a correct calling sequence */
5783 return ICE_ERR_PARAM;
/* map the lookup type to the byte length of its header */
5785 switch (lkups[i].type) {
5788 len = sizeof(struct ice_ether_hdr);
5791 len = sizeof(struct ice_ethtype_hdr);
5795 len = sizeof(struct ice_ipv4_hdr);
5799 len = sizeof(struct ice_ipv6_hdr);
5804 len = sizeof(struct ice_l4_hdr);
5807 len = sizeof(struct ice_sctp_hdr);
5810 len = sizeof(struct ice_nvgre);
5815 len = sizeof(struct ice_udp_tnl_hdr);
5819 len = sizeof(struct ice_udp_gtp_hdr);
5822 return ICE_ERR_PARAM;
5825 /* the length should be a word multiple */
5826 if (len % ICE_BYTES_PER_WORD)
5829 /* We have the offset to the header start, the length, the
5830 * caller's header values and mask. Use this information to
5831 * copy the data into the dummy packet appropriately based on
5832 * the mask. Note that we need to only write the bits as
5833 * indicated by the mask to make sure we don't improperly write
5834 * over any significant packet data.
5836 for (j = 0; j < len / sizeof(u16); j++)
5837 if (((u16 *)&lkups[i].m_u)[j])
5838 ((u16 *)(pkt + offset))[j] =
5839 (((u16 *)(pkt + offset))[j] &
5840 ~((u16 *)&lkups[i].m_u)[j]) |
5841 (((u16 *)&lkups[i].h_u)[j] &
5842 ((u16 *)&lkups[i].m_u)[j]);
/* record the final header length in the rule element */
5845 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5851 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5852 * @hw: pointer to the hardware structure
5853 * @tun_type: tunnel type
5854 * @pkt: dummy packet to fill in
5855 * @offsets: offset info for the dummy packet
/* For UDP-encapsulated tunnel types, looks up the currently-open tunnel
 * port (VXLAN or GENEVE) and patches it into the outer UDP destination
 * port of the already-built dummy packet.
 */
5857 static enum ice_status
5858 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5859 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5864 case ICE_SW_TUN_AND_NON_TUN:
5865 case ICE_SW_TUN_VXLAN_GPE:
5866 case ICE_SW_TUN_VXLAN:
5867 case ICE_SW_TUN_UDP:
5868 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
5872 case ICE_SW_TUN_GENEVE:
5873 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
5878 /* Nothing needs to be done for this tunnel type */
5882 /* Find the outer UDP protocol header and insert the port number */
5883 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5884 if (offsets[i].type == ICE_UDP_OF) {
5885 struct ice_l4_hdr *hdr;
5888 offset = offsets[i].offset;
5889 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* NOTE(review): open-coded 16-bit byte swap rather than CPU_TO_BE16;
 * this is only the correct network-order conversion on little-endian
 * hosts — TODO confirm intent for big-endian platforms.
 */
5890 hdr->dst_port = open_port << 8 | open_port >> 8;
5900 * ice_find_adv_rule_entry - Search a rule entry
5901 * @hw: pointer to the hardware structure
5902 * @lkups: lookup elements or match criteria for the advanced recipe, one
5903 * structure per protocol header
5904 * @lkups_cnt: number of protocols
5905 * @recp_id: recipe ID for which we are finding the rule
5906 * @rinfo: other information regarding the rule e.g. priority and action info
5908 * Helper function to search for a given advanced rule entry
5909 * Returns pointer to entry storing the rule if found
/* Walks the filter-rule list of @recp_id and returns the entry whose lookup
 * elements (count and contents, compared with memcmp) and rule info (sw_act
 * flag, tunnel type, ...) match the caller's; NULL-equivalent fallthrough
 * otherwise (tail of function not visible here).
 */
5911 static struct ice_adv_fltr_mgmt_list_entry *
5912 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5913 u16 lkups_cnt, u8 recp_id,
5914 struct ice_adv_rule_info *rinfo)
5916 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5917 struct ice_switch_info *sw = hw->switch_info;
5920 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5921 ice_adv_fltr_mgmt_list_entry, list_entry) {
5922 bool lkups_matched = true;
/* cheap rejection first: element counts must agree before memcmp */
5924 if (lkups_cnt != list_itr->lkups_cnt)
5926 for (i = 0; i < list_itr->lkups_cnt; i++)
5927 if (memcmp(&list_itr->lkups[i], &lkups[i],
5929 lkups_matched = false;
/* lookups matched — also require matching action flag and tunnel type */
5932 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5933 rinfo->tun_type == list_itr->rule_info.tun_type &&
5941 * ice_adv_add_update_vsi_list
5942 * @hw: pointer to the hardware structure
5943 * @m_entry: pointer to current adv filter management list entry
5944 * @cur_fltr: filter information from the book keeping entry
5945 * @new_fltr: filter information with the new VSI to be added
5947 * Call AQ command to add or update previously created VSI list with new VSI.
5949 * Helper function to do book keeping associated with adding filter information
5950 * The algorithm to do the book keeping is described below:
5951 * When a VSI needs to subscribe to a given advanced filter
5952 * if only one VSI has been added till now
5953 * Allocate a new VSI list and add two VSIs
5954 * to this list using switch rule command
5955 * Update the previously created switch rule with the
5956 * newly created VSI list ID
5957 * if a VSI list was previously created
5958 * Add the new VSI to the previously created VSI list set
5959 * using the update switch rule command
/* Adds a new VSI subscriber to an existing advanced filter: either creates
 * a 2-entry VSI list (converting the rule from "fwd to VSI" to "fwd to VSI
 * list") when this is the second subscriber, or appends the VSI to the
 * already-existing VSI list. Unsupported action combinations return
 * ICE_ERR_NOT_IMPL; duplicates return ICE_ERR_ALREADY_EXISTS.
 */
5961 static enum ice_status
5962 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5963 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5964 struct ice_adv_rule_info *cur_fltr,
5965 struct ice_adv_rule_info *new_fltr)
5967 enum ice_status status;
5968 u16 vsi_list_id = 0;
/* queue/queue-group actions cannot be aggregated into a VSI list */
5970 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5971 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5972 return ICE_ERR_NOT_IMPL;
5974 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5975 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5976 return ICE_ERR_ALREADY_EXISTS;
5978 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5979 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5980 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5981 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5982 return ICE_ERR_NOT_IMPL;
5984 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5985 /* Only one entry existed in the mapping and it was not already
5986 * a part of a VSI list. So, create a VSI list with the old and
5989 struct ice_fltr_info tmp_fltr;
5990 u16 vsi_handle_arr[2];
5992 /* A rule already exists with the new VSI being added */
5993 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5994 new_fltr->sw_act.fwd_id.hw_vsi_id)
5995 return ICE_ERR_ALREADY_EXISTS;
5997 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5998 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5999 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
/* NOTE(review): only three tmp_fltr fields are set before the call
 * below; the remaining fields are uninitialized stack data — TODO
 * confirm whether an ice_memset() of tmp_fltr is required here.
 */
6005 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6006 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6007 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6008 /* Update the previous switch rule of "forward to VSI" to
6011 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* record the new list in the book-keeping entry */
6015 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6016 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6017 m_entry->vsi_list_info =
6018 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6021 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6023 if (!m_entry->vsi_list_info)
6026 /* A rule already exists with the new VSI being added */
6027 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6030 /* Update the previously created VSI list set with
6031 * the new VSI ID passed in
6033 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6035 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6037 ice_aqc_opc_update_sw_rules,
6039 /* update VSI list mapping info with new VSI ID */
6041 ice_set_bit(vsi_handle,
6042 m_entry->vsi_list_info->vsi_map);
6045 m_entry->vsi_count++;
6050 * ice_add_adv_rule - helper function to create an advanced switch rule
6051 * @hw: pointer to the hardware structure
6052 * @lkups: information on the words that needs to be looked up. All words
6053 * together makes one recipe
6054 * @lkups_cnt: num of entries in the lkups array
6055 * @rinfo: other information related to the rule that needs to be programmed
6056 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6057 * ignored in case of error.
6059 * This function can program only 1 rule at a time. The lkups is used to
6060 * describe the all the words that forms the "lookup" portion of the recipe.
6061 * These words can span multiple protocols. Callers to this function need to
6062 * pass in a list of protocol headers with lookup information along and mask
6063 * that determines which words are valid from the given protocol header.
6064 * rinfo describes other information related to this rule such as forwarding
6065 * IDs, priority of this rule, etc.
/* Programs one advanced switch rule: validates the lookup words, finds and
 * fills a dummy packet, creates/reuses a recipe, builds the single-action
 * switch rule element, sends it via AQ, and records it in the book-keeping
 * list. On a pre-existing rule, only the VSI list is updated.
 */
6068 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6069 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6070 struct ice_rule_query_data *added_entry)
6072 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6073 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6074 const struct ice_dummy_pkt_offsets *pkt_offsets;
6075 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6076 struct LIST_HEAD_TYPE *rule_head;
6077 struct ice_switch_info *sw;
6078 enum ice_status status;
6079 const u8 *pkt = NULL;
6085 return ICE_ERR_PARAM;
6087 /* get # of words we need to match */
6089 for (i = 0; i < lkups_cnt; i++) {
6092 ptr = (u16 *)&lkups[i].m_u;
6093 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* total masked words must be non-zero and fit in one recipe chain */
6097 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6098 return ICE_ERR_PARAM;
6100 /* make sure that we can locate a dummy packet */
6101 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6104 status = ICE_ERR_PARAM;
6105 goto err_ice_add_adv_rule;
/* only these four filter actions are supported for advanced rules */
6108 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6109 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6110 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6111 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6114 vsi_handle = rinfo->sw_act.vsi_handle;
6115 if (!ice_is_vsi_valid(hw, vsi_handle))
6116 return ICE_ERR_PARAM;
6118 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6119 rinfo->sw_act.fwd_id.hw_vsi_id =
6120 ice_get_hw_vsi_num(hw, vsi_handle);
6121 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6122 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* create or look up the recipe for these lookups; rid is its index */
6124 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6127 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6129 /* The rule already exists: instead of programming a duplicate,
6130 * subscribe the new VSI to it. If the rule currently forwards to a
6131 * single VSI, a VSI list is created holding the existing VSI and the
6132 * new one; if a VSI list already exists, the new VSI is simply added
6133 * to that list. Either way vsi_count is incremented and the caller's
6134 * added_entry is filled from the existing rule.
6135 */
6137 status = ice_adv_add_update_vsi_list(hw, m_entry,
6138 &m_entry->rule_info,
6141 added_entry->rid = rid;
6142 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6143 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* no existing rule — build a fresh rule element sized for the packet */
6147 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6148 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6150 return ICE_ERR_NO_MEMORY;
6151 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* encode the caller's action into the single-action field */
6152 switch (rinfo->sw_act.fltr_act) {
6153 case ICE_FWD_TO_VSI:
6154 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6155 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6156 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6159 act |= ICE_SINGLE_ACT_TO_Q;
6160 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6161 ICE_SINGLE_ACT_Q_INDEX_M;
6163 case ICE_FWD_TO_QGRP:
/* queue-group size is encoded as log2 of the group size */
6164 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6165 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6166 act |= ICE_SINGLE_ACT_TO_Q;
6167 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6168 ICE_SINGLE_ACT_Q_INDEX_M;
6169 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6170 ICE_SINGLE_ACT_Q_REGION_M;
6172 case ICE_DROP_PACKET:
6173 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6174 ICE_SINGLE_ACT_VALID_BIT;
6177 status = ICE_ERR_CFG;
6178 goto err_ice_add_adv_rule;
6181 /* set the rule LOOKUP type based on caller specified 'RX'
6182 * instead of hardcoding it to be either LOOKUP_TX/RX
6184 * for 'RX' set the source to be the port number
6185 * for 'TX' set the source to be the source HW VSI number (determined
6189 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6190 s_rule->pdata.lkup_tx_rx.src =
6191 CPU_TO_LE16(hw->port_info->lport);
6193 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6194 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6197 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6198 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* overlay the caller's match values onto the dummy packet in the rule */
6200 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
6203 if (rinfo->tun_type != ICE_NON_TUN) {
6204 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6205 s_rule->pdata.lkup_tx_rx.hdr,
6208 goto err_ice_add_adv_rule;
/* program the rule into hardware via the admin queue */
6211 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6212 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6215 goto err_ice_add_adv_rule;
6216 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6217 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry))
;
6219 status = ICE_ERR_NO_MEMORY;
6220 goto err_ice_add_adv_rule;
/* keep a private copy of the lookups for later matching/removal */
6223 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6224 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6225 ICE_NONDMA_TO_NONDMA);
6226 if (!adv_fltr->lkups) {
6227 status = ICE_ERR_NO_MEMORY;
6228 goto err_ice_add_adv_rule;
6231 adv_fltr->lkups_cnt = lkups_cnt;
6232 adv_fltr->rule_info = *rinfo;
6233 adv_fltr->rule_info.fltr_rule_id =
6234 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6235 sw = hw->switch_info;
6236 sw->recp_list[rid].adv_rule = true;
6237 rule_head = &sw->recp_list[rid].filt_rules;
6239 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6240 struct ice_fltr_info tmp_fltr;
/* NOTE(review): tmp_fltr is only partially initialized before the
 * ice_update_pkt_fwd_rule() call — remaining fields are stack
 * garbage; TODO confirm whether a memset is needed here.
 */
6242 tmp_fltr.fltr_rule_id =
6243 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6244 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6245 tmp_fltr.fwd_id.hw_vsi_id =
6246 ice_get_hw_vsi_num(hw, vsi_handle);
6247 tmp_fltr.vsi_handle = vsi_handle;
6248 /* Update the previous switch rule of "forward to VSI" to
6251 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6253 goto err_ice_add_adv_rule;
6254 adv_fltr->vsi_count = 1;
6257 /* Add rule entry to book keeping list */
6258 LIST_ADD(&adv_fltr->list_entry, rule_head);
6260 added_entry->rid = rid;
6261 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6262 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* error path: free the partially-built entry and the rule buffer */
6264 err_ice_add_adv_rule:
6265 if (status && adv_fltr) {
6266 ice_free(hw, adv_fltr->lkups);
6267 ice_free(hw, adv_fltr);
6270 ice_free(hw, s_rule);
6276 * ice_adv_rem_update_vsi_list
6277 * @hw: pointer to the hardware structure
6278 * @vsi_handle: VSI handle of the VSI to remove
6279 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes @vsi_handle from the VSI list of an advanced "fwd to VSI list"
 * filter. When only one subscriber remains, the rule is converted back to
 * plain "fwd to VSI" and the now-unneeded VSI list is torn down.
 */
6282 static enum ice_status
6283 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6284 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6286 struct ice_vsi_list_map_info *vsi_list_info;
6287 enum ice_sw_lkup_type lkup_type;
6288 enum ice_status status;
6291 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6292 fm_list->vsi_count == 0)
6293 return ICE_ERR_PARAM;
6295 /* A rule with the VSI being removed does not exist */
6296 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6297 return ICE_ERR_DOES_NOT_EXIST;
6299 lkup_type = ICE_SW_LKUP_LAST;
6300 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* "true" => this AQ update removes the VSI from the list */
6301 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6302 ice_aqc_opc_update_sw_rules,
6307 fm_list->vsi_count--;
6308 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6309 vsi_list_info = fm_list->vsi_list_info;
6310 if (fm_list->vsi_count == 1) {
6311 struct ice_fltr_info tmp_fltr;
/* locate the single remaining subscriber in the bitmap */
6314 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6316 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6317 return ICE_ERR_OUT_OF_RANGE;
6319 /* Make sure VSI list is empty before removing it below */
6320 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6322 ice_aqc_opc_update_sw_rules,
/* NOTE(review): tmp_fltr is only partially initialized before
 * ice_update_pkt_fwd_rule() — TODO confirm a memset is needed.
 */
6326 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6327 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6328 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6329 tmp_fltr.fwd_id.hw_vsi_id =
6330 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6331 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6332 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6334 /* Convert the switch rule from "fwd to VSI list" back to
6335 * "fwd to VSI" for the last remaining subscriber
6337 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6339 ice_debug(hw, ICE_DBG_SW,
6340 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6341 tmp_fltr.fwd_id.hw_vsi_id, status);
6345 /* Remove the VSI list since it is no longer used */
6346 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6348 ice_debug(hw, ICE_DBG_SW,
6349 "Failed to remove VSI list %d, error %d\n",
6350 vsi_list_id, status);
/* drop the list's book-keeping map entry */
6354 LIST_DEL(&vsi_list_info->list_entry);
6355 ice_free(hw, vsi_list_info);
6356 fm_list->vsi_list_info = NULL;
6363 * ice_rem_adv_rule - removes existing advanced switch rule
6364 * @hw: pointer to the hardware structure
6365 * @lkups: information on the words that needs to be looked up. All words
6366 * together makes one recipe
6367 * @lkups_cnt: num of entries in the lkups array
6368 * @rinfo: pointer to the rule information for the rule to be removed
6370 * This function can be used to remove 1 rule at a time. The lkups is
6371 * used to describe all the words that forms the "lookup" portion of the
6372 * rule. These words can span multiple protocols. Callers to this function
6373 * need to pass in a list of protocol headers with lookup information along
6374 * and mask that determines which words are valid from the given protocol
6375 * header. rinfo describes other information related to this rule such as
6376 * forwarding IDs, priority of this rule, etc.
/* Removes one advanced switch rule. Reconstructs the lookup extraction from
 * @lkups to find the owning recipe, locates the matching book-keeping entry,
 * updates/removes its VSI list subscription, and — when no subscriber
 * remains — deletes the rule from hardware and frees the entry.
 */
6379 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6380 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6382 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6383 struct ice_prot_lkup_ext lkup_exts;
6384 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6385 enum ice_status status = ICE_SUCCESS;
6386 bool remove_rule = false;
6387 u16 i, rid, vsi_handle;
6389 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
/* rebuild the word extraction used when the recipe was created */
6390 for (i = 0; i < lkups_cnt; i++) {
6393 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6396 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6401 /* Create any special protocol/offset pairs, such as looking at tunnel
6402 * bits by extracting metadata
6404 status = ice_add_special_words(rinfo, &lkup_exts);
6408 rid = ice_find_recp(hw, &lkup_exts);
6409 /* If did not find a recipe that match the existing criteria */
6410 if (rid == ICE_MAX_NUM_RECIPES)
6411 return ICE_ERR_PARAM;
6413 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6414 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6415 /* the rule is already removed */
6418 ice_acquire_lock(rule_lock);
/* decide between unsubscribing one VSI and removing the whole rule */
6419 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6421 } else if (list_elem->vsi_count > 1) {
6422 list_elem->vsi_list_info->ref_cnt--;
6423 remove_rule = false;
6424 vsi_handle = rinfo->sw_act.vsi_handle;
6425 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6427 vsi_handle = rinfo->sw_act.vsi_handle;
6428 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6430 ice_release_lock(rule_lock);
6433 if (list_elem->vsi_count == 0)
6436 ice_release_lock(rule_lock);
6438 struct ice_aqc_sw_rules_elem *s_rule;
6441 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6443 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6446 return ICE_ERR_NO_MEMORY;
/* a removal element only needs the rule index; no action, no header */
6447 s_rule->pdata.lkup_tx_rx.act = 0;
6448 s_rule->pdata.lkup_tx_rx.index =
6449 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6450 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6451 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6453 ice_aqc_opc_remove_sw_rules, NULL);
6454 if (status == ICE_SUCCESS) {
/* hardware removal succeeded — drop the book-keeping entry too */
6455 ice_acquire_lock(rule_lock);
6456 LIST_DEL(&list_elem->list_entry);
6457 ice_free(hw, list_elem->lkups);
6458 ice_free(hw, list_elem);
6459 ice_release_lock(rule_lock);
6461 ice_free(hw, s_rule);
6467 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6468 * @hw: pointer to the hardware structure
6469 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6471 * This function is used to remove 1 rule at a time. The removal is based on
6472 * the remove_entry parameter. This function will remove rule for a given
6473 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* Removes one advanced rule identified by (rid, rule_id, vsi_handle) in
 * @remove_entry: finds the matching book-keeping entry under the recipe's
 * filter list and delegates the actual removal to ice_rem_adv_rule().
 * Returns ICE_ERR_PARAM if the recipe or rule is not found.
 */
6476 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6477 struct ice_rule_query_data *remove_entry)
6479 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6480 struct LIST_HEAD_TYPE *list_head;
6481 struct ice_adv_rule_info rinfo;
6482 struct ice_switch_info *sw;
6484 sw = hw->switch_info;
6485 if (!sw->recp_list[remove_entry->rid].recp_created)
6486 return ICE_ERR_PARAM;
6487 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6488 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6490 if (list_itr->rule_info.fltr_rule_id ==
6491 remove_entry->rule_id) {
/* copy the stored rule info but target the caller's VSI */
6492 rinfo = list_itr->rule_info;
6493 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6494 return ice_rem_adv_rule(hw, list_itr->lkups,
6495 list_itr->lkups_cnt, &rinfo);
/* rule_id not present under this recipe */
6498 return ICE_ERR_PARAM;
6502 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6504 * @hw: pointer to the hardware structure
6505 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6507 * This function is used to remove all the rules for a given VSI and as soon
6508 * as removing a rule fails, it will return immediately with the error code,
6509 * else it will return ICE_SUCCESS
/* Removes every advanced rule that the given VSI participates in, walking
 * all created recipes marked adv_rule and unsubscribing the VSI from each
 * matching rule via ice_rem_adv_rule().
 */
6512 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6514 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6515 struct ice_vsi_list_map_info *map_info;
6516 struct LIST_HEAD_TYPE *list_head;
6517 struct ice_adv_rule_info rinfo;
6518 struct ice_switch_info *sw;
6519 enum ice_status status;
6520 u16 vsi_list_id = 0;
6523 sw = hw->switch_info;
6524 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6525 if (!sw->recp_list[rid].recp_created)
6527 if (!sw->recp_list[rid].adv_rule)
6529 list_head = &sw->recp_list[rid].filt_rules;
/* NOTE(review): ice_rem_adv_rule() below may free the current list
 * node, yet this is not the _SAFE iteration variant — TODO confirm
 * whether iteration after a removal is well-defined here.
 */
6531 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6532 ice_adv_fltr_mgmt_list_entry, list_entry) {
6533 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6537 rinfo = list_itr->rule_info;
6538 rinfo.sw_act.vsi_handle = vsi_handle;
6539 status = ice_rem_adv_rule(hw, list_itr->lkups,
6540 list_itr->lkups_cnt, &rinfo);
6550 * ice_replay_fltr - Replay all the filters stored by a specific list head
6551 * @hw: pointer to the hardware structure
6552 * @list_head: list for which filters needs to be replayed
6553 * @recp_id: Recipe ID for which rules need to be replayed
/* Replays all filters of @recp_id from @list_head: the list is moved to a
 * temporary head (so re-adding does not collide with the existing entries),
 * each filter is re-programmed — per-VSI for VSI-list filters — and the
 * temporary list is freed afterwards.
 */
6555 static enum ice_status
6556 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6558 struct ice_fltr_mgmt_list_entry *itr;
6559 struct LIST_HEAD_TYPE l_head;
6560 enum ice_status status = ICE_SUCCESS;
6562 if (LIST_EMPTY(list_head))
6565 /* Move entries from the given list_head to a temporary l_head so that
6566 * they can be replayed. Otherwise when trying to re-add the same
6567 * filter, the function will return already exists
6569 LIST_REPLACE_INIT(list_head, &l_head)
;
6571 /* Mark the given list_head empty by reinitializing it so filters
6572 * could be added again by *handler
6574 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6576 struct ice_fltr_list_entry f_entry;
6578 f_entry.fltr_info = itr->fltr_info;
/* single-VSI, non-VLAN filters can be re-added directly */
6579 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6580 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6581 if (status != ICE_SUCCESS)
6586 /* Add a filter per VSI separately */
6591 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6593 if (!ice_is_vsi_valid(hw, vsi_handle))
/* clear the bit so the re-add path can set it again */
6596 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6597 f_entry.fltr_info.vsi_handle = vsi_handle;
6598 f_entry.fltr_info.fwd_id.hw_vsi_id =
6599 ice_get_hw_vsi_num(hw, vsi_handle);
6600 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6601 if (recp_id == ICE_SW_LKUP_VLAN)
6602 status = ice_add_vlan_internal(hw, &f_entry);
6604 status = ice_add_rule_internal(hw, recp_id,
6606 if (status != ICE_SUCCESS)
6611 /* Clear the filter management list */
6612 ice_rem_sw_rule_info(hw, &l_head);
6617 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6618 * @hw: pointer to the hardware structure
6620 * NOTE: This function does not clean up partially added filters on error.
6621 * It is up to caller of the function to issue a reset or fail early.
/* Replays the filters of every recipe in the switch book-keeping lists,
 * stopping at the first failure (partial state is the caller's problem —
 * see the NOTE in the header comment above).
 */
6623 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6625 struct ice_switch_info *sw = hw->switch_info;
6626 enum ice_status status = ICE_SUCCESS;
6629 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6630 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6632 status = ice_replay_fltr(hw, i, head);
6633 if (status != ICE_SUCCESS)
6640 * ice_replay_vsi_fltr - Replay filters for requested VSI
6641 * @hw: pointer to the hardware structure
6642 * @vsi_handle: driver VSI handle
6643 * @recp_id: Recipe ID for which rules need to be replayed
6644 * @list_head: list for which filters need to be replayed
6646 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6647 * It is required to pass valid VSI handle.
/* Replays the filters of recipe @recp_id that apply to @vsi_handle: direct
 * single-VSI filters are re-added as-is; VSI-list members have their bit
 * cleared first and are re-added as "fwd to VSI" so the add path rebuilds
 * the list membership.
 */
6649 static enum ice_status
6650 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6651 struct LIST_HEAD_TYPE *list_head)
6653 struct ice_fltr_mgmt_list_entry *itr;
6654 enum ice_status status = ICE_SUCCESS;
6657 if (LIST_EMPTY(list_head))
6659 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6661 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6663 struct ice_fltr_list_entry f_entry;
6665 f_entry.fltr_info = itr->fltr_info;
/* direct (non-list, non-VLAN) filter owned by this VSI */
6666 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6667 itr->fltr_info.vsi_handle == vsi_handle) {
6668 /* update the src in case it is VSI num */
6669 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6670 f_entry.fltr_info.src = hw_vsi_id;
6671 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6672 if (status != ICE_SUCCESS)
/* skip entries whose VSI list does not include this VSI */
6676 if (!itr->vsi_list_info ||
6677 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6679 /* Clearing it so that the logic can add it back */
6680 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6681 f_entry.fltr_info.vsi_handle = vsi_handle;
6682 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6683 /* update the src in case it is VSI num */
6684 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6685 f_entry.fltr_info.src = hw_vsi_id;
6686 if (recp_id == ICE_SW_LKUP_VLAN)
6687 status = ice_add_vlan_internal(hw, &f_entry);
6689 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6690 if (status != ICE_SUCCESS)
6698 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6699 * @hw: pointer to the hardware structure
6700 * @vsi_handle: driver VSI handle
6701 * @list_head: list for which filters need to be replayed
6703 * Replay the advanced rule for the given VSI.
/* Replays the advanced rules from @list_head that target @vsi_handle by
 * re-submitting each one's stored lookups and rule info through
 * ice_add_adv_rule().
 */
6705 static enum ice_status
6706 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6707 struct LIST_HEAD_TYPE *list_head)
6709 struct ice_rule_query_data added_entry = { 0 };
6710 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6711 enum ice_status status = ICE_SUCCESS;
6713 if (LIST_EMPTY(list_head))
6715 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6717 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6718 u16 lk_cnt = adv_fltr->lkups_cnt;
/* only replay rules whose action targets the requested VSI */
6720 if (vsi_handle != rinfo->sw_act.vsi_handle)
6722 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6731 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6732 * @hw: pointer to the hardware structure
6733 * @vsi_handle: driver VSI handle
6735 * Replays filters for requested VSI via vsi_handle.
/* Replays all stored filters for @vsi_handle, dispatching each recipe's
 * replay-rule list either to the legacy per-recipe replay or to the
 * advanced-rule replay depending on the recipe's adv_rule flag.
 */
6737 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6739 struct ice_switch_info *sw = hw->switch_info;
6740 enum ice_status status;
6743 /* Update the recipes that were created */
6744 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6745 struct LIST_HEAD_TYPE *head;
6747 head = &sw->recp_list[i].filt_replay_rules;
6748 if (!sw->recp_list[i].adv_rule)
6749 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6751 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6752 if (status != ICE_SUCCESS)
6760 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6761 * @hw: pointer to the HW struct
6763 * Deletes the filter replay rules.
6765 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6767 struct ice_switch_info *sw = hw->switch_info;
6773 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6774 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6775 struct LIST_HEAD_TYPE *l_head;
6777 l_head = &sw->recp_list[i].filt_replay_rules;
6778 if (!sw->recp_list[i].adv_rule)
6779 ice_rem_sw_rule_info(hw, l_head);
6781 ice_rem_adv_rule_info(hw, l_head);