1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
/*
 * Query MAE capabilities from the FW (MC_CMD_MAE_GET_CAPS) and cache
 * priority counts, supported encap types and field/counter limits in
 * the per-NIC MAE context (enp->en_maep).
 */
12 static __checkReturn efx_rc_t
13 efx_mae_get_capabilities(
17 EFX_MCDI_DECLARE_BUF(payload,
18 MC_CMD_MAE_GET_CAPS_IN_LEN,
19 MC_CMD_MAE_GET_CAPS_OUT_LEN);
20 struct efx_mae_s *maep = enp->en_maep;
/* A single payload buffer serves both the request and the response. */
23 req.emr_cmd = MC_CMD_MAE_GET_CAPS;
24 req.emr_in_buf = payload;
25 req.emr_in_length = MC_CMD_MAE_GET_CAPS_IN_LEN;
26 req.emr_out_buf = payload;
27 req.emr_out_length = MC_CMD_MAE_GET_CAPS_OUT_LEN;
29 efx_mcdi_execute(enp, &req);
31 if (req.emr_rc != 0) {
/* Reject a truncated response before parsing any of its fields. */
36 if (req.emr_out_length_used < MC_CMD_MAE_GET_CAPS_OUT_LEN) {
41 maep->em_max_n_outer_prios =
42 MCDI_OUT_DWORD(req, MAE_GET_CAPS_OUT_OUTER_PRIOS);
44 maep->em_max_n_action_prios =
45 MCDI_OUT_DWORD(req, MAE_GET_CAPS_OUT_ACTION_PRIOS);
/* Translate FW encap-type flags into the EFX tunnel protocol bitmask. */
47 maep->em_encap_types_supported = 0;
49 if (MCDI_OUT_DWORD_FIELD(req, MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED,
50 MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN) != 0) {
51 maep->em_encap_types_supported |=
52 (1U << EFX_TUNNEL_PROTOCOL_VXLAN);
55 if (MCDI_OUT_DWORD_FIELD(req, MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED,
56 MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE) != 0) {
57 maep->em_encap_types_supported |=
58 (1U << EFX_TUNNEL_PROTOCOL_GENEVE);
61 if (MCDI_OUT_DWORD_FIELD(req, MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED,
62 MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE) != 0) {
63 maep->em_encap_types_supported |=
64 (1U << EFX_TUNNEL_PROTOCOL_NVGRE);
67 maep->em_max_nfields =
68 MCDI_OUT_DWORD(req, MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT);
70 maep->em_max_ncounters =
71 MCDI_OUT_DWORD(req, MAE_GET_CAPS_OUT_COUNTERS);
78 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fetch per-field outer rule match capabilities (MC_CMD_MAE_GET_OR_CAPS)
 * into the caller-provided array of field_ncaps entries.
 */
82 static __checkReturn efx_rc_t
83 efx_mae_get_outer_rule_caps(
85 __in unsigned int field_ncaps,
86 __out_ecount(field_ncaps) efx_mae_field_cap_t *field_caps)
89 EFX_MCDI_DECLARE_BUF(payload,
90 MC_CMD_MAE_GET_OR_CAPS_IN_LEN,
91 MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX_MCDI2);
92 unsigned int mcdi_field_ncaps;
/* The requested count must fit in the (MCDI2) response buffer. */
96 if (MC_CMD_MAE_GET_OR_CAPS_OUT_LEN(field_ncaps) >
97 MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX_MCDI2) {
102 req.emr_cmd = MC_CMD_MAE_GET_OR_CAPS;
103 req.emr_in_buf = payload;
104 req.emr_in_length = MC_CMD_MAE_GET_OR_CAPS_IN_LEN;
105 req.emr_out_buf = payload;
106 req.emr_out_length = MC_CMD_MAE_GET_OR_CAPS_OUT_LEN(field_ncaps);
108 efx_mcdi_execute(enp, &req);
110 if (req.emr_rc != 0) {
115 if (req.emr_out_length_used < MC_CMD_MAE_GET_OR_CAPS_OUT_LENMIN) {
120 mcdi_field_ncaps = MCDI_OUT_DWORD(req, MAE_GET_OR_CAPS_OUT_COUNT);
/* The response must actually cover the number of entries it claims. */
122 if (req.emr_out_length_used <
123 MC_CMD_MAE_GET_OR_CAPS_OUT_LEN(mcdi_field_ncaps)) {
/* The FW must not report more fields than the caller's array holds. */
128 if (mcdi_field_ncaps > field_ncaps) {
/* Extract support status plus match/mask class-affecting flags. */
133 for (i = 0; i < mcdi_field_ncaps; ++i) {
137 field_caps[i].emfc_support = MCDI_OUT_INDEXED_DWORD_FIELD(req,
138 MAE_GET_OR_CAPS_OUT_FIELD_FLAGS, i,
139 MAE_FIELD_FLAGS_SUPPORT_STATUS);
141 match_flag = MCDI_OUT_INDEXED_DWORD_FIELD(req,
142 MAE_GET_OR_CAPS_OUT_FIELD_FLAGS, i,
143 MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS);
145 field_caps[i].emfc_match_affects_class =
146 (match_flag != 0) ? B_TRUE : B_FALSE;
148 mask_flag = MCDI_OUT_INDEXED_DWORD_FIELD(req,
149 MAE_GET_OR_CAPS_OUT_FIELD_FLAGS, i,
150 MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS);
152 field_caps[i].emfc_mask_affects_class =
153 (mask_flag != 0) ? B_TRUE : B_FALSE;
167 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fetch per-field action rule match capabilities (MC_CMD_MAE_GET_AR_CAPS)
 * into the caller-provided array; mirrors efx_mae_get_outer_rule_caps().
 */
171 static __checkReturn efx_rc_t
172 efx_mae_get_action_rule_caps(
174 __in unsigned int field_ncaps,
175 __out_ecount(field_ncaps) efx_mae_field_cap_t *field_caps)
178 EFX_MCDI_DECLARE_BUF(payload,
179 MC_CMD_MAE_GET_AR_CAPS_IN_LEN,
180 MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX_MCDI2);
181 unsigned int mcdi_field_ncaps;
/* The requested count must fit in the (MCDI2) response buffer. */
185 if (MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(field_ncaps) >
186 MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX_MCDI2) {
191 req.emr_cmd = MC_CMD_MAE_GET_AR_CAPS;
192 req.emr_in_buf = payload;
193 req.emr_in_length = MC_CMD_MAE_GET_AR_CAPS_IN_LEN;
194 req.emr_out_buf = payload;
195 req.emr_out_length = MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(field_ncaps);
197 efx_mcdi_execute(enp, &req);
199 if (req.emr_rc != 0) {
204 if (req.emr_out_length_used < MC_CMD_MAE_GET_AR_CAPS_OUT_LENMIN) {
209 mcdi_field_ncaps = MCDI_OUT_DWORD(req, MAE_GET_AR_CAPS_OUT_COUNT);
/* The response must actually cover the number of entries it claims. */
211 if (req.emr_out_length_used <
212 MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(mcdi_field_ncaps)) {
/* The FW must not report more fields than the caller's array holds. */
217 if (mcdi_field_ncaps > field_ncaps) {
/* Extract support status plus match/mask class-affecting flags. */
222 for (i = 0; i < mcdi_field_ncaps; ++i) {
226 field_caps[i].emfc_support = MCDI_OUT_INDEXED_DWORD_FIELD(req,
227 MAE_GET_AR_CAPS_OUT_FIELD_FLAGS, i,
228 MAE_FIELD_FLAGS_SUPPORT_STATUS);
230 match_flag = MCDI_OUT_INDEXED_DWORD_FIELD(req,
231 MAE_GET_AR_CAPS_OUT_FIELD_FLAGS, i,
232 MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS);
234 field_caps[i].emfc_match_affects_class =
235 (match_flag != 0) ? B_TRUE : B_FALSE;
237 mask_flag = MCDI_OUT_INDEXED_DWORD_FIELD(req,
238 MAE_GET_AR_CAPS_OUT_FIELD_FLAGS, i,
239 MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS);
241 field_caps[i].emfc_mask_affects_class =
242 (mask_flag != 0) ? B_TRUE : B_FALSE;
256 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Set up the per-NIC MAE context: allocate it, query overall capabilities
 * and fetch per-field caps for both outer and action rules.
 * NOTE(review): the function name line is elided in this extract;
 * the body matches MAE initialization — confirm against the full file.
 */
260 __checkReturn efx_rc_t
264 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
265 efx_mae_field_cap_t *or_fcaps;
266 size_t or_fcaps_size;
267 efx_mae_field_cap_t *ar_fcaps;
268 size_t ar_fcaps_size;
/* Nothing to do on adapters without MAE support. */
272 if (encp->enc_mae_supported == B_FALSE) {
277 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*maep), maep);
285 rc = efx_mae_get_capabilities(enp);
/* Outer rule field caps: sized by the FW-reported field count. */
289 or_fcaps_size = maep->em_max_nfields * sizeof (*or_fcaps);
290 EFSYS_KMEM_ALLOC(enp->en_esip, or_fcaps_size, or_fcaps);
291 if (or_fcaps == NULL) {
296 maep->em_outer_rule_field_caps_size = or_fcaps_size;
297 maep->em_outer_rule_field_caps = or_fcaps;
299 rc = efx_mae_get_outer_rule_caps(enp, maep->em_max_nfields, or_fcaps);
/* Action rule field caps: same count, separate array. */
303 ar_fcaps_size = maep->em_max_nfields * sizeof (*ar_fcaps);
304 EFSYS_KMEM_ALLOC(enp->en_esip, ar_fcaps_size, ar_fcaps);
305 if (ar_fcaps == NULL) {
310 maep->em_action_rule_field_caps_size = ar_fcaps_size;
311 maep->em_action_rule_field_caps = ar_fcaps;
313 rc = efx_mae_get_action_rule_caps(enp, maep->em_max_nfields, ar_fcaps);
/* Failure unwind: release resources in reverse order of acquisition. */
321 EFSYS_KMEM_FREE(enp->en_esip, ar_fcaps_size, ar_fcaps);
326 EFSYS_KMEM_FREE(enp->en_esip, or_fcaps_size, or_fcaps);
331 EFSYS_KMEM_FREE(enp->en_esip, sizeof (struct efx_mae_s), enp->en_maep);
336 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Tear down the MAE context: free both field-cap arrays, then the
 * context itself. When MAE is unsupported there is nothing allocated
 * (early return presumed — line elided in this extract).
 */
344 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
345 efx_mae_t *maep = enp->en_maep;
347 if (encp->enc_mae_supported == B_FALSE)
350 EFSYS_KMEM_FREE(enp->en_esip, maep->em_action_rule_field_caps_size,
351 maep->em_action_rule_field_caps);
352 EFSYS_KMEM_FREE(enp->en_esip, maep->em_outer_rule_field_caps_size,
353 maep->em_outer_rule_field_caps);
354 EFSYS_KMEM_FREE(enp->en_esip, sizeof (*maep), maep);
/*
 * Report cached MAE limits (priority counts, encap types, encap header
 * size limit) to the caller. Fails when MAE is unsupported.
 */
358 __checkReturn efx_rc_t
361 __out efx_mae_limits_t *emlp)
363 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
364 struct efx_mae_s *maep = enp->en_maep;
367 if (encp->enc_mae_supported == B_FALSE) {
/* All values below were cached by efx_mae_get_capabilities(). */
372 emlp->eml_max_n_outer_prios = maep->em_max_n_outer_prios;
373 emlp->eml_max_n_action_prios = maep->em_max_n_action_prios;
374 emlp->eml_encap_types_supported = maep->em_encap_types_supported;
375 emlp->eml_encap_header_size_limit =
376 MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM_MCDI2;
381 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate a match specification of the given rule type (outer or
 * action) and priority. The caller owns the result and must release
 * it with efx_mae_match_spec_fini().
 */
385 __checkReturn efx_rc_t
386 efx_mae_match_spec_init(
388 __in efx_mae_rule_type_t type,
390 __out efx_mae_match_spec_t **specp)
392 efx_mae_match_spec_t *spec;
/* Only the two known rule types are accepted. */
396 case EFX_MAE_RULE_OUTER:
398 case EFX_MAE_RULE_ACTION:
405 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), spec);
411 spec->emms_type = type;
412 spec->emms_prio = prio;
421 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Release a match specification obtained from efx_mae_match_spec_init(). */
426 efx_mae_match_spec_fini(
428 __in efx_mae_match_spec_t *spec)
430 EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);
433 /* Named identifiers which are valid indices to efx_mae_field_cap_t */
434 typedef enum efx_mae_field_cap_id_e {
/*
 * Each enumerator aliases its MCDI MAE_FIELD_* constant so that it can
 * be used directly as an index into the field capability arrays fetched
 * via MC_CMD_MAE_GET_OR_CAPS / MC_CMD_MAE_GET_AR_CAPS.
 *
 * A _BE suffix marks fields whose match value/mask are handled in
 * big-endian (network) byte order.
 */
435 EFX_MAE_FIELD_ID_INGRESS_MPORT_SELECTOR = MAE_FIELD_INGRESS_PORT,
436 EFX_MAE_FIELD_ID_ETHER_TYPE_BE = MAE_FIELD_ETHER_TYPE,
437 EFX_MAE_FIELD_ID_ETH_SADDR_BE = MAE_FIELD_ETH_SADDR,
438 EFX_MAE_FIELD_ID_ETH_DADDR_BE = MAE_FIELD_ETH_DADDR,
439 EFX_MAE_FIELD_ID_VLAN0_TCI_BE = MAE_FIELD_VLAN0_TCI,
440 EFX_MAE_FIELD_ID_VLAN0_PROTO_BE = MAE_FIELD_VLAN0_PROTO,
441 EFX_MAE_FIELD_ID_VLAN1_TCI_BE = MAE_FIELD_VLAN1_TCI,
442 EFX_MAE_FIELD_ID_VLAN1_PROTO_BE = MAE_FIELD_VLAN1_PROTO,
443 EFX_MAE_FIELD_ID_SRC_IP4_BE = MAE_FIELD_SRC_IP4,
444 EFX_MAE_FIELD_ID_DST_IP4_BE = MAE_FIELD_DST_IP4,
445 EFX_MAE_FIELD_ID_IP_PROTO = MAE_FIELD_IP_PROTO,
446 EFX_MAE_FIELD_ID_IP_TOS = MAE_FIELD_IP_TOS,
447 EFX_MAE_FIELD_ID_IP_TTL = MAE_FIELD_IP_TTL,
448 EFX_MAE_FIELD_ID_SRC_IP6_BE = MAE_FIELD_SRC_IP6,
449 EFX_MAE_FIELD_ID_DST_IP6_BE = MAE_FIELD_DST_IP6,
450 EFX_MAE_FIELD_ID_L4_SPORT_BE = MAE_FIELD_L4_SPORT,
451 EFX_MAE_FIELD_ID_L4_DPORT_BE = MAE_FIELD_L4_DPORT,
452 EFX_MAE_FIELD_ID_TCP_FLAGS_BE = MAE_FIELD_TCP_FLAGS,
/*
 * ENC_ identifiers are encapsulation fields; they are referenced by
 * the outer rule descriptor set (MAE_ENC_FIELD_PAIRS layout).
 */
453 EFX_MAE_FIELD_ID_ENC_ETHER_TYPE_BE = MAE_FIELD_ENC_ETHER_TYPE,
454 EFX_MAE_FIELD_ID_ENC_ETH_SADDR_BE = MAE_FIELD_ENC_ETH_SADDR,
455 EFX_MAE_FIELD_ID_ENC_ETH_DADDR_BE = MAE_FIELD_ENC_ETH_DADDR,
456 EFX_MAE_FIELD_ID_ENC_VLAN0_TCI_BE = MAE_FIELD_ENC_VLAN0_TCI,
457 EFX_MAE_FIELD_ID_ENC_VLAN0_PROTO_BE = MAE_FIELD_ENC_VLAN0_PROTO,
458 EFX_MAE_FIELD_ID_ENC_VLAN1_TCI_BE = MAE_FIELD_ENC_VLAN1_TCI,
459 EFX_MAE_FIELD_ID_ENC_VLAN1_PROTO_BE = MAE_FIELD_ENC_VLAN1_PROTO,
460 EFX_MAE_FIELD_ID_ENC_SRC_IP4_BE = MAE_FIELD_ENC_SRC_IP4,
461 EFX_MAE_FIELD_ID_ENC_DST_IP4_BE = MAE_FIELD_ENC_DST_IP4,
462 EFX_MAE_FIELD_ID_ENC_IP_PROTO = MAE_FIELD_ENC_IP_PROTO,
463 EFX_MAE_FIELD_ID_ENC_IP_TOS = MAE_FIELD_ENC_IP_TOS,
464 EFX_MAE_FIELD_ID_ENC_IP_TTL = MAE_FIELD_ENC_IP_TTL,
465 EFX_MAE_FIELD_ID_ENC_SRC_IP6_BE = MAE_FIELD_ENC_SRC_IP6,
466 EFX_MAE_FIELD_ID_ENC_DST_IP6_BE = MAE_FIELD_ENC_DST_IP6,
467 EFX_MAE_FIELD_ID_ENC_L4_SPORT_BE = MAE_FIELD_ENC_L4_SPORT,
468 EFX_MAE_FIELD_ID_ENC_L4_DPORT_BE = MAE_FIELD_ENC_L4_DPORT,
469 EFX_MAE_FIELD_ID_ENC_VNET_ID_BE = MAE_FIELD_ENC_VNET_ID,
470 EFX_MAE_FIELD_ID_OUTER_RULE_ID = MAE_FIELD_OUTER_RULE_ID,
/* Single-bit flag fields, addressed via the mv_bit_desc sets. */
471 EFX_MAE_FIELD_ID_HAS_OVLAN = MAE_FIELD_HAS_OVLAN,
472 EFX_MAE_FIELD_ID_HAS_IVLAN = MAE_FIELD_HAS_IVLAN,
473 EFX_MAE_FIELD_ID_ENC_HAS_OVLAN = MAE_FIELD_ENC_HAS_OVLAN,
474 EFX_MAE_FIELD_ID_ENC_HAS_IVLAN = MAE_FIELD_ENC_HAS_IVLAN,
/* Number of named field capability identifiers. */
476 EFX_MAE_FIELD_CAP_NIDS
477 } efx_mae_field_cap_id_t;
/*
 * Byte order of a field's match value/mask as stored for MCDI.
 * NOTE(review): EFX_MAE_FIELD_BE is referenced by the descriptor sets
 * below, but its enumerator line is not visible in this extract —
 * confirm against the full file.
 */
479 typedef enum efx_mae_field_endianness_e {
480 EFX_MAE_FIELD_LE = 0,
483 EFX_MAE_FIELD_ENDIANNESS_NTYPES
484 } efx_mae_field_endianness_t;
/*
487 * The following structure is a means to describe an MAE field.
488 * The information in it is meant to be used internally by
489 * APIs for addressing a given field in a mask-value pairs
490 * structure and for validation purposes.
492 * A field may have an alternative one. This structure
493 * has additional members to reference the alternative
494 * field's mask. See efx_mae_match_spec_is_valid().
 */
/*
 * Descriptor of one MAE match field: its capability ID plus the
 * offsets/sizes of its value and mask within the mask-value pairs
 * buffer of a match specification.
 */
496 typedef struct efx_mae_mv_desc_s {
497 efx_mae_field_cap_id_t emmd_field_cap_id;
499 size_t emmd_value_size;
500 size_t emmd_value_offset;
501 size_t emmd_mask_size;
502 size_t emmd_mask_offset;
/*
505 * Having the alternative field's mask size set to 0
506 * means that there's no alternative field specified.
 */
508 size_t emmd_alt_mask_size;
509 size_t emmd_alt_mask_offset;
511 /* Primary field and the alternative one are of the same endianness. */
512 efx_mae_field_endianness_t emmd_endianness;
515 /* Indices to this array are provided by efx_mae_field_id_t */
/*
 * Action rule field descriptors: each entry maps an EFX field ID to
 * its value/mask layout within MAE_FIELD_MASK_VALUE_PAIRS.
 */
516 static const efx_mae_mv_desc_t __efx_mae_action_rule_mv_desc_set[] = {
517 #define EFX_MAE_MV_DESC(_name, _endianness) \
518 [EFX_MAE_FIELD_##_name] = \
520 EFX_MAE_FIELD_ID_##_name, \
521 MAE_FIELD_MASK_VALUE_PAIRS_##_name##_LEN, \
522 MAE_FIELD_MASK_VALUE_PAIRS_##_name##_OFST, \
523 MAE_FIELD_MASK_VALUE_PAIRS_##_name##_MASK_LEN, \
524 MAE_FIELD_MASK_VALUE_PAIRS_##_name##_MASK_OFST, \
525 0, 0 /* no alternative field */, \
529 EFX_MAE_MV_DESC(INGRESS_MPORT_SELECTOR, EFX_MAE_FIELD_LE),
530 EFX_MAE_MV_DESC(ETHER_TYPE_BE, EFX_MAE_FIELD_BE),
531 EFX_MAE_MV_DESC(ETH_SADDR_BE, EFX_MAE_FIELD_BE),
532 EFX_MAE_MV_DESC(ETH_DADDR_BE, EFX_MAE_FIELD_BE),
533 EFX_MAE_MV_DESC(VLAN0_TCI_BE, EFX_MAE_FIELD_BE),
534 EFX_MAE_MV_DESC(VLAN0_PROTO_BE, EFX_MAE_FIELD_BE),
535 EFX_MAE_MV_DESC(VLAN1_TCI_BE, EFX_MAE_FIELD_BE),
536 EFX_MAE_MV_DESC(VLAN1_PROTO_BE, EFX_MAE_FIELD_BE),
537 EFX_MAE_MV_DESC(SRC_IP4_BE, EFX_MAE_FIELD_BE),
538 EFX_MAE_MV_DESC(DST_IP4_BE, EFX_MAE_FIELD_BE),
539 EFX_MAE_MV_DESC(IP_PROTO, EFX_MAE_FIELD_BE),
540 EFX_MAE_MV_DESC(IP_TOS, EFX_MAE_FIELD_BE),
541 EFX_MAE_MV_DESC(IP_TTL, EFX_MAE_FIELD_BE),
542 EFX_MAE_MV_DESC(SRC_IP6_BE, EFX_MAE_FIELD_BE),
543 EFX_MAE_MV_DESC(DST_IP6_BE, EFX_MAE_FIELD_BE),
544 EFX_MAE_MV_DESC(L4_SPORT_BE, EFX_MAE_FIELD_BE),
545 EFX_MAE_MV_DESC(L4_DPORT_BE, EFX_MAE_FIELD_BE),
546 EFX_MAE_MV_DESC(TCP_FLAGS_BE, EFX_MAE_FIELD_BE),
547 EFX_MAE_MV_DESC(ENC_VNET_ID_BE, EFX_MAE_FIELD_BE),
548 EFX_MAE_MV_DESC(OUTER_RULE_ID, EFX_MAE_FIELD_LE),
550 #undef EFX_MAE_MV_DESC
553 /* Indices to this array are provided by efx_mae_field_id_t */
/*
 * Outer rule field descriptors: each entry maps an EFX field ID to its
 * value/mask layout within MAE_ENC_FIELD_PAIRS. IP4/IP6 addresses are
 * mutual alternatives (see EFX_MAE_MV_DESC_ALT entries).
 */
554 static const efx_mae_mv_desc_t __efx_mae_outer_rule_mv_desc_set[] = {
555 #define EFX_MAE_MV_DESC(_name, _endianness) \
556 [EFX_MAE_FIELD_##_name] = \
558 EFX_MAE_FIELD_ID_##_name, \
559 MAE_ENC_FIELD_PAIRS_##_name##_LEN, \
560 MAE_ENC_FIELD_PAIRS_##_name##_OFST, \
561 MAE_ENC_FIELD_PAIRS_##_name##_MASK_LEN, \
562 MAE_ENC_FIELD_PAIRS_##_name##_MASK_OFST, \
563 0, 0 /* no alternative field */, \
567 /* Same as EFX_MAE_MV_DESC(), but also indicates an alternative field. */
568 #define EFX_MAE_MV_DESC_ALT(_name, _alt_name, _endianness) \
569 [EFX_MAE_FIELD_##_name] = \
571 EFX_MAE_FIELD_ID_##_name, \
572 MAE_ENC_FIELD_PAIRS_##_name##_LEN, \
573 MAE_ENC_FIELD_PAIRS_##_name##_OFST, \
574 MAE_ENC_FIELD_PAIRS_##_name##_MASK_LEN, \
575 MAE_ENC_FIELD_PAIRS_##_name##_MASK_OFST, \
576 MAE_ENC_FIELD_PAIRS_##_alt_name##_MASK_LEN, \
577 MAE_ENC_FIELD_PAIRS_##_alt_name##_MASK_OFST, \
581 EFX_MAE_MV_DESC(INGRESS_MPORT_SELECTOR, EFX_MAE_FIELD_LE),
582 EFX_MAE_MV_DESC(ENC_ETHER_TYPE_BE, EFX_MAE_FIELD_BE),
583 EFX_MAE_MV_DESC(ENC_ETH_SADDR_BE, EFX_MAE_FIELD_BE),
584 EFX_MAE_MV_DESC(ENC_ETH_DADDR_BE, EFX_MAE_FIELD_BE),
585 EFX_MAE_MV_DESC(ENC_VLAN0_TCI_BE, EFX_MAE_FIELD_BE),
586 EFX_MAE_MV_DESC(ENC_VLAN0_PROTO_BE, EFX_MAE_FIELD_BE),
587 EFX_MAE_MV_DESC(ENC_VLAN1_TCI_BE, EFX_MAE_FIELD_BE),
588 EFX_MAE_MV_DESC(ENC_VLAN1_PROTO_BE, EFX_MAE_FIELD_BE),
589 EFX_MAE_MV_DESC_ALT(ENC_SRC_IP4_BE, ENC_SRC_IP6_BE, EFX_MAE_FIELD_BE),
590 EFX_MAE_MV_DESC_ALT(ENC_DST_IP4_BE, ENC_DST_IP6_BE, EFX_MAE_FIELD_BE),
591 EFX_MAE_MV_DESC(ENC_IP_PROTO, EFX_MAE_FIELD_BE),
592 EFX_MAE_MV_DESC(ENC_IP_TOS, EFX_MAE_FIELD_BE),
593 EFX_MAE_MV_DESC(ENC_IP_TTL, EFX_MAE_FIELD_BE),
594 EFX_MAE_MV_DESC_ALT(ENC_SRC_IP6_BE, ENC_SRC_IP4_BE, EFX_MAE_FIELD_BE),
595 EFX_MAE_MV_DESC_ALT(ENC_DST_IP6_BE, ENC_DST_IP4_BE, EFX_MAE_FIELD_BE),
596 EFX_MAE_MV_DESC(ENC_L4_SPORT_BE, EFX_MAE_FIELD_BE),
597 EFX_MAE_MV_DESC(ENC_L4_DPORT_BE, EFX_MAE_FIELD_BE),
599 #undef EFX_MAE_MV_DESC_ALT
600 #undef EFX_MAE_MV_DESC
/*
604 * The following structure is a means to describe an MAE bit.
605 * The information in it is meant to be used internally by
606 * APIs for addressing a given flag in a mask-value pairs
607 * structure and for validation purposes.
 */
/*
 * Descriptor of one single-bit MAE flag field: byte offsets and bit
 * numbers (LBN) of its value and mask bits.
 */
609 typedef struct efx_mae_mv_bit_desc_s {
/*
611 * Arrays using this struct are indexed by field IDs.
612 * Fields which aren't meant to be referenced by these
613 * arrays comprise gaps (invalid entries). Below field
614 * helps to identify such entries.
 */
616 boolean_t emmbd_entry_is_valid;
617 efx_mae_field_cap_id_t emmbd_bit_cap_id;
618 size_t emmbd_value_ofst;
619 unsigned int emmbd_value_lbn;
620 size_t emmbd_mask_ofst;
621 unsigned int emmbd_mask_lbn;
622 } efx_mae_mv_bit_desc_t;
/*
 * Outer rule flag-bit descriptors (MAE_ENC_FIELD_PAIRS layout);
 * only the ENC_HAS_{O,I}VLAN bits are described.
 */
624 static const efx_mae_mv_bit_desc_t __efx_mae_outer_rule_mv_bit_desc_set[] = {
625 #define EFX_MAE_MV_BIT_DESC(_name) \
626 [EFX_MAE_FIELD_##_name] = \
629 EFX_MAE_FIELD_ID_##_name, \
630 MAE_ENC_FIELD_PAIRS_##_name##_OFST, \
631 MAE_ENC_FIELD_PAIRS_##_name##_LBN, \
632 MAE_ENC_FIELD_PAIRS_##_name##_MASK_OFST, \
633 MAE_ENC_FIELD_PAIRS_##_name##_MASK_LBN, \
636 EFX_MAE_MV_BIT_DESC(ENC_HAS_OVLAN),
637 EFX_MAE_MV_BIT_DESC(ENC_HAS_IVLAN),
639 #undef EFX_MAE_MV_BIT_DESC
/*
 * Action rule flag-bit descriptors: all bits live in the V2 FLAGS /
 * FLAGS_MASK words. The same _LBN macro is used for both the value and
 * mask bit positions — the two words evidently share bit layout.
 */
642 static const efx_mae_mv_bit_desc_t __efx_mae_action_rule_mv_bit_desc_set[] = {
643 #define EFX_MAE_MV_BIT_DESC(_name) \
644 [EFX_MAE_FIELD_##_name] = \
647 EFX_MAE_FIELD_ID_##_name, \
648 MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_OFST, \
649 MAE_FIELD_MASK_VALUE_PAIRS_V2_##_name##_LBN, \
650 MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_OFST, \
651 MAE_FIELD_MASK_VALUE_PAIRS_V2_##_name##_LBN, \
654 EFX_MAE_MV_BIT_DESC(HAS_OVLAN),
655 EFX_MAE_MV_BIT_DESC(HAS_IVLAN),
656 EFX_MAE_MV_BIT_DESC(ENC_HAS_OVLAN),
657 EFX_MAE_MV_BIT_DESC(ENC_HAS_IVLAN),
659 #undef EFX_MAE_MV_BIT_DESC
/*
 * Build an m-port selector referring to a physical port.
 * Fails when phy_port does not fit the PPORT_ID selector field.
 */
662 __checkReturn efx_rc_t
663 efx_mae_mport_by_phy_port(
664 __in uint32_t phy_port,
665 __out efx_mport_sel_t *mportp)
670 if (phy_port > EFX_MASK32(MAE_MPORT_SELECTOR_PPORT_ID)) {
675 EFX_POPULATE_DWORD_2(dword,
676 MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
677 MAE_MPORT_SELECTOR_PPORT_ID, phy_port);
679 memset(mportp, 0, sizeof (*mportp));
/*
681 * The constructed DWORD is little-endian,
682 * but the resulting value is meant to be
683 * passed to MCDIs, where it will undergo
684 * host-order to little endian conversion.
 */
686 mportp->sel = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
691 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Build an m-port selector referring to a PCIe function (PF, VF).
 * EFX_PCI_VF_INVALID maps onto the selector's VF_ID_NULL encoding.
 */
695 __checkReturn efx_rc_t
696 efx_mae_mport_by_pcie_function(
699 __out efx_mport_sel_t *mportp)
704 EFX_STATIC_ASSERT(EFX_PCI_VF_INVALID ==
705 MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL);
/* Both IDs must fit their respective selector fields. */
707 if (pf > EFX_MASK32(MAE_MPORT_SELECTOR_FUNC_PF_ID)) {
712 if (vf > EFX_MASK32(MAE_MPORT_SELECTOR_FUNC_VF_ID)) {
717 EFX_POPULATE_DWORD_3(dword,
718 MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
719 MAE_MPORT_SELECTOR_FUNC_PF_ID, pf,
720 MAE_MPORT_SELECTOR_FUNC_VF_ID, vf);
722 memset(mportp, 0, sizeof (*mportp));
/*
724 * The constructed DWORD is little-endian,
725 * but the resulting value is meant to be
726 * passed to MCDIs, where it will undergo
727 * host-order to little endian conversion.
 */
729 mportp->sel = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
736 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Store a field's value and mask into a match specification's
 * mask-value pairs buffer, using the per-rule-type descriptor set to
 * locate and size-check the field. The value is stored pre-masked.
 */
740 __checkReturn efx_rc_t
741 efx_mae_match_spec_field_set(
742 __in efx_mae_match_spec_t *spec,
743 __in efx_mae_field_id_t field_id,
744 __in size_t value_size,
745 __in_bcount(value_size) const uint8_t *value,
746 __in size_t mask_size,
747 __in_bcount(mask_size) const uint8_t *mask)
749 const efx_mae_mv_desc_t *descp;
750 unsigned int desc_set_nentries;
/* Pick the descriptor set and buffer matching the rule type. */
754 switch (spec->emms_type) {
755 case EFX_MAE_RULE_OUTER:
757 EFX_ARRAY_SIZE(__efx_mae_outer_rule_mv_desc_set);
758 descp = &__efx_mae_outer_rule_mv_desc_set[field_id];
759 mvp = spec->emms_mask_value_pairs.outer;
761 case EFX_MAE_RULE_ACTION:
763 EFX_ARRAY_SIZE(__efx_mae_action_rule_mv_desc_set);
764 descp = &__efx_mae_action_rule_mv_desc_set[field_id];
765 mvp = spec->emms_mask_value_pairs.action;
772 if ((unsigned int)field_id >= desc_set_nentries) {
777 if (descp->emmd_mask_size == 0) {
778 /* The ID points to a gap in the array of field descriptors. */
/* Caller-supplied sizes must match the descriptor exactly. */
783 if (value_size != descp->emmd_value_size) {
788 if (mask_size != descp->emmd_mask_size) {
793 if (descp->emmd_endianness == EFX_MAE_FIELD_BE) {
/*
797 * The mask/value are in network (big endian) order.
798 * The MCDI request field is also big endian.
 */
801 EFSYS_ASSERT3U(value_size, ==, mask_size);
803 for (i = 0; i < value_size; ++i) {
804 uint8_t *v_bytep = mvp + descp->emmd_value_offset + i;
805 uint8_t *m_bytep = mvp + descp->emmd_mask_offset + i;
/*
808 * Apply the mask (which may be all-zeros) to the value.
810 * If this API is provided with some value to set for a
811 * given field in one specification and with some other
812 * value to set for this field in another specification,
813 * then, if the two masks are all-zeros, the field will
814 * avoid being counted as a mismatch when comparing the
815 * specifications using efx_mae_match_specs_equal() API.
 */
817 *v_bytep = value[i] & mask[i];
/*
824 * The mask/value are in host byte order.
825 * The MCDI request field is little endian.
 */
827 switch (value_size) {
829 EFX_POPULATE_DWORD_1(dword,
830 EFX_DWORD_0, *(const uint32_t *)value);
832 memcpy(mvp + descp->emmd_value_offset,
833 &dword, sizeof (dword));
/* Unexpected size for a little-endian field. */
836 EFSYS_ASSERT(B_FALSE);
841 EFX_POPULATE_DWORD_1(dword,
842 EFX_DWORD_0, *(const uint32_t *)mask);
844 memcpy(mvp + descp->emmd_mask_offset,
845 &dword, sizeof (dword));
848 EFSYS_ASSERT(B_FALSE);
863 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Set (or clear) a single-bit flag field's value in a match spec and
 * mark the corresponding mask bit so the flag becomes matched-on.
 */
867 __checkReturn efx_rc_t
868 efx_mae_match_spec_bit_set(
869 __in efx_mae_match_spec_t *spec,
870 __in efx_mae_field_id_t field_id,
871 __in boolean_t value)
873 const efx_mae_mv_bit_desc_t *bit_descp;
874 unsigned int bit_desc_set_nentries;
875 unsigned int byte_idx;
876 unsigned int bit_idx;
/* Pick the bit-descriptor set and buffer matching the rule type. */
880 switch (spec->emms_type) {
881 case EFX_MAE_RULE_OUTER:
882 bit_desc_set_nentries =
883 EFX_ARRAY_SIZE(__efx_mae_outer_rule_mv_bit_desc_set);
884 bit_descp = &__efx_mae_outer_rule_mv_bit_desc_set[field_id];
885 mvp = spec->emms_mask_value_pairs.outer;
887 case EFX_MAE_RULE_ACTION:
888 bit_desc_set_nentries =
889 EFX_ARRAY_SIZE(__efx_mae_action_rule_mv_bit_desc_set);
890 bit_descp = &__efx_mae_action_rule_mv_bit_desc_set[field_id];
891 mvp = spec->emms_mask_value_pairs.action;
898 if ((unsigned int)field_id >= bit_desc_set_nentries) {
/* Reject IDs that point to gaps in the descriptor array. */
903 if (bit_descp->emmbd_entry_is_valid == B_FALSE) {
/* Locate and update the value bit. */
908 byte_idx = bit_descp->emmbd_value_ofst + bit_descp->emmbd_value_lbn / 8;
909 bit_idx = bit_descp->emmbd_value_lbn % 8;
911 if (value != B_FALSE)
912 mvp[byte_idx] |= (1U << bit_idx);
914 mvp[byte_idx] &= ~(1U << bit_idx);
/* Always set the mask bit: the flag is now part of the match. */
916 byte_idx = bit_descp->emmbd_mask_ofst + bit_descp->emmbd_mask_lbn / 8;
917 bit_idx = bit_descp->emmbd_mask_lbn % 8;
918 mvp[byte_idx] |= (1U << bit_idx);
927 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Convenience wrapper: set the ingress m-port selector field of a
 * match spec. The mask is optional — a full (all-ones) match is used
 * when maskp is omitted.
 */
931 __checkReturn efx_rc_t
932 efx_mae_match_spec_mport_set(
933 __in efx_mae_match_spec_t *spec,
934 __in const efx_mport_sel_t *valuep,
935 __in_opt const efx_mport_sel_t *maskp)
937 uint32_t full_mask = UINT32_MAX;
942 if (valuep == NULL) {
947 vp = (const uint8_t *)&valuep->sel;
949 mp = (const uint8_t *)&maskp->sel;
/* No mask supplied: match the selector exactly. */
951 mp = (const uint8_t *)&full_mask;
953 rc = efx_mae_match_spec_field_set(spec,
954 EFX_MAE_FIELD_INGRESS_MPORT_SELECTOR,
955 sizeof (valuep->sel), vp, sizeof (maskp->sel), mp);
964 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Bytewise comparison of two match specifications. This is sound
 * because efx_mae_match_spec_field_set() stores values pre-masked,
 * so fully-masked-out fields compare equal regardless of input.
 */
968 __checkReturn boolean_t
969 efx_mae_match_specs_equal(
970 __in const efx_mae_match_spec_t *left,
971 __in const efx_mae_match_spec_t *right)
973 return ((memcmp(left, right, sizeof (*left)) == 0) ? B_TRUE : B_FALSE);
/*
 * Test bit number (_bit) in a mask stored as an array of
 * (_mask_page_nbits)-wide elements, LSB-first within each element.
 * NOTE(review): the in-page offset uses & (_mask_page_nbits - 1),
 * which equals modulo only for power-of-two widths (callers pass 8).
 */
976 #define EFX_MASK_BIT_IS_SET(_mask, _mask_page_nbits, _bit) \
977 ((_mask)[(_bit) / (_mask_page_nbits)] & \
978 (1ULL << ((_bit) & ((_mask_page_nbits) - 1))))
/*
 * Check that a mask is a contiguous prefix: once a clear bit has been
 * seen, no set bit may follow (rejects on the first 0 -> 1 transition).
 * Bit order is LSB-first within each byte, per EFX_MASK_BIT_IS_SET.
 * NOTE(review): the function name line is elided in this extract;
 * callers refer to it as efx_mask_is_prefix().
 */
982 __in size_t mask_nbytes,
983 __in_bcount(mask_nbytes) const uint8_t *maskp)
985 boolean_t prev_bit_is_set = B_TRUE;
988 for (i = 0; i < 8 * mask_nbytes; ++i) {
989 boolean_t bit_is_set = EFX_MASK_BIT_IS_SET(maskp, 8, i);
991 if (!prev_bit_is_set && bit_is_set)
994 prev_bit_is_set = bit_is_set;
/*
 * Check that every byte of the mask is 0xff. The accumulator `t`
 * presumably AND-folds the mask bytes (loop body elided in this
 * extract) before the final comparison.
 */
1001 efx_mask_is_all_ones(
1002 __in size_t mask_nbytes,
1003 __in_bcount(mask_nbytes) const uint8_t *maskp)
1008 for (i = 0; i < mask_nbytes; ++i)
1011 return (t == (uint8_t)(~0));
/* Check that every byte of the mask is zero. */
1015 efx_mask_is_all_zeros(
1016 __in size_t mask_nbytes,
1017 __in_bcount(mask_nbytes) const uint8_t *maskp)
1022 for (i = 0; i < mask_nbytes; ++i)
/*
 * Validate a match specification against the FW-reported field
 * capabilities: for each field, the mask supplied by the client must
 * be compatible with the field's support level (MASK / PREFIX /
 * OPTIONAL / ALWAYS / NEVER / UNSUPPORTED); flag bits are checked the
 * same way against their own capability IDs.
 */
1028 __checkReturn boolean_t
1029 efx_mae_match_spec_is_valid(
1030 __in efx_nic_t *enp,
1031 __in const efx_mae_match_spec_t *spec)
1033 efx_mae_t *maep = enp->en_maep;
1034 unsigned int field_ncaps = maep->em_max_nfields;
1035 const efx_mae_field_cap_t *field_caps;
1036 const efx_mae_mv_desc_t *desc_setp;
1037 unsigned int desc_set_nentries;
1038 const efx_mae_mv_bit_desc_t *bit_desc_setp;
1039 unsigned int bit_desc_set_nentries;
1040 boolean_t is_valid = B_TRUE;
1041 efx_mae_field_id_t field_id;
/* Pick capability arrays and descriptor sets for the rule type. */
1044 switch (spec->emms_type) {
1045 case EFX_MAE_RULE_OUTER:
1046 field_caps = maep->em_outer_rule_field_caps;
1047 desc_setp = __efx_mae_outer_rule_mv_desc_set;
1049 EFX_ARRAY_SIZE(__efx_mae_outer_rule_mv_desc_set);
1050 bit_desc_setp = __efx_mae_outer_rule_mv_bit_desc_set;
1051 bit_desc_set_nentries =
1052 EFX_ARRAY_SIZE(__efx_mae_outer_rule_mv_bit_desc_set);
1053 mvp = spec->emms_mask_value_pairs.outer;
1055 case EFX_MAE_RULE_ACTION:
1056 field_caps = maep->em_action_rule_field_caps;
1057 desc_setp = __efx_mae_action_rule_mv_desc_set;
1059 EFX_ARRAY_SIZE(__efx_mae_action_rule_mv_desc_set);
1060 bit_desc_setp = __efx_mae_action_rule_mv_bit_desc_set;
1061 bit_desc_set_nentries =
1062 EFX_ARRAY_SIZE(__efx_mae_action_rule_mv_bit_desc_set);
1063 mvp = spec->emms_mask_value_pairs.action;
1069 if (field_caps == NULL)
/* First pass: multi-byte fields. */
1072 for (field_id = 0; (unsigned int)field_id < desc_set_nentries;
1074 const efx_mae_mv_desc_t *descp = &desc_setp[field_id];
1075 efx_mae_field_cap_id_t field_cap_id = descp->emmd_field_cap_id;
1076 const uint8_t *alt_m_buf = mvp + descp->emmd_alt_mask_offset;
1077 const uint8_t *m_buf = mvp + descp->emmd_mask_offset;
1078 size_t alt_m_size = descp->emmd_alt_mask_size;
1079 size_t m_size = descp->emmd_mask_size;
1082 continue; /* Skip array gap */
1084 if ((unsigned int)field_cap_id >= field_ncaps) {
/*
1086 * The FW has not reported capability status for
1087 * this field. Make sure that its mask is zeroed.
 */
1089 is_valid = efx_mask_is_all_zeros(m_size, m_buf);
1090 if (is_valid != B_FALSE)
1096 switch (field_caps[field_cap_id].emfc_support) {
1097 case MAE_FIELD_SUPPORTED_MATCH_MASK:
1100 case MAE_FIELD_SUPPORTED_MATCH_PREFIX:
1101 is_valid = efx_mask_is_prefix(m_size, m_buf);
1103 case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL:
1104 is_valid = (efx_mask_is_all_ones(m_size, m_buf) ||
1105 efx_mask_is_all_zeros(m_size, m_buf));
1107 case MAE_FIELD_SUPPORTED_MATCH_ALWAYS:
1108 is_valid = efx_mask_is_all_ones(m_size, m_buf);
1110 if ((is_valid == B_FALSE) && (alt_m_size != 0)) {
/*
1112 * This field has an alternative one. The FW
1113 * reports ALWAYS for both implying that one
1114 * of them is required to have all-ones mask.
1116 * The primary field's mask is incorrect; go
1117 * on to check that of the alternative field.
 */
1119 is_valid = efx_mask_is_all_ones(alt_m_size,
1123 case MAE_FIELD_SUPPORTED_MATCH_NEVER:
1124 case MAE_FIELD_UNSUPPORTED:
1126 is_valid = efx_mask_is_all_zeros(m_size, m_buf);
1130 if (is_valid == B_FALSE)
/* Second pass: single-bit flag fields. */
1134 for (field_id = 0; (unsigned int)field_id < bit_desc_set_nentries;
1136 const efx_mae_mv_bit_desc_t *bit_descp =
1137 &bit_desc_setp[field_id];
1138 unsigned int byte_idx =
1139 bit_descp->emmbd_mask_ofst +
1140 bit_descp->emmbd_mask_lbn / 8;
1141 unsigned int bit_idx =
1142 bit_descp->emmbd_mask_lbn % 8;
1143 efx_mae_field_cap_id_t bit_cap_id =
1144 bit_descp->emmbd_bit_cap_id;
1146 if (bit_descp->emmbd_entry_is_valid == B_FALSE)
1147 continue; /* Skip array gap */
1149 if ((unsigned int)bit_cap_id >= field_ncaps) {
1150 /* No capability for this bit = unsupported. */
1151 is_valid = ((mvp[byte_idx] & (1U << bit_idx)) == 0);
1152 if (is_valid == B_FALSE)
1158 switch (field_caps[bit_cap_id].emfc_support) {
1159 case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL:
1162 case MAE_FIELD_SUPPORTED_MATCH_ALWAYS:
1163 is_valid = ((mvp[byte_idx] & (1U << bit_idx)) != 0);
1165 case MAE_FIELD_SUPPORTED_MATCH_NEVER:
1166 case MAE_FIELD_UNSUPPORTED:
1168 is_valid = ((mvp[byte_idx] & (1U << bit_idx)) == 0);
1172 if (is_valid == B_FALSE)
/*
 * Allocate an empty action set specification. The encap header
 * resource ID starts out invalid; it is filled in later if an
 * ENCAP action is requested.
 */
1179 __checkReturn efx_rc_t
1180 efx_mae_action_set_spec_init(
1181 __in efx_nic_t *enp,
1182 __out efx_mae_actions_t **specp)
1184 efx_mae_actions_t *spec;
1187 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), spec);
1193 spec->ema_rsrc.emar_eh_id.id = EFX_MAE_RSRC_ID_INVALID;
1200 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Release an action set specification from efx_mae_action_set_spec_init(). */
1205 efx_mae_action_set_spec_fini(
1206 __in efx_nic_t *enp,
1207 __in efx_mae_actions_t *spec)
1209 EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);
/*
 * DECAP action handler: takes no argument, so only reject a
 * non-empty argument buffer.
 */
1212 static __checkReturn efx_rc_t
1213 efx_mae_action_set_add_decap(
1214 __in efx_mae_actions_t *spec,
1215 __in size_t arg_size,
1216 __in_bcount(arg_size) const uint8_t *arg)
1220 _NOTE(ARGUNUSED(spec))
1222 if (arg_size != 0) {
1232 /* This action does not have any arguments, so do nothing here. */
1239 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * VLAN_POP action handler: no argument; each invocation increments the
 * pop count, bounded by EFX_MAE_VLAN_POP_MAX_NTAGS.
 */
1243 static __checkReturn efx_rc_t
1244 efx_mae_action_set_add_vlan_pop(
1245 __in efx_mae_actions_t *spec,
1246 __in size_t arg_size,
1247 __in_bcount(arg_size) const uint8_t *arg)
1251 if (arg_size != 0) {
1261 if (spec->ema_n_vlan_tags_to_pop == EFX_MAE_VLAN_POP_MAX_NTAGS) {
1266 ++spec->ema_n_vlan_tags_to_pop;
1275 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * VLAN_PUSH action handler: the argument is one VLAN push descriptor,
 * appended to the spec's array, bounded by EFX_MAE_VLAN_PUSH_MAX_NTAGS.
 */
1279 static __checkReturn efx_rc_t
1280 efx_mae_action_set_add_vlan_push(
1281 __in efx_mae_actions_t *spec,
1282 __in size_t arg_size,
1283 __in_bcount(arg_size) const uint8_t *arg)
1285 unsigned int n_tags = spec->ema_n_vlan_tags_to_push;
1288 if (arg_size != sizeof (*spec->ema_vlan_push_descs)) {
1298 if (n_tags == EFX_MAE_VLAN_PUSH_MAX_NTAGS) {
1303 memcpy(&spec->ema_vlan_push_descs[n_tags], arg, arg_size);
1304 ++(spec->ema_n_vlan_tags_to_push);
1313 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * ENCAP action handler: records the action only; the encap header ID is
 * provided separately (see comments below).
 */
1317 static __checkReturn efx_rc_t
1318 efx_mae_action_set_add_encap(
1319 __in efx_mae_actions_t *spec,
1320 __in size_t arg_size,
1321 __in_bcount(arg_size) const uint8_t *arg)
/*
1326 * Adding this specific action to an action set spec and setting encap.
1327 * header ID in the spec are two individual steps. This design allows
1328 * the client driver to avoid encap. header allocation when it simply
1329 * needs to check the order of actions submitted by user ("validate"),
1330 * without actually allocating an action set and inserting a rule.
1332 * For now, mark encap. header ID as invalid; the caller will invoke
1333 * efx_mae_action_set_fill_in_eh_id() to override the field prior
1334 * to action set allocation; otherwise, the allocation will fail.
 */
1336 spec->ema_rsrc.emar_eh_id.id = EFX_MAE_RSRC_ID_INVALID;
/*
1339 * As explained above, there are no arguments to handle here.
1340 * efx_mae_action_set_fill_in_eh_id() will take care of them.
 */
1342 if (arg_size != 0) {
1357 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * FLAG action handler: takes no argument, so only reject a non-empty
 * argument buffer.
 */
1361 static __checkReturn efx_rc_t
1362 efx_mae_action_set_add_flag(
1363 __in efx_mae_actions_t *spec,
1364 __in size_t arg_size,
1365 __in_bcount(arg_size) const uint8_t *arg)
1369 _NOTE(ARGUNUSED(spec))
1371 if (arg_size != 0) {
1381 /* This action does not have any arguments, so do nothing here. */
1388 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * MARK action handler: the argument is the mark value, copied verbatim
 * into the spec after an exact size check.
 */
1392 static __checkReturn efx_rc_t
1393 efx_mae_action_set_add_mark(
1394 __in efx_mae_actions_t *spec,
1395 __in size_t arg_size,
1396 __in_bcount(arg_size) const uint8_t *arg)
1400 if (arg_size != sizeof (spec->ema_mark_value)) {
1410 memcpy(&spec->ema_mark_value, arg, arg_size);
1417 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * DELIVER action handler: the argument is the destination m-port,
 * copied verbatim into the spec after an exact size check.
 */
1421 static __checkReturn efx_rc_t
1422 efx_mae_action_set_add_deliver(
1423 __in efx_mae_actions_t *spec,
1424 __in size_t arg_size,
1425 __in_bcount(arg_size) const uint8_t *arg)
1429 if (arg_size != sizeof (spec->ema_deliver_mport)) {
1439 memcpy(&spec->ema_deliver_mport, arg, arg_size);
1446 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Per-action dispatch entry; see the efx_mae_actions table below. */
1450 typedef struct efx_mae_action_desc_s {
1451 /* Handler that validates the argument and adds the action to a spec. */
1452 efx_rc_t (*emad_add)(efx_mae_actions_t *,
1453 size_t, const uint8_t *);
1454 } efx_mae_action_desc_t;
/* Dispatch table indexed by efx_mae_action_t; one handler per action. */
1456 static const efx_mae_action_desc_t efx_mae_actions[EFX_MAE_NACTIONS] = {
1457 [EFX_MAE_ACTION_DECAP] = {
1458 .emad_add = efx_mae_action_set_add_decap
1460 [EFX_MAE_ACTION_VLAN_POP] = {
1461 .emad_add = efx_mae_action_set_add_vlan_pop
1463 [EFX_MAE_ACTION_VLAN_PUSH] = {
1464 .emad_add = efx_mae_action_set_add_vlan_push
1466 [EFX_MAE_ACTION_ENCAP] = {
1467 .emad_add = efx_mae_action_set_add_encap
1469 [EFX_MAE_ACTION_FLAG] = {
1470 .emad_add = efx_mae_action_set_add_flag
1472 [EFX_MAE_ACTION_MARK] = {
1473 .emad_add = efx_mae_action_set_add_mark
1475 [EFX_MAE_ACTION_DELIVER] = {
1476 .emad_add = efx_mae_action_set_add_deliver
/*
 * Actions in this map must be requested in ascending enum order;
 * an action is rejected when any strictly-ordered action with a
 * higher bit is already present in the spec (see the check in the
 * spec-populate routine).
 */
1480 static const uint32_t efx_mae_action_ordered_map =
1481 (1U << EFX_MAE_ACTION_DECAP) |
1482 (1U << EFX_MAE_ACTION_VLAN_POP) |
1483 (1U << EFX_MAE_ACTION_VLAN_PUSH) |
1484 (1U << EFX_MAE_ACTION_ENCAP) |
1485 (1U << EFX_MAE_ACTION_FLAG) |
1486 (1U << EFX_MAE_ACTION_MARK) |
1487 (1U << EFX_MAE_ACTION_DELIVER);
1490 * These actions must not be added after DELIVER, but
1491 * they can have any place among the rest of
1492 * strictly ordered actions.
1494 static const uint32_t efx_mae_action_nonstrict_map =
1495 (1U << EFX_MAE_ACTION_FLAG) |
1496 (1U << EFX_MAE_ACTION_MARK);
1498 static const uint32_t efx_mae_action_repeat_map =
1499 (1U << EFX_MAE_ACTION_VLAN_POP) |
1500 (1U << EFX_MAE_ACTION_VLAN_PUSH);
/*
 * Common entry point used by all efx_mae_action_set_populate_*() helpers:
 * validates the action type, rejects disallowed repeats and out-of-order
 * additions, runs the per-action argument handler, and finally records
 * the action in the spec's action bitmap.
 * NOTE(review): error-path lines (rc assignments, gotos, labels, braces)
 * are elided in this excerpt.
 */
1503 * Add an action to an action set.
1505 * This has to be invoked in the desired action order.
1506 * An out-of-order action request will be turned down.
1508 static __checkReturn efx_rc_t
1509 efx_mae_action_set_spec_populate(
1510 __in efx_mae_actions_t *spec,
1511 __in efx_mae_action_t type,
1512 __in size_t arg_size,
1513 __in_bcount(arg_size) const uint8_t *arg)
1515 uint32_t action_mask;
/* The bitmaps must be wide enough to hold one bit per action type. */
1518 EFX_STATIC_ASSERT(EFX_MAE_NACTIONS <=
1519 (sizeof (efx_mae_action_ordered_map) * 8));
1520 EFX_STATIC_ASSERT(EFX_MAE_NACTIONS <=
1521 (sizeof (efx_mae_action_repeat_map) * 8));
/*
 * The ordering logic below relies on FLAG, MARK and DELIVER being the
 * last three, consecutive action type values.
 */
1523 EFX_STATIC_ASSERT(EFX_MAE_ACTION_DELIVER + 1 == EFX_MAE_NACTIONS);
1524 EFX_STATIC_ASSERT(EFX_MAE_ACTION_FLAG + 1 == EFX_MAE_ACTION_MARK);
1525 EFX_STATIC_ASSERT(EFX_MAE_ACTION_MARK + 1 == EFX_MAE_ACTION_DELIVER);
/* Reject action types outside the dispatch table. */
1527 if (type >= EFX_ARRAY_SIZE(efx_mae_actions)) {
1532 action_mask = (1U << type);
1534 if ((spec->ema_actions & action_mask) != 0) {
1535 /* The action set already contains this action. */
1536 if ((efx_mae_action_repeat_map & action_mask) == 0) {
1537 /* Cannot add another non-repeatable action. */
1543 if ((efx_mae_action_ordered_map & action_mask) != 0) {
/* Strictly ordered actions are the ordered ones minus FLAG/MARK. */
1544 uint32_t strict_ordered_map =
1545 efx_mae_action_ordered_map & ~efx_mae_action_nonstrict_map;
/* Bits of all strictly ordered actions that must come AFTER this one. */
1546 uint32_t later_actions_mask =
1547 strict_ordered_map & ~(action_mask | (action_mask - 1));
1549 if ((spec->ema_actions & later_actions_mask) != 0) {
1550 /* Cannot add an action after later ordered actions. */
/* Run the per-action argument handler, if the action has one. */
1556 if (efx_mae_actions[type].emad_add != NULL) {
1557 rc = efx_mae_actions[type].emad_add(spec, arg_size, arg);
/* Record the action only after its argument has been accepted. */
1562 spec->ema_actions |= action_mask;
1573 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Public wrapper: add argument-less action DECAP to the action set spec. */
1577 __checkReturn efx_rc_t
1578 efx_mae_action_set_populate_decap(
1579 __in efx_mae_actions_t *spec)
1581 return (efx_mae_action_set_spec_populate(spec,
1582 EFX_MAE_ACTION_DECAP, 0, NULL));
/* Public wrapper: add argument-less action VLAN_POP to the spec. */
1585 __checkReturn efx_rc_t
1586 efx_mae_action_set_populate_vlan_pop(
1587 __in efx_mae_actions_t *spec)
1589 return (efx_mae_action_set_spec_populate(spec,
1590 EFX_MAE_ACTION_VLAN_POP, 0, NULL));
/*
 * Public wrapper: add action VLAN_PUSH with the given TPID and TCI.
 * Both values are expected in big-endian form, as the *_be suffixes say.
 */
1593 __checkReturn efx_rc_t
1594 efx_mae_action_set_populate_vlan_push(
1595 __in efx_mae_actions_t *spec,
1596 __in uint16_t tpid_be,
1597 __in uint16_t tci_be)
1599 efx_mae_action_vlan_push_t action;
1600 const uint8_t *arg = (const uint8_t *)&action;
/* Pack the two values into the on-stack argument structure. */
1602 action.emavp_tpid_be = tpid_be;
1603 action.emavp_tci_be = tci_be;
1605 return (efx_mae_action_set_spec_populate(spec,
1606 EFX_MAE_ACTION_VLAN_PUSH, sizeof (action), arg));
/*
 * Public wrapper: add action ENCAP to the spec. The encap. header ID is
 * supplied later via efx_mae_action_set_fill_in_eh_id().
 */
1609 __checkReturn efx_rc_t
1610 efx_mae_action_set_populate_encap(
1611 __in efx_mae_actions_t *spec)
1614 * There is no argument to pass encap. header ID, thus, one does not
1615 * need to allocate an encap. header while parsing application input.
1616 * This is useful since building an action set may be done simply to
1617 * validate a rule, whilst resource allocation usually consumes time.
1619 return (efx_mae_action_set_spec_populate(spec,
1620 EFX_MAE_ACTION_ENCAP, 0, NULL));
/* Public wrapper: add argument-less action FLAG to the spec. */
1623 __checkReturn efx_rc_t
1624 efx_mae_action_set_populate_flag(
1625 __in efx_mae_actions_t *spec)
1627 return (efx_mae_action_set_spec_populate(spec,
1628 EFX_MAE_ACTION_FLAG, 0, NULL));
/* Public wrapper: add action MARK carrying the given 32-bit mark value. */
1631 __checkReturn efx_rc_t
1632 efx_mae_action_set_populate_mark(
1633 __in efx_mae_actions_t *spec,
1634 __in uint32_t mark_value)
/* Pass the value as a raw byte buffer; the handler checks the size. */
1636 const uint8_t *arg = (const uint8_t *)&mark_value;
1638 return (efx_mae_action_set_spec_populate(spec,
1639 EFX_MAE_ACTION_MARK, sizeof (mark_value), arg));
/*
 * Public wrapper: add action DELIVER targeting the given m-port selector.
 * NOTE(review): error-path lines are elided in this excerpt.
 */
1642 __checkReturn efx_rc_t
1643 efx_mae_action_set_populate_deliver(
1644 __in efx_mae_actions_t *spec,
1645 __in const efx_mport_sel_t *mportp)
/* A destination m-port is mandatory for DELIVER. */
1650 if (mportp == NULL) {
1655 arg = (const uint8_t *)&mportp->sel;
1657 return (efx_mae_action_set_spec_populate(spec,
1658 EFX_MAE_ACTION_DELIVER, sizeof (mportp->sel), arg));
1661 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Public wrapper: express DROP as a DELIVER to the NULL m-port selector,
 * built locally from MAE_MPORT_SELECTOR_NULL.
 */
1665 __checkReturn efx_rc_t
1666 efx_mae_action_set_populate_drop(
1667 __in efx_mae_actions_t *spec)
1669 efx_mport_sel_t mport;
/* Construct a NULL ("black hole") m-port selector. */
1673 EFX_POPULATE_DWORD_1(dword,
1674 MAE_MPORT_SELECTOR_FLAT, MAE_MPORT_SELECTOR_NULL);
1677 * The constructed DWORD is little-endian,
1678 * but the resulting value is meant to be
1679 * passed to MCDIs, where it will undergo
1680 * host-order to little endian conversion.
1682 mport.sel = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
1684 arg = (const uint8_t *)&mport.sel;
1686 return (efx_mae_action_set_spec_populate(spec,
1687 EFX_MAE_ACTION_DELIVER, sizeof (mport.sel), arg));
/*
 * Compare two action set specifications for equality, deliberately
 * excluding the trailing resource-ID part (everything from ema_rsrc on);
 * resource IDs are owned and compared by the client if needed.
 */
1690 __checkReturn boolean_t
1691 efx_mae_action_set_specs_equal(
1692 __in const efx_mae_actions_t *left,
1693 __in const efx_mae_actions_t *right)
/* Only bytes preceding the ema_rsrc member take part in the comparison. */
1695 size_t cmp_size = EFX_FIELD_OFFSET(efx_mae_actions_t, ema_rsrc);
1698 * An action set specification consists of two parts. The first part
1699 * indicates what actions are included in the action set, as well as
1700 * extra quantitative values (in example, the number of VLAN tags to
1701 * push). The second part comprises resource IDs used by the actions.
1703 * A resource, in example, a counter, is allocated from the hardware
1704 * by the client, and it's the client who is responsible for keeping
1705 * track of allocated resources and comparing resource IDs if needed.
1707 * In this API, don't compare resource IDs in the two specifications.
1710 return ((memcmp(left, right, cmp_size) == 0) ? B_TRUE : B_FALSE);
/*
 * Decide whether two match specifications belong to the same hardware
 * match class. For each described match field (and each individually
 * described match bit) the FW-reported field capabilities say whether a
 * difference in mask and/or value between the two specs affects the
 * class; the specs are walked until a class-affecting difference is
 * found. Rules of different types or priorities never share a class.
 * NOTE(review): error-path lines, loop increments and some braces are
 * elided in this excerpt.
 */
1713 __checkReturn efx_rc_t
1714 efx_mae_match_specs_class_cmp(
1715 __in efx_nic_t *enp,
1716 __in const efx_mae_match_spec_t *left,
1717 __in const efx_mae_match_spec_t *right,
1718 __out boolean_t *have_same_classp)
1720 efx_mae_t *maep = enp->en_maep;
1721 unsigned int field_ncaps = maep->em_max_nfields;
1722 const efx_mae_field_cap_t *field_caps;
1723 const efx_mae_mv_desc_t *desc_setp;
1724 unsigned int desc_set_nentries;
1725 const efx_mae_mv_bit_desc_t *bit_desc_setp;
1726 unsigned int bit_desc_set_nentries;
1727 boolean_t have_same_class = B_TRUE;
1728 efx_mae_field_id_t field_id;
1729 const uint8_t *mvpl;
1730 const uint8_t *mvpr;
/* Pick the capability tables and mask-value buffers per rule type. */
1733 switch (left->emms_type) {
1734 case EFX_MAE_RULE_OUTER:
1735 field_caps = maep->em_outer_rule_field_caps;
1736 desc_setp = __efx_mae_outer_rule_mv_desc_set;
1738 EFX_ARRAY_SIZE(__efx_mae_outer_rule_mv_desc_set);
1739 bit_desc_setp = __efx_mae_outer_rule_mv_bit_desc_set;
1740 bit_desc_set_nentries =
1741 EFX_ARRAY_SIZE(__efx_mae_outer_rule_mv_bit_desc_set);
1742 mvpl = left->emms_mask_value_pairs.outer;
1743 mvpr = right->emms_mask_value_pairs.outer;
1745 case EFX_MAE_RULE_ACTION:
1746 field_caps = maep->em_action_rule_field_caps;
1747 desc_setp = __efx_mae_action_rule_mv_desc_set;
1749 EFX_ARRAY_SIZE(__efx_mae_action_rule_mv_desc_set);
1750 bit_desc_setp = __efx_mae_action_rule_mv_bit_desc_set;
1751 bit_desc_set_nentries =
1752 EFX_ARRAY_SIZE(__efx_mae_action_rule_mv_bit_desc_set);
1753 mvpl = left->emms_mask_value_pairs.action;
1754 mvpr = right->emms_mask_value_pairs.action;
/* Without FW capabilities no class comparison is possible. */
1761 if (field_caps == NULL) {
1766 if (left->emms_type != right->emms_type ||
1767 left->emms_prio != right->emms_prio) {
1769 * Rules of different types can never map to the same class.
1771 * The FW can support some set of match criteria for one
1772 * priority and not support the very same set for
1773 * another priority. Thus, two rules which have
1774 * different priorities can never map to
1777 *have_same_classp = B_FALSE;
/* Pass 1: whole-field mask-value descriptors. */
1781 for (field_id = 0; (unsigned int)field_id < desc_set_nentries;
1783 const efx_mae_mv_desc_t *descp = &desc_setp[field_id];
1784 efx_mae_field_cap_id_t field_cap_id = descp->emmd_field_cap_id;
1785 const uint8_t *lmaskp = mvpl + descp->emmd_mask_offset;
1786 const uint8_t *rmaskp = mvpr + descp->emmd_mask_offset;
1787 size_t mask_size = descp->emmd_mask_size;
1788 const uint8_t *lvalp = mvpl + descp->emmd_value_offset;
1789 const uint8_t *rvalp = mvpr + descp->emmd_value_offset;
1790 size_t value_size = descp->emmd_value_size;
1793 continue; /* Skip array gap */
1795 if ((unsigned int)field_cap_id >= field_ncaps) {
1797 * The FW has not reported capability status for this
1798 * field. It's unknown whether any difference between
1799 * the two masks / values affects the class. The only
1800 * case when the class must be the same is when these
1801 * mask-value pairs match. Otherwise, report mismatch.
1803 if ((memcmp(lmaskp, rmaskp, mask_size) == 0) &&
1804 (memcmp(lvalp, rvalp, value_size) == 0))
/* Differing masks matter only if the FW says they affect the class. */
1810 if (field_caps[field_cap_id].emfc_mask_affects_class) {
1811 if (memcmp(lmaskp, rmaskp, mask_size) != 0) {
1812 have_same_class = B_FALSE;
/* Same for differing match values. */
1817 if (field_caps[field_cap_id].emfc_match_affects_class) {
1818 if (memcmp(lvalp, rvalp, value_size) != 0) {
1819 have_same_class = B_FALSE;
1825 if (have_same_class == B_FALSE)
/* Pass 2: individually described match bits. */
1828 for (field_id = 0; (unsigned int)field_id < bit_desc_set_nentries;
1830 const efx_mae_mv_bit_desc_t *bit_descp =
1831 &bit_desc_setp[field_id];
1832 efx_mae_field_cap_id_t bit_cap_id =
1833 bit_descp->emmbd_bit_cap_id;
1834 unsigned int byte_idx;
1835 unsigned int bit_idx;
1837 if (bit_descp->emmbd_entry_is_valid == B_FALSE)
1838 continue; /* Skip array gap */
1840 if ((unsigned int)bit_cap_id >= field_ncaps)
/* Locate the bit inside the mask part of the buffer. */
1844 bit_descp->emmbd_mask_ofst +
1845 bit_descp->emmbd_mask_lbn / 8;
1847 bit_descp->emmbd_mask_lbn % 8;
1849 if (field_caps[bit_cap_id].emfc_mask_affects_class &&
1850 (mvpl[byte_idx] & (1U << bit_idx)) !=
1851 (mvpr[byte_idx] & (1U << bit_idx))) {
1852 have_same_class = B_FALSE;
/* Locate the bit inside the value part of the buffer. */
1857 bit_descp->emmbd_value_ofst +
1858 bit_descp->emmbd_value_lbn / 8;
1860 bit_descp->emmbd_value_lbn % 8;
1862 if (field_caps[bit_cap_id].emfc_match_affects_class &&
1863 (mvpl[byte_idx] & (1U << bit_idx)) !=
1864 (mvpr[byte_idx] & (1U << bit_idx))) {
1865 have_same_class = B_FALSE;
1871 *have_same_classp = have_same_class;
1878 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Insert an outer (encapsulation) match rule via
 * MC_CMD_MAE_OUTER_RULE_INSERT and return the firmware-assigned rule ID.
 * NOTE(review): error-path lines (rc assignments, gotos, labels) are
 * elided in this excerpt.
 */
1882 __checkReturn efx_rc_t
1883 efx_mae_outer_rule_insert(
1884 __in efx_nic_t *enp,
1885 __in const efx_mae_match_spec_t *spec,
1886 __in efx_tunnel_protocol_t encap_type,
1887 __out efx_mae_rule_id_t *or_idp)
1889 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
1891 EFX_MCDI_DECLARE_BUF(payload,
1892 MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX_MCDI2,
1893 MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN);
1894 uint32_t encap_type_mcdi;
1895 efx_mae_rule_id_t or_id;
/* The output ID field must fit the client-visible rule ID exactly. */
1899 EFX_STATIC_ASSERT(sizeof (or_idp->id) ==
1900 MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_LEN);
1902 EFX_STATIC_ASSERT(EFX_MAE_RSRC_ID_INVALID ==
1903 MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL);
1905 if (encp->enc_mae_supported == B_FALSE) {
/* Only outer-rule match specs may be inserted here. */
1910 if (spec->emms_type != EFX_MAE_RULE_OUTER) {
/* Translate the libefx tunnel protocol to its MCDI encap type. */
1915 switch (encap_type) {
1916 case EFX_TUNNEL_PROTOCOL_NONE:
1917 encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_NONE;
1919 case EFX_TUNNEL_PROTOCOL_VXLAN:
1920 encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_VXLAN;
1922 case EFX_TUNNEL_PROTOCOL_GENEVE:
1923 encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_GENEVE;
1925 case EFX_TUNNEL_PROTOCOL_NVGRE:
1926 encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_NVGRE;
1933 req.emr_cmd = MC_CMD_MAE_OUTER_RULE_INSERT;
1934 req.emr_in_buf = payload;
1935 req.emr_in_length = MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX_MCDI2;
1936 req.emr_out_buf = payload;
1937 req.emr_out_length = MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN;
1939 MCDI_IN_SET_DWORD(req,
1940 MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, encap_type_mcdi);
1942 MCDI_IN_SET_DWORD(req, MAE_OUTER_RULE_INSERT_IN_PRIO, spec->emms_prio);
1945 * Mask-value pairs have been stored in the byte order needed for the
1946 * MCDI request and are thus safe to be copied directly to the buffer.
1947 * The library cares about byte order in efx_mae_match_spec_field_set().
1949 EFX_STATIC_ASSERT(sizeof (spec->emms_mask_value_pairs.outer) >=
1950 MAE_ENC_FIELD_PAIRS_LEN);
1951 offset = MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_OFST;
1952 memcpy(payload + offset, spec->emms_mask_value_pairs.outer,
1953 MAE_ENC_FIELD_PAIRS_LEN);
1955 efx_mcdi_execute(enp, &req);
1957 if (req.emr_rc != 0) {
/* A short response cannot carry the rule ID. */
1962 if (req.emr_out_length_used < MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN) {
1967 or_id.id = MCDI_OUT_DWORD(req, MAE_OUTER_RULE_INSERT_OUT_OR_ID);
1968 if (or_id.id == EFX_MAE_RSRC_ID_INVALID) {
/* Expose the ID to the caller only after full validation. */
1973 or_idp->id = or_id.id;
1988 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Remove a previously inserted outer rule via MC_CMD_MAE_OUTER_RULE_REMOVE
 * and verify that the firmware reports the same rule ID as removed.
 */
1992 __checkReturn efx_rc_t
1993 efx_mae_outer_rule_remove(
1994 __in efx_nic_t *enp,
1995 __in const efx_mae_rule_id_t *or_idp)
1997 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
1999 EFX_MCDI_DECLARE_BUF(payload,
2000 MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(1),
2001 MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(1));
2004 if (encp->enc_mae_supported == B_FALSE) {
/* Single-ID request/response: LEN(1) on both directions. */
2009 req.emr_cmd = MC_CMD_MAE_OUTER_RULE_REMOVE;
2010 req.emr_in_buf = payload;
2011 req.emr_in_length = MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(1);
2012 req.emr_out_buf = payload;
2013 req.emr_out_length = MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(1);
2015 MCDI_IN_SET_DWORD(req, MAE_OUTER_RULE_REMOVE_IN_OR_ID, or_idp->id);
2017 efx_mcdi_execute(enp, &req);
2019 if (req.emr_rc != 0) {
2024 if (req.emr_out_length_used < MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMIN) {
/* The echoed ID must match the one requested for removal. */
2029 if (MCDI_OUT_DWORD(req, MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID) !=
2031 /* Firmware failed to remove the outer rule. */
2045 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Reference an outer rule from an action rule match spec by setting the
 * OUTER_RULE_ID match field with a full (all-ones) mask.
 */
2049 __checkReturn efx_rc_t
2050 efx_mae_match_spec_outer_rule_id_set(
2051 __in efx_mae_match_spec_t *spec,
2052 __in const efx_mae_rule_id_t *or_idp)
2054 uint32_t full_mask = UINT32_MAX;
/* Only action rules may match on an outer rule ID. */
2057 if (spec->emms_type != EFX_MAE_RULE_ACTION) {
2062 if (or_idp == NULL) {
2067 rc = efx_mae_match_spec_field_set(spec, EFX_MAE_FIELD_OUTER_RULE_ID,
2068 sizeof (or_idp->id), (const uint8_t *)&or_idp->id,
2069 sizeof (full_mask), (const uint8_t *)&full_mask);
2080 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate an encapsulation header of the given tunnel type from the
 * firmware (MC_CMD_MAE_ENCAP_HEADER_ALLOC) and return its ID.
 * NOTE(review): error-path lines and the header_size bound check's first
 * line are elided in this excerpt.
 */
2084 __checkReturn efx_rc_t
2085 efx_mae_encap_header_alloc(
2086 __in efx_nic_t *enp,
2087 __in efx_tunnel_protocol_t encap_type,
2088 __in_bcount(header_size) uint8_t *header_data,
2089 __in size_t header_size,
2090 __out efx_mae_eh_id_t *eh_idp)
2092 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2094 EFX_MCDI_DECLARE_BUF(payload,
2095 MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX_MCDI2,
2096 MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN);
2097 uint32_t encap_type_mcdi;
2098 efx_mae_eh_id_t eh_id;
2101 EFX_STATIC_ASSERT(sizeof (eh_idp->id) ==
2102 MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_LEN);
2104 EFX_STATIC_ASSERT(EFX_MAE_RSRC_ID_INVALID ==
2105 MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
2107 if (encp->enc_mae_supported == B_FALSE) {
/* Translate the libefx tunnel protocol to its MCDI encap type. */
2112 switch (encap_type) {
2113 case EFX_TUNNEL_PROTOCOL_NONE:
2114 encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_NONE;
2116 case EFX_TUNNEL_PROTOCOL_VXLAN:
2117 encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_VXLAN;
2119 case EFX_TUNNEL_PROTOCOL_GENEVE:
2120 encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_GENEVE;
2122 case EFX_TUNNEL_PROTOCOL_NVGRE:
2123 encap_type_mcdi = MAE_MCDI_ENCAP_TYPE_NVGRE;
/* Reject header data exceeding the MCDI request capacity. */
2131 MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM_MCDI2) {
2136 req.emr_cmd = MC_CMD_MAE_ENCAP_HEADER_ALLOC;
2137 req.emr_in_buf = payload;
2138 req.emr_in_length = MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(header_size);
2139 req.emr_out_buf = payload;
2140 req.emr_out_length = MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN;
2142 MCDI_IN_SET_DWORD(req,
2143 MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE, encap_type_mcdi);
/* Copy the raw header bytes into the variable-length request tail. */
2145 memcpy(payload + MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_OFST,
2146 header_data, header_size);
2148 efx_mcdi_execute(enp, &req);
2150 if (req.emr_rc != 0) {
2155 if (req.emr_out_length_used < MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN) {
2160 eh_id.id = MCDI_OUT_DWORD(req,
2161 MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID);
2163 if (eh_id.id == EFX_MAE_RSRC_ID_INVALID) {
2168 eh_idp->id = eh_id.id;
2183 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free a previously allocated encap. header via
 * MC_CMD_MAE_ENCAP_HEADER_FREE and verify the firmware echoes its ID.
 */
2187 __checkReturn efx_rc_t
2188 efx_mae_encap_header_free(
2189 __in efx_nic_t *enp,
2190 __in const efx_mae_eh_id_t *eh_idp)
2192 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2194 EFX_MCDI_DECLARE_BUF(payload,
2195 MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LEN(1),
2196 MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LEN(1));
2199 if (encp->enc_mae_supported == B_FALSE) {
2204 req.emr_cmd = MC_CMD_MAE_ENCAP_HEADER_FREE;
2205 req.emr_in_buf = payload;
2206 req.emr_in_length = MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LEN(1);
2207 req.emr_out_buf = payload;
2208 req.emr_out_length = MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LEN(1);
2210 MCDI_IN_SET_DWORD(req, MAE_ENCAP_HEADER_FREE_IN_EH_ID, eh_idp->id);
2212 efx_mcdi_execute(enp, &req);
2214 if (req.emr_rc != 0) {
/* The echoed ID must match the one requested to be freed. */
2219 if (MCDI_OUT_DWORD(req, MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID) !=
2221 /* Firmware failed to remove the encap. header. */
2233 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Late-bind an allocated encap. header ID to a spec that already contains
 * action ENCAP (see efx_mae_action_set_populate_encap()). The ID may be
 * set exactly once and must be valid.
 */
2237 __checkReturn efx_rc_t
2238 efx_mae_action_set_fill_in_eh_id(
2239 __in efx_mae_actions_t *spec,
2240 __in const efx_mae_eh_id_t *eh_idp)
2244 if ((spec->ema_actions & (1U << EFX_MAE_ACTION_ENCAP)) == 0) {
2246 * The caller has not intended to have action ENCAP originally,
2247 * hence, this attempt to indicate encap. header ID is invalid.
2253 if (spec->ema_rsrc.emar_eh_id.id != EFX_MAE_RSRC_ID_INVALID) {
2254 /* The caller attempts to indicate encap. header ID twice. */
2259 if (eh_idp->id == EFX_MAE_RSRC_ID_INVALID) {
2264 spec->ema_rsrc.emar_eh_id.id = eh_idp->id;
2273 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Materialise an action set specification in the firmware via
 * MC_CMD_MAE_ACTION_SET_ALLOC, translating the spec's action bitmap and
 * per-action arguments into MCDI fields, and return the action set ID.
 * NOTE(review): error-path lines and some braces are elided in this
 * excerpt.
 */
2277 __checkReturn efx_rc_t
2278 efx_mae_action_set_alloc(
2279 __in efx_nic_t *enp,
2280 __in const efx_mae_actions_t *spec,
2281 __out efx_mae_aset_id_t *aset_idp)
2283 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2285 EFX_MCDI_DECLARE_BUF(payload,
2286 MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN,
2287 MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
2288 efx_mae_aset_id_t aset_id;
2291 if (encp->enc_mae_supported == B_FALSE) {
2296 req.emr_cmd = MC_CMD_MAE_ACTION_SET_ALLOC;
2297 req.emr_in_buf = payload;
2298 req.emr_in_length = MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN;
2299 req.emr_out_buf = payload;
2300 req.emr_out_length = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN;
2303 * TODO: Remove these EFX_MAE_RSRC_ID_INVALID assignments once the
2304 * corresponding resource types are supported by the implementation.
2305 * Use proper resource ID assignments instead.
2307 MCDI_IN_SET_DWORD(req,
2308 MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID, EFX_MAE_RSRC_ID_INVALID);
2309 MCDI_IN_SET_DWORD(req,
2310 MAE_ACTION_SET_ALLOC_IN_COUNTER_ID, EFX_MAE_RSRC_ID_INVALID);
/* DECAP is a single flag bit in the request. */
2312 if ((spec->ema_actions & (1U << EFX_MAE_ACTION_DECAP)) != 0) {
2313 MCDI_IN_SET_DWORD_FIELD(req, MAE_ACTION_SET_ALLOC_IN_FLAGS,
2314 MAE_ACTION_SET_ALLOC_IN_DECAP, 1);
/* VLAN pop is expressed as a tag count rather than a flag. */
2317 MCDI_IN_SET_DWORD_FIELD(req, MAE_ACTION_SET_ALLOC_IN_FLAGS,
2318 MAE_ACTION_SET_ALLOC_IN_VLAN_POP, spec->ema_n_vlan_tags_to_pop);
2320 if (spec->ema_n_vlan_tags_to_push > 0) {
2321 unsigned int outer_tag_idx;
2323 MCDI_IN_SET_DWORD_FIELD(req, MAE_ACTION_SET_ALLOC_IN_FLAGS,
2324 MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH,
2325 spec->ema_n_vlan_tags_to_push);
/* With the maximum tag count, descriptor [0] supplies the inner tag. */
2327 if (spec->ema_n_vlan_tags_to_push ==
2328 EFX_MAE_VLAN_PUSH_MAX_NTAGS) {
2329 MCDI_IN_SET_WORD(req,
2330 MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE,
2331 spec->ema_vlan_push_descs[0].emavp_tpid_be);
2332 MCDI_IN_SET_WORD(req,
2333 MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE,
2334 spec->ema_vlan_push_descs[0].emavp_tci_be);
/* The last descriptor always supplies the outermost tag (VLAN0). */
2337 outer_tag_idx = spec->ema_n_vlan_tags_to_push - 1;
2339 MCDI_IN_SET_WORD(req, MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE,
2340 spec->ema_vlan_push_descs[outer_tag_idx].emavp_tpid_be);
2341 MCDI_IN_SET_WORD(req, MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE,
2342 spec->ema_vlan_push_descs[outer_tag_idx].emavp_tci_be);
/* INVALID when no ENCAP action / ID was supplied (see fill_in_eh_id). */
2345 MCDI_IN_SET_DWORD(req, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
2346 spec->ema_rsrc.emar_eh_id.id);
2348 if ((spec->ema_actions & (1U << EFX_MAE_ACTION_FLAG)) != 0) {
2349 MCDI_IN_SET_DWORD_FIELD(req, MAE_ACTION_SET_ALLOC_IN_FLAGS,
2350 MAE_ACTION_SET_ALLOC_IN_FLAG, 1);
2353 if ((spec->ema_actions & (1U << EFX_MAE_ACTION_MARK)) != 0) {
2354 MCDI_IN_SET_DWORD_FIELD(req, MAE_ACTION_SET_ALLOC_IN_FLAGS,
2355 MAE_ACTION_SET_ALLOC_IN_MARK, 1);
2357 MCDI_IN_SET_DWORD(req,
2358 MAE_ACTION_SET_ALLOC_IN_MARK_VALUE, spec->ema_mark_value);
2361 MCDI_IN_SET_DWORD(req,
2362 MAE_ACTION_SET_ALLOC_IN_DELIVER, spec->ema_deliver_mport.sel);
/* Source/destination MAC rewrite is not supported: pass NULL MAC IDs. */
2364 MCDI_IN_SET_DWORD(req, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
2365 MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
2366 MCDI_IN_SET_DWORD(req, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
2367 MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
2369 efx_mcdi_execute(enp, &req);
2371 if (req.emr_rc != 0) {
2376 if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN) {
2381 aset_id.id = MCDI_OUT_DWORD(req, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
2382 if (aset_id.id == EFX_MAE_RSRC_ID_INVALID) {
2387 aset_idp->id = aset_id.id;
2398 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate up to n_counters MAE counters via MC_CMD_MAE_COUNTER_ALLOC.
 * The firmware may grant fewer than requested; the actual count is
 * reported via n_allocatedp and the IDs via countersp. The optional
 * generation count lets the caller detect counter-stream restarts.
 */
2402 __checkReturn efx_rc_t
2403 efx_mae_counters_alloc(
2404 __in efx_nic_t *enp,
2405 __in uint32_t n_counters,
2406 __out uint32_t *n_allocatedp,
2407 __out_ecount(n_counters) efx_counter_t *countersp,
2408 __out_opt uint32_t *gen_countp)
2410 EFX_MCDI_DECLARE_BUF(payload,
2411 MC_CMD_MAE_COUNTER_ALLOC_IN_LEN,
2412 MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX_MCDI2)
2413 efx_mae_t *maep = enp->en_maep;
2414 uint32_t n_allocated;
/* Bound the request by both the NIC limit and the MCDI response limits. */
2419 if (n_counters > maep->em_max_ncounters ||
2420 n_counters < MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM ||
2421 n_counters > MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2) {
2426 req.emr_cmd = MC_CMD_MAE_COUNTER_ALLOC;
2427 req.emr_in_buf = payload;
2428 req.emr_in_length = MC_CMD_MAE_COUNTER_ALLOC_IN_LEN;
2429 req.emr_out_buf = payload;
2430 req.emr_out_length = MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(n_counters);
2432 MCDI_IN_SET_DWORD(req, MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT,
2435 efx_mcdi_execute(enp, &req);
2437 if (req.emr_rc != 0) {
2442 if (req.emr_out_length_used < MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMIN) {
2447 n_allocated = MCDI_OUT_DWORD(req,
2448 MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT);
2449 if (n_allocated < MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM) {
/* Copy the granted counter IDs out of the response array. */
2454 for (i = 0; i < n_allocated; i++) {
2455 countersp[i].id = MCDI_OUT_INDEXED_DWORD(req,
2456 MAE_COUNTER_ALLOC_OUT_COUNTER_ID, i);
2459 if (gen_countp != NULL) {
2460 *gen_countp = MCDI_OUT_DWORD(req,
2461 MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT);
2464 *n_allocatedp = n_allocated;
2475 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free previously allocated MAE counters via MC_CMD_MAE_COUNTER_FREE.
 * The number actually freed is reported via n_freedp; the optional
 * generation count mirrors the one from efx_mae_counters_alloc().
 */
2480 __checkReturn efx_rc_t
2481 efx_mae_counters_free(
2482 __in efx_nic_t *enp,
2483 __in uint32_t n_counters,
2484 __out uint32_t *n_freedp,
2485 __in_ecount(n_counters) const efx_counter_t *countersp,
2486 __out_opt uint32_t *gen_countp)
2488 EFX_MCDI_DECLARE_BUF(payload,
2489 MC_CMD_MAE_COUNTER_FREE_IN_LENMAX_MCDI2,
2490 MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX_MCDI2);
2491 efx_mae_t *maep = enp->en_maep;
/* Bound the request by both the NIC limit and the MCDI request limits. */
2497 if (n_counters > maep->em_max_ncounters ||
2498 n_counters < MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MINNUM ||
2500 MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM_MCDI2) {
2505 req.emr_cmd = MC_CMD_MAE_COUNTER_FREE;
2506 req.emr_in_buf = payload;
2507 req.emr_in_length = MC_CMD_MAE_COUNTER_FREE_IN_LEN(n_counters);
2508 req.emr_out_buf = payload;
2509 req.emr_out_length = MC_CMD_MAE_COUNTER_FREE_OUT_LEN(n_counters);
/* Fill in the list of counter IDs to free. */
2511 for (i = 0; i < n_counters; i++) {
2512 MCDI_IN_SET_INDEXED_DWORD(req,
2513 MAE_COUNTER_FREE_IN_FREE_COUNTER_ID, i, countersp[i].id);
2515 MCDI_IN_SET_DWORD(req, MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT,
2518 efx_mcdi_execute(enp, &req);
2520 if (req.emr_rc != 0) {
2525 if (req.emr_out_length_used < MC_CMD_MAE_COUNTER_FREE_OUT_LENMIN) {
2530 n_freed = MCDI_OUT_DWORD(req, MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT);
2532 if (n_freed < MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MINNUM) {
2537 if (gen_countp != NULL) {
2538 *gen_countp = MCDI_OUT_DWORD(req,
2539 MAE_COUNTER_FREE_OUT_GENERATION_COUNT);
2542 *n_freedp = n_freed;
2553 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free a firmware action set via MC_CMD_MAE_ACTION_SET_FREE and verify
 * that the firmware echoes the same action set ID as freed.
 */
2557 __checkReturn efx_rc_t
2558 efx_mae_action_set_free(
2559 __in efx_nic_t *enp,
2560 __in const efx_mae_aset_id_t *aset_idp)
2562 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2564 EFX_MCDI_DECLARE_BUF(payload,
2565 MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1),
2566 MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1));
2569 if (encp->enc_mae_supported == B_FALSE) {
2574 req.emr_cmd = MC_CMD_MAE_ACTION_SET_FREE;
2575 req.emr_in_buf = payload;
2576 req.emr_in_length = MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1);
2577 req.emr_out_buf = payload;
2578 req.emr_out_length = MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1);
2580 MCDI_IN_SET_DWORD(req, MAE_ACTION_SET_FREE_IN_AS_ID, aset_idp->id);
2582 efx_mcdi_execute(enp, &req);
2584 if (req.emr_rc != 0) {
2589 if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMIN) {
/* The echoed ID must match the one requested to be freed. */
2594 if (MCDI_OUT_DWORD(req, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) !=
2596 /* Firmware failed to free the action set. */
2610 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Insert an action rule via MC_CMD_MAE_ACTION_RULE_INSERT. The caller
 * supplies exactly one of an action set list ID or a single action set
 * ID; the other is sent as INVALID in the rule response structure.
 * NOTE(review): error-path lines are elided in this excerpt.
 */
2614 __checkReturn efx_rc_t
2615 efx_mae_action_rule_insert(
2616 __in efx_nic_t *enp,
2617 __in const efx_mae_match_spec_t *spec,
2618 __in const efx_mae_aset_list_id_t *asl_idp,
2619 __in const efx_mae_aset_id_t *as_idp,
2620 __out efx_mae_rule_id_t *ar_idp)
2622 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2624 EFX_MCDI_DECLARE_BUF(payload,
2625 MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX_MCDI2,
2626 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
2627 efx_oword_t *rule_response;
2628 efx_mae_rule_id_t ar_id;
2632 EFX_STATIC_ASSERT(sizeof (ar_idp->id) ==
2633 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_LEN);
2635 EFX_STATIC_ASSERT(EFX_MAE_RSRC_ID_INVALID ==
2636 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
2638 if (encp->enc_mae_supported == B_FALSE) {
/* Require an action-rule spec and exactly one of ASL ID / AS ID. */
2643 if (spec->emms_type != EFX_MAE_RULE_ACTION ||
2644 (asl_idp != NULL && as_idp != NULL) ||
2645 (asl_idp == NULL && as_idp == NULL)) {
2650 req.emr_cmd = MC_CMD_MAE_ACTION_RULE_INSERT;
2651 req.emr_in_buf = payload;
2652 req.emr_in_length = MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX_MCDI2;
2653 req.emr_out_buf = payload;
2654 req.emr_out_length = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN;
/* Build the rule response structure directly inside the request. */
2656 EFX_STATIC_ASSERT(sizeof (*rule_response) <=
2657 MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_LEN);
2658 offset = MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_OFST;
2659 rule_response = (efx_oword_t *)(payload + offset);
2660 EFX_POPULATE_OWORD_3(*rule_response,
2661 MAE_ACTION_RULE_RESPONSE_ASL_ID,
2662 (asl_idp != NULL) ? asl_idp->id : EFX_MAE_RSRC_ID_INVALID,
2663 MAE_ACTION_RULE_RESPONSE_AS_ID,
2664 (as_idp != NULL) ? as_idp->id : EFX_MAE_RSRC_ID_INVALID,
2665 MAE_ACTION_RULE_RESPONSE_COUNTER_ID, EFX_MAE_RSRC_ID_INVALID);
2667 MCDI_IN_SET_DWORD(req, MAE_ACTION_RULE_INSERT_IN_PRIO, spec->emms_prio);
2670 * Mask-value pairs have been stored in the byte order needed for the
2671 * MCDI request and are thus safe to be copied directly to the buffer.
2673 EFX_STATIC_ASSERT(sizeof (spec->emms_mask_value_pairs.action) >=
2674 MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN);
2675 offset = MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_OFST;
2676 memcpy(payload + offset, spec->emms_mask_value_pairs.action,
2677 MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN);
2679 efx_mcdi_execute(enp, &req);
2681 if (req.emr_rc != 0) {
2686 if (req.emr_out_length_used < MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN) {
2691 ar_id.id = MCDI_OUT_DWORD(req, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
2692 if (ar_id.id == EFX_MAE_RSRC_ID_INVALID) {
2697 ar_idp->id = ar_id.id;
2710 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Delete an action rule via MC_CMD_MAE_ACTION_RULE_DELETE and verify that
 * the firmware echoes the same rule ID as deleted.
 */
2714 __checkReturn efx_rc_t
2715 efx_mae_action_rule_remove(
2716 __in efx_nic_t *enp,
2717 __in const efx_mae_rule_id_t *ar_idp)
2719 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2721 EFX_MCDI_DECLARE_BUF(payload,
2722 MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1),
2723 MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1));
2726 if (encp->enc_mae_supported == B_FALSE) {
2731 req.emr_cmd = MC_CMD_MAE_ACTION_RULE_DELETE;
2732 req.emr_in_buf = payload;
2733 req.emr_in_length = MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1);
2734 req.emr_out_buf = payload;
2735 req.emr_out_length = MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1);
2737 MCDI_IN_SET_DWORD(req, MAE_ACTION_RULE_DELETE_IN_AR_ID, ar_idp->id);
2739 efx_mcdi_execute(enp, &req);
2741 if (req.emr_rc != 0) {
2746 if (req.emr_out_length_used <
2747 MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMIN) {
/* The echoed ID must match the one requested for deletion. */
2752 if (MCDI_OUT_DWORD(req, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) !=
2754 /* Firmware failed to delete the action rule. */
2768 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2772 #endif /* EFSYS_OPT_MAE */