1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
15 #include <rte_spinlock.h>
19 #include "sfc_stats.h"
25 /** FW-allocatable resource context */
26 struct sfc_mae_fw_rsrc {
/* ID of a FW-allocated action set */
30 efx_mae_aset_id_t aset_id;
/* ID of a FW-allocated rule */
31 efx_mae_rule_id_t rule_id;
/* ID of a FW-allocated pedit MAC address */
32 efx_mae_mac_id_t mac_id;
/* ID of a FW-allocated encap. header */
33 efx_mae_eh_id_t eh_id;
/*
 * NOTE(review): interior lines 27-29 and the closing brace are missing
 * from this copy; the IDs are presumably union alternatives with a
 * reference counter alongside -- confirm against the full header.
 */
37 /** Outer rule registry entry */
38 struct sfc_mae_outer_rule {
/* Linkage in the adapter-level outer rule registry */
39 TAILQ_ENTRY(sfc_mae_outer_rule) entries;
/* Match specification passed to the EFX MAE API */
41 efx_mae_match_spec_t *match_spec;
/* Tunnel protocol associated with this outer rule */
42 efx_tunnel_protocol_t encap_type;
/* FW resource context (rule ID) backing this entry */
43 struct sfc_mae_fw_rsrc fw_rsrc;
/*
 * NOTE(review): line 40 (presumably a refcount) and the closing brace
 * are missing from this copy.
 */
46 TAILQ_HEAD(sfc_mae_outer_rules, sfc_mae_outer_rule);
48 /** MAC address registry entry */
49 struct sfc_mae_mac_addr {
/* Linkage in the adapter-level MAC address registry */
50 TAILQ_ENTRY(sfc_mae_mac_addr) entries;
/* The MAC address octets */
52 uint8_t addr_bytes[EFX_MAC_ADDR_LEN];
/* FW resource context (MAC ID) backing this entry */
53 struct sfc_mae_fw_rsrc fw_rsrc;
/*
 * NOTE(review): line 51 (presumably a refcount) and the closing brace
 * are missing from this copy.
 */
56 TAILQ_HEAD(sfc_mae_mac_addrs, sfc_mae_mac_addr);
58 /** Encap. header registry entry */
59 struct sfc_mae_encap_header {
/* Linkage in the adapter-level encap. header registry */
60 TAILQ_ENTRY(sfc_mae_encap_header) entries;
/* Tunnel protocol of the prepared header */
64 efx_tunnel_protocol_t type;
/* FW resource context (encap. header ID) backing this entry */
65 struct sfc_mae_fw_rsrc fw_rsrc;
/*
 * NOTE(review): lines 61-63 (presumably a refcount plus the header
 * data buffer pointer and size) and the closing brace are missing
 * from this copy.
 */
68 TAILQ_HEAD(sfc_mae_encap_headers, sfc_mae_encap_header);
/** Counter bound to an action set (per the registry below) */
71 struct sfc_mae_counter_id {
72 /* ID of a counter in MAE */
74 /* ID of a counter in RTE */
76 /* RTE counter ID validity status */
79 /* Flow Tunnel (FT) SWITCH hit counter (or NULL) */
80 uint64_t *ft_switch_hit_counter;
81 /* Flow Tunnel (FT) context (for TUNNEL rules; otherwise, NULL) */
82 struct sfc_ft_ctx *ft_ctx;
/*
 * NOTE(review): the fields described by the first three comments
 * (original lines 73, 75, 77-78) and the closing brace are missing
 * from this copy -- confirm against the full header.
 */
85 /** Action set registry entry */
86 struct sfc_mae_action_set {
/* Linkage in the adapter-level action set registry */
87 TAILQ_ENTRY(sfc_mae_action_set) entries;
/* Counters bound to this action set (see struct sfc_mae_counter_id) */
89 struct sfc_mae_counter_id *counters;
/* Action specification passed to the EFX MAE API */
91 efx_mae_actions_t *spec;
/* Encap. header registry entry used by this action set (or NULL) */
92 struct sfc_mae_encap_header *encap_header;
/* MAC address registry entries for SET_MAC-style edits (or NULL) */
93 struct sfc_mae_mac_addr *dst_mac_addr;
94 struct sfc_mae_mac_addr *src_mac_addr;
/* FW resource context (action set ID) backing this entry */
95 struct sfc_mae_fw_rsrc fw_rsrc;
/*
 * NOTE(review): lines 88, 90 and 96-97 (presumably refcount / counter
 * count) and the closing brace are missing from this copy.
 */
98 TAILQ_HEAD(sfc_mae_action_sets, sfc_mae_action_set);
100 /** Options for MAE support status */
101 enum sfc_mae_status {
/* Support status has not been probed yet */
102 SFC_MAE_STATUS_UNKNOWN = 0,
/* The NIC does not support MAE */
103 SFC_MAE_STATUS_UNSUPPORTED,
/* MAE is supported by the NIC */
104 SFC_MAE_STATUS_SUPPORTED,
/* MAE is supported with administrative privilege -- TODO confirm */
105 SFC_MAE_STATUS_ADMIN,
/* NOTE(review): the enum's closing brace is missing from this copy. */
/**
109 * Encap. header bounce buffer. It is used to store header data
110 * when parsing the header definition in the action VXLAN_ENCAP.
 */
112 struct sfc_mae_bounce_eh {
/* Tunnel protocol of the header being assembled */
116 efx_tunnel_protocol_t type;
/*
 * NOTE(review): the comment delimiters, the buffer pointer / size
 * members (lines 113-115) and the closing brace are missing from
 * this copy -- confirm against the full header.
 */
119 /** Counter collection entry */
120 struct sfc_mae_counter {
/* Generation count; presumably used to detect counter reuse -- verify */
122 uint32_t generation_count;
/* Accumulated packet/byte readings */
123 union sfc_pkts_bytes value;
/* Baseline subtracted on query to implement counter reset -- TODO confirm */
124 union sfc_pkts_bytes reset;
/* Flow Tunnel (FT) SWITCH hit counter (or NULL) */
126 uint64_t *ft_switch_hit_counter;
/*
 * NOTE(review): lines 121, 125 and the closing brace are missing
 * from this copy.
 */
/** Extra statistics gathered by the counter update path */
129 struct sfc_mae_counters_xstats {
/* Updates received for a counter not currently in use -- per the name */
130 uint64_t not_inuse_update;
/* Updates received for a reallocated counter -- per the name */
131 uint64_t realloc_update;
/* NOTE(review): the closing brace is missing from this copy. */
/** Collection of all MAE counters for the adapter */
134 struct sfc_mae_counters {
135 /** An array of all MAE counters */
136 struct sfc_mae_counter *mae_counters;
137 /** Extra statistics for counters */
138 struct sfc_mae_counters_xstats xstats;
139 /** Count of all MAE counters */
140 unsigned int n_mae_counters;
/* NOTE(review): the closing brace is missing from this copy. */
143 /** Options for MAE counter polling mode */
144 enum sfc_mae_counter_polling_mode {
/* Counter polling disabled */
145 SFC_MAE_COUNTER_POLLING_OFF = 0,
/* Polling via a service (see "Counter service ID" in the registry) */
146 SFC_MAE_COUNTER_POLLING_SERVICE,
/* Polling via a dedicated thread (see "Counter thread ID" below) */
147 SFC_MAE_COUNTER_POLLING_THREAD,
/* NOTE(review): the enum's closing brace is missing from this copy. */
/** Top-level state for MAE counter maintenance */
150 struct sfc_mae_counter_registry {
151 /* Common counter information */
152 /** Counters collection */
153 struct sfc_mae_counters counters;
155 /* Information used by counter update service */
156 /** Callback to get packets from RxQ */
157 eth_rx_burst_t rx_pkt_burst;
158 /** Data for the callback to get packets */
159 struct sfc_dp_rxq *rx_dp;
160 /** Number of buffers pushed to the RxQ */
161 unsigned int pushed_n_buffers;
162 /** Are credits used by counter stream */
165 /* Information used by configuration routines */
166 enum sfc_mae_counter_polling_mode polling_mode;
169 /** Counter service core ID */
171 /** Counter service ID */
175 /** Counter thread ID */
177 /** The thread should keep running */
/*
 * NOTE(review): the members described by the last five doc comments
 * (original lines 163, 170, 172, 176, 178) and the closing brace are
 * missing from this copy -- confirm against the full header.
 */
/**
184 * MAE rules used to capture traffic generated by VFs and direct it to
185 * representors (one for each VF).
 */
187 #define SFC_MAE_NB_REPR_RULES_MAX (64)
189 /** Rules to forward traffic from PHY port to PF and from PF to PHY port */
190 #define SFC_MAE_NB_SWITCHDEV_RULES (2)
191 /** Maximum required internal MAE rules */
192 #define SFC_MAE_NB_RULES_MAX (SFC_MAE_NB_SWITCHDEV_RULES + \
193 SFC_MAE_NB_REPR_RULES_MAX)
/** Driver-internal MAE rule (match spec + actions + FW IDs) */
195 struct sfc_mae_rule {
/* Match specification passed to the EFX MAE API */
196 efx_mae_match_spec_t *spec;
/* Action specification passed to the EFX MAE API */
197 efx_mae_actions_t *actions;
/* FW-allocated action set ID */
198 efx_mae_aset_id_t action_set;
/* FW-allocated rule ID */
199 efx_mae_rule_id_t rule_id;
/* NOTE(review): the closing brace is missing from this copy. */
struct sfc_mae_internal_rules {
/**
204 * Rules required to sustain switchdev mode or to provide
205 * port representor functionality.
 */
207 struct sfc_mae_rule rules[SFC_MAE_NB_RULES_MAX];
/*
 * NOTE(review): the original opening line (202) duplicated above and
 * the closing brace are missing from this copy; the "202" prefix was
 * dropped to keep the declaration readable -- verify against upstream.
 */
/*
 * NOTE(review): the enclosing "struct sfc_mae {" opening line and its
 * doc comment (original lines ~209-210) are missing from this copy --
 * the members below belong to the adapter-level MAE context.
 */
211 /** Assigned switch domain identifier */
212 uint16_t switch_domain_id;
213 /** Assigned switch port identifier */
214 uint16_t switch_port_id;
215 /** NIC support for MAE status */
216 enum sfc_mae_status status;
217 /** Priority level limit for MAE outer rules */
218 unsigned int nb_outer_rule_prios_max;
219 /** Priority level limit for MAE action rules */
220 unsigned int nb_action_rule_prios_max;
221 /** Encapsulation support status */
222 uint32_t encap_types_supported;
223 /** Outer rule registry */
224 struct sfc_mae_outer_rules outer_rules;
225 /** Encap. header registry */
226 struct sfc_mae_encap_headers encap_headers;
227 /** MAC address registry */
228 struct sfc_mae_mac_addrs mac_addrs;
229 /** Action set registry */
230 struct sfc_mae_action_sets action_sets;
231 /** Encap. header bounce buffer */
232 struct sfc_mae_bounce_eh bounce_eh;
233 /** Flag indicating whether counter-only RxQ is running */
234 bool counter_rxq_running;
235 /** Counter registry */
236 struct sfc_mae_counter_registry counter_registry;
237 /** Driver-internal flow rules */
238 struct sfc_mae_internal_rules internal_rules;
/**
240 * Switchdev default rules. They forward traffic from PHY port
241 * to PF and vice versa.
 */
243 struct sfc_mae_rule *switchdev_rule_pf_to_ext;
244 struct sfc_mae_rule *switchdev_rule_ext_to_pf;
/* NOTE(review): the struct's closing brace is missing from this copy. */
/* Forward declaration; defined elsewhere in the driver */
248 struct sfc_flow_spec;
250 /** This implementation supports double-tagging */
251 #define SFC_MAE_MATCH_VLAN_MAX_NTAGS (2)
253 /** It is possible to keep track of one item ETH and two items VLAN */
254 #define SFC_MAE_L2_MAX_NITEMS (SFC_MAE_MATCH_VLAN_MAX_NTAGS + 1)
256 /** Auxiliary entry format to keep track of L2 "type" ("inner_type") */
257 struct sfc_mae_ethertype {
/*
 * NOTE(review): the struct's members (presumably a value/mask pair,
 * original lines 258-260) and the closing brace are missing from this
 * copy -- confirm against the full header.
 */
/** Per-parse scratch data for translating an RTE pattern to MAE fields */
262 struct sfc_mae_pattern_data {
/**
264 * Keeps track of "type" ("inner_type") mask and value for each
265 * parsed L2 item in a pattern. These values/masks get filled
266 * in MAE match specification at the end of parsing. Also, this
267 * information is used to conduct consistency checks:
269 * - If an item ETH is followed by a single item VLAN,
270 * the former must have "type" set to one of supported
271 * TPID values (0x8100, 0x88a8, 0x9100, 0x9200, 0x9300),
274 * - If an item ETH is followed by two items VLAN, the
275 * item ETH must have "type" set to one of supported TPID
276 * values (0x88a8, 0x9100, 0x9200, 0x9300), or 0x0000/0x0000,
277 * and the outermost VLAN item must have "inner_type" set
278 * to TPID value 0x8100, or 0x0000/0x0000
280 * - If a L2 item is followed by a L3 one, the former must
281 * indicate "type" ("inner_type") which corresponds to
282 * the protocol used in the L3 item, or 0x0000/0x0000.
284 * In turn, mapping between RTE convention (above requirements) and
285 * MAE fields is non-trivial. The following scheme indicates
286 * which item EtherTypes go to which MAE fields in the case
289 * ETH (0x8100) --> VLAN0_PROTO_BE
290 * VLAN (L3 EtherType) --> ETHER_TYPE_BE
292 * Similarly, in the case of double tagging:
294 * ETH (0x88a8) --> VLAN0_PROTO_BE
295 * VLAN (0x8100) --> VLAN1_PROTO_BE
296 * VLAN (L3 EtherType) --> ETHER_TYPE_BE
 */
298 struct sfc_mae_ethertype ethertypes[SFC_MAE_L2_MAX_NITEMS];
/* TCI masks of the parsed VLAN items (comment line 299 truncated) */
300 rte_be16_t tci_masks[SFC_MAE_MATCH_VLAN_MAX_NTAGS];
/* Number of VLAN tags seen so far (comment line 301 truncated) */
302 unsigned int nb_vlan_tags;
/**
305 * L3 requirement for the innermost L2 item's "type" ("inner_type").
306 * This contains one of:
307 * - 0x0800/0xffff: IPV4
308 * - 0x86dd/0xffff: IPV6
309 * - 0x0000/0x0000: no L3 item
 */
311 struct sfc_mae_ethertype innermost_ethertype_restriction;
/**
314 * The following two fields keep track of L3 "proto" mask and value.
315 * The corresponding fields get filled in MAE match specification
316 * at the end of parsing. Also, the information is used by a
317 * post-check to enforce consistency requirements:
319 * - If a L3 item is followed by an item TCP, the former has
320 * its "proto" set to either 0x06/0xff or 0x00/0x00.
322 * - If a L3 item is followed by an item UDP, the former has
323 * its "proto" set to either 0x11/0xff or 0x00/0x00.
 */
325 uint8_t l3_next_proto_value;
326 uint8_t l3_next_proto_mask;
/**
329 * L4 requirement for L3 item's "proto".
330 * This contains one of:
333 * - 0x00/0x00: no L4 item
 */
335 uint8_t l3_next_proto_restriction_value;
336 uint8_t l3_next_proto_restriction_mask;
338 /* Projected state of EFX_MAE_FIELD_HAS_OVLAN match bit */
339 bool has_ovlan_value;
342 /* Projected state of EFX_MAE_FIELD_HAS_IVLAN match bit */
343 bool has_ivlan_value;
/*
 * NOTE(review): companion "*_mask" members (original lines ~340-341,
 * ~344-345), parts of the comments (e.g. lines 331-332 listing the
 * TCP/UDP values) and the closing brace are missing from this copy.
 */
/** Context carried through pattern parsing (see sfc_mae.c) */
347 struct sfc_mae_parse_ctx {
/* The adapter this parse belongs to */
348 struct sfc_adapter *sa;
/* Match specification for the action (inner) rule */
349 efx_mae_match_spec_t *match_spec_action;
/* Match specification for the outer rule */
350 efx_mae_match_spec_t *match_spec_outer;
/**
352 * This points to either of the above two specifications depending
353 * on which part of the pattern is being parsed (outer / inner).
 */
355 efx_mae_match_spec_t *match_spec;
/**
357 * This points to either "field_ids_remap_to_encap"
358 * or "field_ids_no_remap" (see sfc_mae.c) depending on
359 * which part of the pattern is being parsed.
 */
361 const efx_mae_field_id_t *field_ids_remap;
362 /* These two fields correspond to the tunnel-specific default mask. */
363 size_t tunnel_def_mask_size;
364 const void *tunnel_def_mask;
/* Whether the match on an m-port has been configured */
365 bool match_mport_set;
/* Flow Tunnel rule type being parsed */
366 enum sfc_ft_rule_type ft_rule_type;
/* Scratch data accumulated while walking the pattern */
367 struct sfc_mae_pattern_data pattern_data;
/* Tunnel protocol detected in the pattern */
368 efx_tunnel_protocol_t encap_type;
/* The RTE flow pattern being parsed */
369 const struct rte_flow_item *pattern;
/* Rule priority requested by the caller */
370 unsigned int priority;
/* Flow Tunnel context (or NULL) */
371 struct sfc_ft_ctx *ft_ctx;
/* NOTE(review): the struct's closing brace is missing from this copy. */
/* Attach / detach MAE support for the adapter (per names; see sfc_mae.c) */
374 int sfc_mae_attach(struct sfc_adapter *sa);
375 void sfc_mae_detach(struct sfc_adapter *sa);
/* Flow-engine callback: release MAE-specific flow resources */
376 sfc_flow_cleanup_cb_t sfc_mae_flow_cleanup;
/* Translate an RTE flow pattern into the MAE specification in @spec */
377 int sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
378 const struct rte_flow_item pattern[],
379 struct sfc_flow_spec_mae *spec,
380 struct rte_flow_error *error);
/* Translate RTE flow actions into the MAE specification in @spec_mae */
381 int sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
382 const struct rte_flow_action actions[],
383 struct sfc_flow_spec_mae *spec_mae,
384 struct rte_flow_error *error);
/* Flow-engine callbacks for verify / insert / remove / query */
385 sfc_flow_verify_cb_t sfc_mae_flow_verify;
386 sfc_flow_insert_cb_t sfc_mae_flow_insert;
387 sfc_flow_remove_cb_t sfc_mae_flow_remove;
388 sfc_flow_query_cb_t sfc_mae_flow_query;
/**
391 * The value used to represent the lowest priority.
392 * Used in MAE rule API.
 */
394 #define SFC_MAE_RULE_PRIO_LOWEST (-1)
/**
397 * Insert a driver-internal flow rule that matches traffic originating from
398 * some m-port selector and redirects it to another one
399 * (eg. PF --> PHY, PHY --> PF).
401 * If requested priority is negative, use the lowest priority.
 */
403 int sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
404 const efx_mport_sel_t *mport_match,
405 const efx_mport_sel_t *mport_deliver,
406 int prio, struct sfc_mae_rule **rulep);
/* Remove a driver-internal rule previously added by the function above */
407 void sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule);
/* Set up / tear down the default switchdev rules (per names) */
408 int sfc_mae_switchdev_init(struct sfc_adapter *sa);
409 void sfc_mae_switchdev_fini(struct sfc_adapter *sa);
414 #endif /* _SFC_MAE_H */