/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
15 #include <rte_spinlock.h>
19 #include "sfc_stats.h"
25 /** FW-allocatable resource context */
26 struct sfc_mae_fw_rsrc {
/* NOTE(review): the interior numbering jumps 26 -> 30 and no "};" is
 * visible; a reference-count member (and, presumably, a union wrapper
 * around the mutually exclusive IDs below) has been lost from this
 * excerpt — restore from the upstream header.
 */
/* ID of an MAE action set held by this entry (when applicable) */
30 efx_mae_aset_id_t aset_id;
/* ID of an MAE action rule held by this entry (when applicable) */
31 efx_mae_rule_id_t rule_id;
/* ID of an MAE encap. header held by this entry (when applicable) */
32 efx_mae_eh_id_t eh_id;
36 /** Outer rule registry entry */
37 struct sfc_mae_outer_rule {
/* Linkage in the adapter-wide outer rule list (sfc_mae_outer_rules) */
38 TAILQ_ENTRY(sfc_mae_outer_rule) entries;
/* NOTE(review): numbering jumps 38 -> 40; a refcount-like member and the
 * closing "};" (orig. lines 39, 43-44) appear truncated in this excerpt.
 */
/* Match criteria for the outer (encapsulation) frame */
40 efx_mae_match_spec_t *match_spec;
/* Tunnel protocol the rule classifies (VXLAN, Geneve, NVGRE, ...) */
41 efx_tunnel_protocol_t encap_type;
/* FW resource (outer rule ID) backing this registry entry */
42 struct sfc_mae_fw_rsrc fw_rsrc;
/* Head type for the adapter-wide list of outer rules */
45 TAILQ_HEAD(sfc_mae_outer_rules, sfc_mae_outer_rule);
47 /** Encap. header registry entry */
48 struct sfc_mae_encap_header {
/* Linkage in the adapter-wide encap. header list */
49 TAILQ_ENTRY(sfc_mae_encap_header) entries;
/* NOTE(review): numbering jumps 49 -> 53; refcount and the raw header
 * buffer/size members (orig. lines 50-52), plus the closing "};", appear
 * truncated in this excerpt.
 */
/* Tunnel protocol this header encapsulates */
53 efx_tunnel_protocol_t type;
/* FW resource (encap. header ID) backing this registry entry */
54 struct sfc_mae_fw_rsrc fw_rsrc;
/* Head type for the adapter-wide list of encap. headers */
57 TAILQ_HEAD(sfc_mae_encap_headers, sfc_mae_encap_header);
/** Mapping between an MAE counter and its RTE-visible identity */
60 struct sfc_mae_counter_id {
/* NOTE(review): the field declarations that followed the next three
 * comments (orig. lines 62, 64, 66-67) are missing from this excerpt —
 * only the comments survived. Restore from the upstream header.
 */
61 /* ID of a counter in MAE */
63 /* ID of a counter in RTE */
65 /* RTE counter ID validity status */
68 /* Flow Tunnel (FT) GROUP hit counter (or NULL) */
69 uint64_t *ft_group_hit_counter;
70 /* Flow Tunnel (FT) context (for JUMP rules; otherwise, NULL) */
71 struct sfc_flow_tunnel *ft;
74 /** Action set registry entry */
75 struct sfc_mae_action_set {
/* Linkage in the adapter-wide action set list */
76 TAILQ_ENTRY(sfc_mae_action_set) entries;
/* NOTE(review): numbering jumps 76 -> 78 and 78 -> 80; a refcount member
 * and a counter-count member (orig. lines 77, 79), plus the closing "};",
 * appear truncated in this excerpt.
 */
/* Counters used by the actions (array; see missing count member) */
78 struct sfc_mae_counter_id *counters;
/* libefx action set specification */
80 efx_mae_actions_t *spec;
/* Optional encap. header used by action VXLAN_ENCAP (or NULL) */
81 struct sfc_mae_encap_header *encap_header;
/* FW resource (action set ID) backing this registry entry */
82 struct sfc_mae_fw_rsrc fw_rsrc;
/* Head type for the adapter-wide list of action sets */
85 TAILQ_HEAD(sfc_mae_action_sets, sfc_mae_action_set);
87 /** Options for MAE support status */
/* NOTE(review): the "enum sfc_mae_status {" opener (orig. line 88) and
 * the closing "};" are missing from this excerpt.
 */
/* Probe has not yet determined MAE support */
89 SFC_MAE_STATUS_UNKNOWN = 0,
/* NIC does not support MAE */
90 SFC_MAE_STATUS_UNSUPPORTED,
/* NIC supports MAE */
91 SFC_MAE_STATUS_SUPPORTED,
/*
96 * Encap. header bounce buffer. It is used to store header data
97 * when parsing the header definition in the action VXLAN_ENCAP.
 */
99 struct sfc_mae_bounce_eh {
/* NOTE(review): numbering jumps 99 -> 103; the buffer pointer and size
 * members (orig. lines 100-102) and the closing "};" appear truncated
 * in this excerpt.
 */
/* Tunnel protocol of the header being collected */
103 efx_tunnel_protocol_t type;
106 /** Counter collection entry */
107 struct sfc_mae_counter {
/* NOTE(review): numbering jumps 107 -> 109; an in-use flag (orig. line
 * 108) and the closing "};" appear truncated in this excerpt.
 */
/* Stream generation count at the last update (staleness detection) */
109 uint32_t generation_count;
/* Current packet/byte readings */
110 union sfc_pkts_bytes value;
/* Baseline captured at reset; subtract from value when reporting */
111 union sfc_pkts_bytes reset;
/* Flow Tunnel (FT) GROUP hit counter mirror target (or NULL) */
113 uint64_t *ft_group_hit_counter;
/** Extra statistics kept by the counter update path */
116 struct sfc_mae_counters_xstats {
/* Updates that arrived for a counter not currently in use */
117 uint64_t not_inuse_update;
/* Updates that arrived for a reallocated counter ID */
118 uint64_t realloc_update;
/* NOTE(review): closing "};" (orig. line 119) is missing from this excerpt. */
/** Collection of all MAE counters of the adapter */
121 struct sfc_mae_counters {
122 /** An array of all MAE counters */
123 struct sfc_mae_counter *mae_counters;
124 /** Extra statistics for counters */
125 struct sfc_mae_counters_xstats xstats;
126 /** Count of all MAE counters */
127 unsigned int n_mae_counters;
/* NOTE(review): closing "};" (orig. line 128) is missing from this excerpt. */
130 /** Options for MAE counter polling mode */
131 enum sfc_mae_counter_polling_mode {
/* Counter polling disabled */
132 SFC_MAE_COUNTER_POLLING_OFF = 0,
/* Polling performed by an rte_service instance */
133 SFC_MAE_COUNTER_POLLING_SERVICE,
/* Polling performed by a dedicated thread */
134 SFC_MAE_COUNTER_POLLING_THREAD,
/* NOTE(review): closing "};" (orig. line 135) is missing from this excerpt. */
/** Adapter-wide state of the MAE counter machinery */
137 struct sfc_mae_counter_registry {
138 /* Common counter information */
139 /** Counters collection */
140 struct sfc_mae_counters counters;
142 /* Information used by counter update service */
143 /** Callback to get packets from RxQ */
144 eth_rx_burst_t rx_pkt_burst;
145 /** Data for the callback to get packets */
146 struct sfc_dp_rxq *rx_dp;
147 /** Number of buffers pushed to the RxQ */
148 unsigned int pushed_n_buffers;
149 /** Are credits used by counter stream */
/* NOTE(review): the boolean declared by the comment above (orig. line
 * 150) is missing from this excerpt.
 */
152 /* Information used by configuration routines */
153 enum sfc_mae_counter_polling_mode polling_mode;
/* NOTE(review): the remainder of this struct (orig. lines 154-168) —
 * presumably a union of per-mode state holding the service core
 * ID/service ID and the thread ID/run flag documented below — and the
 * closing "};" are missing from this excerpt. Only the member comments
 * survived.
 */
156 /** Counter service core ID */
158 /** Counter service ID */
162 /** Counter thread ID */
164 /** The thread should keep running */
/*
171 * MAE rules used to capture traffic generated by VFs and direct it to
172 * representors (one for each VF).
 */
174 #define SFC_MAE_NB_REPR_RULES_MAX (64)
176 /** Rules to forward traffic from PHY port to PF and from PF to PHY port */
177 #define SFC_MAE_NB_SWITCHDEV_RULES (2)
178 /** Maximum required internal MAE rules */
179 #define SFC_MAE_NB_RULES_MAX (SFC_MAE_NB_SWITCHDEV_RULES + \
180 SFC_MAE_NB_REPR_RULES_MAX)
/** Driver-internal MAE rule (match spec + actions + installed IDs) */
182 struct sfc_mae_rule {
/* Match criteria of the rule */
183 efx_mae_match_spec_t *spec;
/* Actions to apply on match */
184 efx_mae_actions_t *actions;
/* FW ID of the action set once installed */
185 efx_mae_aset_id_t action_set;
/* FW ID of the action rule once installed */
186 efx_mae_rule_id_t rule_id;
/* NOTE(review): closing "};" (orig. line 187) is missing from this excerpt. */
/** Pool of driver-internal MAE rules */
189 struct sfc_mae_internal_rules {
/*
191 * Rules required to sustain switchdev mode or to provide
192 * port representor functionality.
 */
194 struct sfc_mae_rule rules[SFC_MAE_NB_RULES_MAX];
/* NOTE(review): closing "};" (orig. line 195) is missing from this excerpt. */
/* NOTE(review): the "struct sfc_mae {" opener (orig. line 197) and the
 * closing "};" (orig. lines 230-231) are missing from this excerpt; the
 * members below belong to the adapter-level MAE context.
 */
198 /** Assigned switch domain identifier */
199 uint16_t switch_domain_id;
200 /** Assigned switch port identifier */
201 uint16_t switch_port_id;
202 /** NIC support for MAE status */
203 enum sfc_mae_status status;
204 /** Priority level limit for MAE outer rules */
205 unsigned int nb_outer_rule_prios_max;
206 /** Priority level limit for MAE action rules */
207 unsigned int nb_action_rule_prios_max;
208 /** Encapsulation support status */
209 uint32_t encap_types_supported;
210 /** Outer rule registry */
211 struct sfc_mae_outer_rules outer_rules;
212 /** Encap. header registry */
213 struct sfc_mae_encap_headers encap_headers;
214 /** Action set registry */
215 struct sfc_mae_action_sets action_sets;
216 /** Encap. header bounce buffer */
217 struct sfc_mae_bounce_eh bounce_eh;
218 /** Flag indicating whether counter-only RxQ is running */
219 bool counter_rxq_running;
220 /** Counter registry */
221 struct sfc_mae_counter_registry counter_registry;
222 /** Driver-internal flow rules */
223 struct sfc_mae_internal_rules internal_rules;
/*
225 * Switchdev default rules. They forward traffic from PHY port
226 * to PF and vice versa.
 */
228 struct sfc_mae_rule *switchdev_rule_pf_to_ext;
229 struct sfc_mae_rule *switchdev_rule_ext_to_pf;
/* Forward declaration: full definition lives in the flow layer */
233 struct sfc_flow_spec;
235 /** This implementation supports double-tagging */
236 #define SFC_MAE_MATCH_VLAN_MAX_NTAGS (2)
238 /** It is possible to keep track of one item ETH and two items VLAN */
239 #define SFC_MAE_L2_MAX_NITEMS (SFC_MAE_MATCH_VLAN_MAX_NTAGS + 1)
241 /** Auxiliary entry format to keep track of L2 "type" ("inner_type") */
242 struct sfc_mae_ethertype {
/* NOTE(review): the value/mask members (presumably rte_be16_t; orig.
 * lines 243-244) and the closing "};" are missing from this excerpt.
 */
/** Accumulated state of flow pattern parsing (see sfc_mae.c) */
247 struct sfc_mae_pattern_data {
/*
249 * Keeps track of "type" ("inner_type") mask and value for each
250 * parsed L2 item in a pattern. These values/masks get filled
251 * in MAE match specification at the end of parsing. Also, this
252 * information is used to conduct consistency checks:
254 * - If an item ETH is followed by a single item VLAN,
255 * the former must have "type" set to one of supported
256 * TPID values (0x8100, 0x88a8, 0x9100, 0x9200, 0x9300),
259 * - If an item ETH is followed by two items VLAN, the
260 * item ETH must have "type" set to one of supported TPID
261 * values (0x88a8, 0x9100, 0x9200, 0x9300), or 0x0000/0x0000,
262 * and the outermost VLAN item must have "inner_type" set
263 * to TPID value 0x8100, or 0x0000/0x0000
265 * - If a L2 item is followed by a L3 one, the former must
266 * indicate "type" ("inner_type") which corresponds to
267 * the protocol used in the L3 item, or 0x0000/0x0000.
269 * In turn, mapping between RTE convention (above requirements) and
270 * MAE fields is non-trivial. The following scheme indicates
271 * which item EtherTypes go to which MAE fields in the case
274 * ETH (0x8100) --> VLAN0_PROTO_BE
275 * VLAN (L3 EtherType) --> ETHER_TYPE_BE
277 * Similarly, in the case of double tagging:
279 * ETH (0x88a8) --> VLAN0_PROTO_BE
280 * VLAN (0x8100) --> VLAN1_PROTO_BE
281 * VLAN (L3 EtherType) --> ETHER_TYPE_BE
 */
283 struct sfc_mae_ethertype ethertypes[SFC_MAE_L2_MAX_NITEMS];
/* TCI masks collected from items VLAN (outermost first) */
285 rte_be16_t tci_masks[SFC_MAE_MATCH_VLAN_MAX_NTAGS];
/* Number of items VLAN seen so far in the pattern */
287 unsigned int nb_vlan_tags;
/*
290 * L3 requirement for the innermost L2 item's "type" ("inner_type").
291 * This contains one of:
292 * - 0x0800/0xffff: IPV4
293 * - 0x86dd/0xffff: IPV6
294 * - 0x0000/0x0000: no L3 item
 */
296 struct sfc_mae_ethertype innermost_ethertype_restriction;
/*
299 * The following two fields keep track of L3 "proto" mask and value.
300 * The corresponding fields get filled in MAE match specification
301 * at the end of parsing. Also, the information is used by a
302 * post-check to enforce consistency requirements:
304 * - If a L3 item is followed by an item TCP, the former has
305 * its "proto" set to either 0x06/0xff or 0x00/0x00.
307 * - If a L3 item is followed by an item UDP, the former has
308 * its "proto" set to either 0x11/0xff or 0x00/0x00.
 */
310 uint8_t l3_next_proto_value;
311 uint8_t l3_next_proto_mask;
/*
314 * L4 requirement for L3 item's "proto".
315 * This contains one of:
 * - 0x06/0xff: TCP (NOTE(review): restored; orig. lines 316-317 lost)
 * - 0x11/0xff: UDP
318 * - 0x00/0x00: no L4 item
 */
320 uint8_t l3_next_proto_restriction_value;
321 uint8_t l3_next_proto_restriction_mask;
323 /* Projected state of EFX_MAE_FIELD_HAS_OVLAN match bit */
324 bool has_ovlan_value;
/* NOTE(review): the corresponding has_ovlan_mask member (orig. lines
 * 325-326) appears truncated in this excerpt.
 */
327 /* Projected state of EFX_MAE_FIELD_HAS_IVLAN match bit */
328 bool has_ivlan_value;
/* NOTE(review): the corresponding has_ivlan_mask member and the closing
 * "};" (orig. lines 329-331) appear truncated in this excerpt.
 */
/** Context threaded through the pattern parsing helpers */
332 struct sfc_mae_parse_ctx {
/* Adapter the flow is being parsed for */
333 struct sfc_adapter *sa;
334 efx_mae_match_spec_t *match_spec_action;
335 efx_mae_match_spec_t *match_spec_outer;
/*
337 * This points to either of the above two specifications depending
338 * on which part of the pattern is being parsed (outer / inner).
 */
340 efx_mae_match_spec_t *match_spec;
/*
342 * This points to either "field_ids_remap_to_encap"
343 * or "field_ids_no_remap" (see sfc_mae.c) depending on
344 * which part of the pattern is being parsed.
 */
346 const efx_mae_field_id_t *field_ids_remap;
347 /* These two fields correspond to the tunnel-specific default mask. */
348 size_t tunnel_def_mask_size;
349 const void *tunnel_def_mask;
/* Set when an item has pinned the ingress m-port match */
350 bool match_mport_set;
/* Flow Tunnel (FT) rule type being parsed (JUMP / GROUP / none) */
351 enum sfc_flow_tunnel_rule_type ft_rule_type;
/* Accumulated L2/L3/L4 parsing state (see above) */
352 struct sfc_mae_pattern_data pattern_data;
/* Tunnel protocol detected in the pattern (if any) */
353 efx_tunnel_protocol_t encap_type;
/* The pattern being parsed */
354 const struct rte_flow_item *pattern;
/* Rule priority requested by the flow attribute */
355 unsigned int priority;
/* Flow Tunnel (FT) context (or NULL) */
356 struct sfc_flow_tunnel *ft;
/* NOTE(review): closing "};" (orig. line 357) is missing from this excerpt. */
/* Attach / detach MAE support during device probe and close */
359 int sfc_mae_attach(struct sfc_adapter *sa);
360 void sfc_mae_detach(struct sfc_adapter *sa);
/* Flow engine callback: release MAE resources held by a flow */
361 sfc_flow_cleanup_cb_t sfc_mae_flow_cleanup;
/* Parse an RTE flow pattern into an MAE match specification */
362 int sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
363 const struct rte_flow_item pattern[],
364 struct sfc_flow_spec_mae *spec,
365 struct rte_flow_error *error);
/* Parse RTE flow actions into an MAE action set specification */
366 int sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
367 const struct rte_flow_action actions[],
368 struct sfc_flow_spec_mae *spec_mae,
369 struct rte_flow_error *error);
/* Flow engine callbacks: verify / insert / remove / query MAE flows */
370 sfc_flow_verify_cb_t sfc_mae_flow_verify;
371 sfc_flow_insert_cb_t sfc_mae_flow_insert;
372 sfc_flow_remove_cb_t sfc_mae_flow_remove;
373 sfc_flow_query_cb_t sfc_mae_flow_query;
/*
376 * The value used to represent the lowest priority.
377 * Used in MAE rule API.
 */
379 #define SFC_MAE_RULE_PRIO_LOWEST (-1)
/*
382 * Insert a driver-internal flow rule that matches traffic originating from
383 * some m-port selector and redirects it to another one
384 * (eg. PF --> PHY, PHY --> PF).
 *
386 * If requested priority is negative, use the lowest priority.
 */
388 int sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
389 const efx_mport_sel_t *mport_match,
390 const efx_mport_sel_t *mport_deliver,
391 int prio, struct sfc_mae_rule **rulep);
/* Remove a driver-internal rule added by the function above */
392 void sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule);
/* Install / remove the default switchdev PHY <-> PF forwarding rules */
393 int sfc_mae_switchdev_init(struct sfc_adapter *sa);
394 void sfc_mae_switchdev_fini(struct sfc_adapter *sa);
399 #endif /* _SFC_MAE_H */