diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h
index 53ddead979..d835056aef 100644
--- a/drivers/net/sfc/sfc_mae.h
+++ b/drivers/net/sfc/sfc_mae.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
 *
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
@@ -16,6 +16,8 @@

 #include "efx.h"

+#include "sfc_stats.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,6 +29,7 @@ struct sfc_mae_fw_rsrc {
 union {
 efx_mae_aset_id_t aset_id;
 efx_mae_rule_id_t rule_id;
+ efx_mae_eh_id_t eh_id;
 };
 };

@@ -41,11 +44,34 @@ struct sfc_mae_outer_rule {

 TAILQ_HEAD(sfc_mae_outer_rules, sfc_mae_outer_rule);

+/** Encap. header registry entry */
+struct sfc_mae_encap_header {
+ TAILQ_ENTRY(sfc_mae_encap_header) entries;
+ unsigned int refcnt;
+ uint8_t *buf;
+ size_t size;
+ efx_tunnel_protocol_t type;
+ struct sfc_mae_fw_rsrc fw_rsrc;
+};
+
+TAILQ_HEAD(sfc_mae_encap_headers, sfc_mae_encap_header);
+
+/* Counter ID */
+struct sfc_mae_counter_id {
+ /* ID of a counter in MAE */
+ efx_counter_t mae_id;
+ /* ID of a counter in RTE */
+ uint32_t rte_id;
+};
+
 /** Action set registry entry */
 struct sfc_mae_action_set {
 TAILQ_ENTRY(sfc_mae_action_set) entries;
 unsigned int refcnt;
+ struct sfc_mae_counter_id *counters;
+ uint32_t n_counters;
 efx_mae_actions_t *spec;
+ struct sfc_mae_encap_header *encap_header;
 struct sfc_mae_fw_rsrc fw_rsrc;
 };
@@ -58,6 +84,88 @@ enum sfc_mae_status {
 SFC_MAE_STATUS_SUPPORTED
 };

+/*
+ * Encap. header bounce buffer. It is used to store header data
+ * when parsing the header definition in the action VXLAN_ENCAP.
+ */
+struct sfc_mae_bounce_eh {
+ uint8_t *buf;
+ size_t buf_size;
+ size_t size;
+ efx_tunnel_protocol_t type;
+};
+
+/** Counter collection entry */
+struct sfc_mae_counter {
+ bool inuse;
+ uint32_t generation_count;
+ union sfc_pkts_bytes value;
+ union sfc_pkts_bytes reset;
+};
+
+struct sfc_mae_counters_xstats {
+ uint64_t not_inuse_update;
+ uint64_t realloc_update;
+};
+
+struct sfc_mae_counters {
+ /** An array of all MAE counters */
+ struct sfc_mae_counter *mae_counters;
+ /** Extra statistics for counters */
+ struct sfc_mae_counters_xstats xstats;
+ /** Count of all MAE counters */
+ unsigned int n_mae_counters;
+};
+
+struct sfc_mae_counter_registry {
+ /* Common counter information */
+ /** Counters collection */
+ struct sfc_mae_counters counters;
+
+ /* Information used by counter update service */
+ /** Callback to get packets from RxQ */
+ eth_rx_burst_t rx_pkt_burst;
+ /** Data for the callback to get packets */
+ struct sfc_dp_rxq *rx_dp;
+ /** Number of buffers pushed to the RxQ */
+ unsigned int pushed_n_buffers;
+ /** Are credits used by counter stream */
+ bool use_credits;
+
+ /* Information used by configuration routines */
+ /** Counter service core ID */
+ uint32_t service_core_id;
+ /** Counter service ID */
+ uint32_t service_id;
+};
+
+/**
+ * MAE rules used to capture traffic generated by VFs and direct it to
+ * representors (one for each VF).
+ */ +#define SFC_MAE_NB_REPR_RULES_MAX (64) + +/** Rules to forward traffic from PHY port to PF and from PF to PHY port */ +#define SFC_MAE_NB_SWITCHDEV_RULES (2) +/** Maximum required internal MAE rules */ +#define SFC_MAE_NB_RULES_MAX (SFC_MAE_NB_SWITCHDEV_RULES + \ + SFC_MAE_NB_REPR_RULES_MAX) + +struct sfc_mae_rule { + efx_mae_match_spec_t *spec; + efx_mae_actions_t *actions; + efx_mae_aset_id_t action_set; + efx_mae_rule_id_t rule_id; +}; + +struct sfc_mae_internal_rules { + /* + * Rules required to sustain switchdev mode or to provide + * port representor functionality. + */ + struct sfc_mae_rule rules[SFC_MAE_NB_RULES_MAX]; +}; + struct sfc_mae { /** Assigned switch domain identifier */ uint16_t switch_domain_id; @@ -73,8 +181,24 @@ struct sfc_mae { uint32_t encap_types_supported; /** Outer rule registry */ struct sfc_mae_outer_rules outer_rules; + /** Encap. header registry */ + struct sfc_mae_encap_headers encap_headers; /** Action set registry */ struct sfc_mae_action_sets action_sets; + /** Encap. header bounce buffer */ + struct sfc_mae_bounce_eh bounce_eh; + /** Flag indicating whether counter-only RxQ is running */ + bool counter_rxq_running; + /** Counter registry */ + struct sfc_mae_counter_registry counter_registry; + /** Driver-internal flow rules */ + struct sfc_mae_internal_rules internal_rules; + /** + * Switchdev default rules. They forward traffic from PHY port + * to PF and vice versa. + */ + struct sfc_mae_rule *switchdev_rule_pf_to_ext; + struct sfc_mae_rule *switchdev_rule_ext_to_pf; }; struct sfc_adapter; @@ -101,12 +225,14 @@ struct sfc_mae_pattern_data { * * - If an item ETH is followed by a single item VLAN, * the former must have "type" set to one of supported - * TPID values (0x8100, 0x88a8, 0x9100, 0x9200, 0x9300). + * TPID values (0x8100, 0x88a8, 0x9100, 0x9200, 0x9300), + * or 0x0000/0x0000. * * - If an item ETH is followed by two items VLAN, the * item ETH must have "type" set to one of supported TPID - * values (0x88a8, 0x9100, 0x9200, 0x9300), and the outermost - * VLAN item must have "inner_type" set to TPID value 0x8100. + * values (0x88a8, 0x9100, 0x9200, 0x9300), or 0x0000/0x0000, + * and the outermost VLAN item must have "inner_type" set + * to TPID value 0x8100, or 0x0000/0x0000 * * - If a L2 item is followed by a L3 one, the former must * indicate "type" ("inner_type") which corresponds to @@ -127,6 +253,9 @@ struct sfc_mae_pattern_data { * VLAN (L3 EtherType) --> ETHER_TYPE_BE */ struct sfc_mae_ethertype ethertypes[SFC_MAE_L2_MAX_NITEMS]; + + rte_be16_t tci_masks[SFC_MAE_MATCH_VLAN_MAX_NTAGS]; + unsigned int nb_vlan_tags; /** @@ -162,6 +291,14 @@ struct sfc_mae_pattern_data { */ uint8_t l3_next_proto_restriction_value; uint8_t l3_next_proto_restriction_mask; + + /* Projected state of EFX_MAE_FIELD_HAS_OVLAN match bit */ + bool has_ovlan_value; + bool has_ovlan_mask; + + /* Projected state of EFX_MAE_FIELD_HAS_IVLAN match bit */ + bool has_ivlan_value; + bool has_ivlan_mask; }; struct sfc_mae_parse_ctx { @@ -179,7 +316,8 @@ struct sfc_mae_parse_ctx { * which part of the pattern is being parsed. */ const efx_mae_field_id_t *field_ids_remap; - /* This points to a tunnel-specific default mask. */ + /* These two fields correspond to the tunnel-specific default mask. 
+ size_t tunnel_def_mask_size;
 const void *tunnel_def_mask;
 bool match_mport_set;
 struct sfc_mae_pattern_data pattern_data;
@@ -196,11 +334,33 @@ int sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
 struct rte_flow_error *error);
 int sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 const struct rte_flow_action actions[],
- struct sfc_mae_action_set **action_setp,
+ struct sfc_flow_spec_mae *spec_mae,
 struct rte_flow_error *error);

 sfc_flow_verify_cb_t sfc_mae_flow_verify;
 sfc_flow_insert_cb_t sfc_mae_flow_insert;
 sfc_flow_remove_cb_t sfc_mae_flow_remove;
+sfc_flow_query_cb_t sfc_mae_flow_query;
+
+/**
+ * The value used to represent the lowest priority.
+ * Used in MAE rule API.
+ */
+#define SFC_MAE_RULE_PRIO_LOWEST (-1)
+
+/**
+ * Insert a driver-internal flow rule that matches traffic originating from
+ * some m-port selector and redirects it to another one
+ * (e.g. PF --> PHY, PHY --> PF).
+ *
+ * If the requested priority is negative, use the lowest priority.
+ */
+int sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
+ const efx_mport_sel_t *mport_match,
+ const efx_mport_sel_t *mport_deliver,
+ int prio, struct sfc_mae_rule **rulep);
+void sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule);
+int sfc_mae_switchdev_init(struct sfc_adapter *sa);
+void sfc_mae_switchdev_fini(struct sfc_adapter *sa);

 #ifdef __cplusplus
 }
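
For reference, below is a minimal usage sketch of the driver-internal rule helpers declared at the end of this patch. It relies only on the declarations visible above; the function name example_switchdev_rules_setup and the assumption that the caller has already built the two efx_mport_sel_t selectors (for instance one for the PF and one for the PHY port) are illustrative and not part of the patch.

/*
 * Illustrative sketch (not part of the patch): build a pair of
 * PF <-> PHY forwarding rules with the helpers declared in sfc_mae.h.
 * The caller is assumed to have constructed both m-port selectors.
 */
static int
example_switchdev_rules_setup(struct sfc_adapter *sa,
			      const efx_mport_sel_t *mport_pf,
			      const efx_mport_sel_t *mport_phy,
			      struct sfc_mae_rule **pf_to_phy_rulep,
			      struct sfc_mae_rule **phy_to_pf_rulep)
{
	int rc;

	/* Match traffic coming from the PF and deliver it to the PHY port. */
	rc = sfc_mae_rule_add_mport_match_deliver(sa, mport_pf, mport_phy,
						  SFC_MAE_RULE_PRIO_LOWEST,
						  pf_to_phy_rulep);
	if (rc != 0)
		return rc;

	/* Match traffic coming from the PHY port and deliver it to the PF. */
	rc = sfc_mae_rule_add_mport_match_deliver(sa, mport_phy, mport_pf,
						  SFC_MAE_RULE_PRIO_LOWEST,
						  phy_to_pf_rulep);
	if (rc != 0) {
		/* Roll back the first rule on failure. */
		sfc_mae_rule_del(sa, *pf_to_phy_rulep);
		return rc;
	}

	return 0;
}

The sfc_mae_switchdev_init() and sfc_mae_switchdev_fini() declarations above are expected to wrap logic of this kind, keeping the resulting rules in the switchdev_rule_pf_to_ext and switchdev_rule_ext_to_pf fields of struct sfc_mae.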