X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5.h;h=fab58c973f4f80658b0c88fd34d22a331498ae10;hb=dd3c774f6ffb;hp=a36ba2d57056f3bbbbf7c8d6a9b99edb5ca0d10a;hpb=63bd16292c3a519029a1b451e734b8f4489c2fd3;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a36ba2d570..fab58c973f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -37,6 +37,7 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
 #include "mlx5_glue.h"
+#include "mlx5_prm.h"
 
 enum {
 	PCI_VENDOR_ID_MELLANOX = 0x15b3,
@@ -55,6 +56,8 @@ enum {
 	PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF = 0xa2d3,
 	PCI_DEVICE_ID_MELLANOX_CONNECTX6 = 0x101b,
 	PCI_DEVICE_ID_MELLANOX_CONNECTX6VF = 0x101c,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX6DX = 0x101d,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF = 0x101e,
 };
 
 /* Request types for IPC. */
@@ -237,6 +240,7 @@ struct mlx5_dev_config {
 	unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
 	unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */
 	unsigned int dv_flow_en:1; /* Enable DV flow. */
+	unsigned int dv_xmeta_en:2; /* Enable extensive flow metadata. */
 	unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
 	unsigned int devx:1; /* Whether devx interface is available or not. */
 	unsigned int dest_tir:1; /* Whether advanced DR API is available. */
@@ -252,6 +256,8 @@
 	} mprq; /* Configurations for Multi-Packet RQ. */
 	int mps; /* Multi-packet send supported mode. */
 	unsigned int flow_prio; /* Number of flow priorities. */
+	enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];
+	/* Availability of mreg_c's. */
 	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
 	unsigned int ind_table_max_size; /* Maximum indirection table size. */
 	unsigned int max_dump_files_num; /* Maximum dump files per queue. */
@@ -560,6 +566,12 @@ struct mlx5_flow_tbl_resource {
 };
 
 #define MLX5_MAX_TABLES UINT16_MAX
+#define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
+/* Reserve the last two tables for metadata register copy. */
+#define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
+#define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
+/* Tables for metering splits should be added here. */
+#define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3)
 #define MLX5_MAX_TABLES_FDB UINT16_MAX
 
 #define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
@@ -577,6 +589,15 @@
 	uint64_t dbr_bitmap[MLX5_DBR_BITMAP_SIZE];
 };
+/* ID generation structure. */
+struct mlx5_flow_id_pool {
+	uint32_t *free_arr; /**< Pointer to the array of free values. */
+	uint32_t base_index;
+	/**< The next index that can be used without any free elements. */
+	uint32_t *curr; /**< Pointer to the index to pop. */
+	uint32_t *last; /**< Pointer to the last element in the empty array. */
+};
+
 /*
  * Shared Infiniband device context for Master/Representors
  * which belong to same IB device with multiple IB ports.
 */
@@ -604,6 +625,9 @@
 	} mr;
 	/* Shared DV/DR flow data section. */
 	pthread_mutex_t dv_mutex; /* DV context mutex. */
+	uint32_t dv_meta_mask; /* flow META metadata supported mask. */
+	uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
+	uint32_t dv_regc0_mask; /* available bits of metadata reg_c[0]. */
 	uint32_t dv_refcnt; /* DV/DR data reference counter. */
 	void *fdb_domain; /* FDB Direct Rules name space handle. */
 	struct mlx5_flow_tbl_resource fdb_tbl[MLX5_MAX_TABLES_FDB];
@@ -636,6 +660,7 @@ struct mlx5_ibv_shared {
 	struct mlx5dv_devx_cmd_comp *devx_comp; /* DEVX async comp obj. */
 	struct mlx5_devx_obj *tis; /* TIS object. */
 	struct mlx5_devx_obj *td; /* Transport domain. */
+	struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
 	struct mlx5_ibv_shared_port port[]; /* per device port data array. */
 };
 
@@ -711,6 +736,9 @@ struct mlx5_priv {
 	uint32_t nl_sn; /* Netlink message sequence number. */
 	LIST_HEAD(dbrpage, mlx5_devx_dbr_page) dbrpgs; /* Door-bell pages. */
 	struct mlx5_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
+	struct mlx5_flow_id_pool *qrss_id_pool;
+	struct mlx5_hlist *mreg_cp_tbl;
+	/* Hash table of Rx metadata register copy table. */
 #ifndef RTE_ARCH_64
 	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
 	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
@@ -775,7 +803,7 @@ int mlx5_set_link_up(struct rte_eth_dev *dev);
 int mlx5_is_removed(struct rte_eth_dev *dev);
 eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
 eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
-struct mlx5_priv *mlx5_port_to_eswitch_info(uint16_t port);
+struct mlx5_priv *mlx5_port_to_eswitch_info(uint16_t port, bool valid);
 struct mlx5_priv *mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev);
 int mlx5_sysfs_switch_info(unsigned int ifindex,
 			   struct mlx5_switch_info *info);
@@ -855,6 +883,8 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev);
 
 /* mlx5_flow.c */
 
+int mlx5_flow_discover_mreg_c(struct rte_eth_dev *eth_dev);
+bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev);
 int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
 void mlx5_flow_print(struct rte_flow *flow);
 int mlx5_flow_validate(struct rte_eth_dev *dev,
@@ -883,6 +913,7 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
 void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
 int mlx5_flow_verify(struct rte_eth_dev *dev);
+int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
 int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 			struct rte_flow_item_eth *eth_spec,
 			struct rte_flow_item_eth *eth_mask,
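
Note on the new mlx5_flow_id_pool: free_arr, curr and last form a small LIFO stack of released IDs, while base_index hands out fresh IDs once the stack is empty. The sketch below is only an illustration of how such a pool could allocate and release 32-bit flow IDs; the helper names flow_id_get() and flow_id_release() are hypothetical, and the out-of-space handling is simplified (the driver's actual allocator is not part of this header).

#include <errno.h>
#include <stdint.h>

#include "mlx5.h" /* struct mlx5_flow_id_pool from the hunk above. */

/* Hypothetical helper: pop a released ID if one exists, else take a fresh one. */
static int
flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id)
{
	if (pool->curr == pool->free_arr) {
		/* Free stack is empty, generate a new ID. */
		if (pool->base_index == UINT32_MAX)
			return -ENOMEM; /* 32-bit ID space exhausted. */
		*id = ++pool->base_index;
		return 0;
	}
	/* Reuse the most recently released ID. */
	*id = *(--pool->curr);
	return 0;
}

/* Hypothetical helper: push an ID back onto the free stack. */
static int
flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
{
	if (pool->curr == pool->last)
		return -ENOMEM; /* Stack full; a real pool would resize free_arr here. */
	*pool->curr++ = id;
	return 0;
}

Reusing the most recently released ID first keeps both operations O(1) and the free array compact, which suits per-flow ID consumers such as the qrss_id_pool added to struct mlx5_priv above.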