X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5.h;h=4d3e9f38f7f7c6311365631548d9df5caac838c0;hb=15dfc1ecb39f921cac3c5d61e77cd51fa3c1eb23;hp=227429848e02a9ba29b50ce0f96d41a01d80184b;hpb=78be885295b86da2c208324f1e276e802812b60d;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 227429848e..4d3e9f38f7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -51,6 +51,7 @@ enum {
 	PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
 	PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
 	PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF = 0xa2d3,
 };
 
 /** Switch information returned by mlx5_nl_switch_info(). */
@@ -99,7 +100,6 @@ struct mlx5_dev_config {
 	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
 	unsigned int hw_padding:1; /* End alignment padding is supported. */
 	unsigned int vf:1; /* This is a VF. */
-	unsigned int mps:2; /* Multi-packet send supported mode. */
 	unsigned int tunnel_en:1;
 	/* Whether tunnel stateless offloads are supported. */
 	unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
@@ -122,7 +122,8 @@ struct mlx5_dev_config {
 		unsigned int min_rxqs_num;
 		/* Rx queue count threshold to enable MPRQ. */
 	} mprq; /* Configurations for Multi-Packet RQ. */
-	unsigned int max_verbs_prio; /* Number of Verb flow priorities. */
+	int mps; /* Multi-packet send supported mode. */
+	unsigned int flow_prio; /* Number of flow priorities. */
 	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
 	unsigned int ind_table_max_size; /* Maximum indirection table size. */
 	int txq_inline; /* Maximum packet size for inlining. */
@@ -156,6 +157,14 @@ struct mlx5_drop {
 	struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
 };
 
+/** DPDK port to network interface index (ifindex) conversion. */
+struct mlx5_nl_flow_ptoi {
+	uint16_t port_id; /**< DPDK port ID. */
+	unsigned int ifindex; /**< Network interface index. */
+};
+
+struct mnl_socket;
+
 struct priv {
 	LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
 	struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
@@ -188,6 +197,8 @@ struct priv {
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
 	struct mlx5_flows flows; /* RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
+	LIST_HEAD(counters, mlx5_flow_counter) flow_counters;
+	/* Flow counters. */
 	struct {
 		uint32_t dev_gen; /* Generation number to flush local caches. */
 		rte_rwlock_t rwlock; /* MR Lock. */
@@ -213,6 +224,12 @@ struct priv {
 	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
 	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
 	uint32_t nl_sn; /* Netlink message sequence number. */
+#ifndef RTE_ARCH_64
+	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
+	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
+	/* UAR same-page access control required in 32bit implementations. */
+#endif
+	struct mnl_socket *mnl_socket; /* Libmnl socket. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -227,7 +244,7 @@ int mlx5_getenv_int(const char *);
 int mlx5_get_master_ifname(const struct rte_eth_dev *dev,
 			   char (*ifname)[IF_NAMESIZE]);
 int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
-int mlx5_ifindex(const struct rte_eth_dev *dev);
+unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
 int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
 	       int master);
 int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
@@ -257,6 +274,8 @@ eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
 unsigned int mlx5_dev_to_port_id(const struct rte_device *dev,
				 uint16_t *port_list,
				 unsigned int port_list_n);
+int mlx5_sysfs_switch_info(unsigned int ifindex,
+			   struct mlx5_switch_info *info);
 
 /* mlx5_mac.c */
 
@@ -317,7 +336,7 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev);
 
 /* mlx5_flow.c */
 
-int mlx5_verbs_max_prio(struct rte_eth_dev *dev);
+int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
 void mlx5_flow_print(struct rte_flow *flow);
 int mlx5_flow_validate(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
@@ -365,7 +384,7 @@ int mlx5_socket_connect(struct rte_eth_dev *priv);
 
 /* mlx5_nl.c */
 
-int mlx5_nl_init(uint32_t nlgroups, int protocol);
+int mlx5_nl_init(int protocol);
 int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
			 uint32_t index);
 int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
@@ -378,4 +397,23 @@ unsigned int mlx5_nl_ifindex(int nl, const char *name);
 int mlx5_nl_switch_info(int nl, unsigned int ifindex,
			struct mlx5_switch_info *info);
 
+/* mlx5_nl_flow.c */
+
+int mlx5_nl_flow_transpose(void *buf,
+			   size_t size,
+			   const struct mlx5_nl_flow_ptoi *ptoi,
+			   const struct rte_flow_attr *attr,
+			   const struct rte_flow_item *pattern,
+			   const struct rte_flow_action *actions,
+			   struct rte_flow_error *error);
+void mlx5_nl_flow_brand(void *buf, uint32_t handle);
+int mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
+			struct rte_flow_error *error);
+int mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
+			 struct rte_flow_error *error);
+int mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
+		      struct rte_flow_error *error);
+struct mnl_socket *mlx5_nl_flow_socket_create(void);
+void mlx5_nl_flow_socket_destroy(struct mnl_socket *nl);
+
 #endif /* RTE_PMD_MLX5_H_ */
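
For reference, a minimal, hypothetical sketch of how the mlx5_nl_flow_*() helpers declared by this patch could be chained together, using only the prototypes added above. It is not the PMD's actual call path: the scratch buffer size, the zero-terminated ptoi list, and the exact return-value convention of mlx5_nl_flow_transpose() are assumptions made purely for illustration.

/* Hypothetical illustration only; relies on the internal PMD header above. */
#include <stddef.h>
#include <stdint.h>

#include <rte_flow.h>

#include "mlx5.h"

static int
offload_rule_sketch(struct mnl_socket *nl, unsigned int ifindex,
		    uint16_t port_id, uint32_t handle,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item *pattern,
		    const struct rte_flow_action *actions,
		    struct rte_flow_error *error)
{
	/* DPDK port to ifindex map; the zeroed terminator is an assumption. */
	const struct mlx5_nl_flow_ptoi ptoi[] = {
		{ .port_id = port_id, .ifindex = ifindex },
		{ .port_id = 0, .ifindex = 0 },
	};
	uint8_t buf[1024]; /* Arbitrary scratch size for this sketch. */
	int ret;

	/* One-time preparation of the target interface for rule injection. */
	ret = mlx5_nl_flow_init(nl, ifindex, error);
	if (ret)
		return ret;
	/* Translate the rte_flow description into a netlink message. */
	ret = mlx5_nl_flow_transpose(buf, sizeof(buf), ptoi, attr, pattern,
				     actions, error);
	if (ret < 0)
		return ret;
	/* Tag the message with the rule handle before creating it. */
	mlx5_nl_flow_brand(buf, handle);
	ret = mlx5_nl_flow_create(nl, buf, error);
	if (ret)
		return ret;
	/* A rule created from buf is later removed with the same buffer. */
	return mlx5_nl_flow_destroy(nl, buf, error);
}

In such a scheme, the libmnl socket itself would be obtained once with mlx5_nl_flow_socket_create() and released with mlx5_nl_flow_socket_destroy() when the port is closed.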