X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx.h;h=6ac1a5a48e97fd3a631a4a8c302e27cec373186c;hb=78a38edf66de67c8f52d0fcf17865c0dd9937013;hp=4a857d846eaca3d8c3945e567845b737c74f92bb;hpb=806af6938623757aeb49ae3aed9df784256950ed;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 4a857d846e..6ac1a5a48e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -107,12 +107,8 @@ struct rxq {
 	struct rte_mempool *mp; /* Memory Pool for allocations. */
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_exp_wq *wq; /* Work Queue. */
-	struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
-	struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+	int32_t (*poll)(); /* Verbs poll function. */
+	int32_t (*recv)(); /* Verbs receive function. */
 	unsigned int port_id; /* Port ID for incoming packets. */
 	unsigned int elts_n; /* (*elts)[] length. */
 	unsigned int elts_head; /* Current index in (*elts)[]. */
@@ -130,6 +126,12 @@ struct rxq {
 	struct ibv_exp_res_domain *rd; /* Resource Domain. */
 	struct fdir_queue fdir_queue; /* Flow director queue. */
 	struct ibv_mr *mr; /* Memory Region (for mp). */
+	struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+	struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
 };
 
 /* Hash RX queue types. */
@@ -193,6 +195,7 @@ struct special_flow_init {
 	uint8_t dst_mac_val[6];
 	uint8_t dst_mac_mask[6];
 	unsigned int hash_types;
+	unsigned int per_vlan:1;
 };
 
 enum hash_rxq_flow_type {
@@ -229,7 +232,8 @@ struct hash_rxq {
 	enum hash_rxq_type type; /* Hash RX queue type. */
 	/* MAC flow steering rules, one per VLAN ID. */
 	struct ibv_exp_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
-	struct ibv_exp_flow *special_flow[MLX5_MAX_SPECIAL_FLOWS];
+	struct ibv_exp_flow *special_flow
+		[MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];
 };
 
 /* TX element. */
@@ -248,6 +252,15 @@ typedef uint8_t linear_t[16384];
 /* TX queue descriptor. */
 struct txq {
 	struct priv *priv; /* Back pointer to private data. */
+	int32_t (*poll_cnt)(struct ibv_cq *cq, uint32_t max);
+	int (*send_pending)();
+#if MLX5_PMD_MAX_INLINE > 0
+	int (*send_pending_inline)();
+#endif
+#if MLX5_PMD_SGE_WR_N > 1
+	int (*send_pending_sg_list)();
+#endif
+	int (*send_flush)(struct ibv_qp *qp);
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_qp *qp; /* Queue Pair. */
 	struct txq_elt (*elts)[]; /* TX elements. */
@@ -306,6 +319,7 @@ void mlx5_tx_queue_release(void *);
 
 /* mlx5_rxtx.c */
 
+struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *);
 void txq_mp2mr_iter(const struct rte_mempool *, void *);
 uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
 uint16_t mlx5_rx_burst_sp(void *, struct rte_mbuf **, uint16_t);
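Note on the change above: the hunks cache Verbs burst entry points directly in struct rxq and struct txq (poll, recv, send_pending, send_flush, ...) instead of going through the if_cq/if_wq interface structures on every call, and move those interface pointers to the end of the RX queue descriptor as control-path-only state. The snippet below is a minimal sketch of how the RX side might invoke the cached pointers; the helper name rxq_poll_one() and the argument lists are assumptions mirroring the removed if_cq/if_wq burst interfaces, since the members are declared here with empty parameter lists and the real call sites live in mlx5_rxtx.c, which is not part of this diff.

/*
 * Sketch only, not part of the commit. Assumes it is built in a file that
 * includes mlx5_rxtx.h; rxq_poll_one() is a hypothetical helper and the
 * arguments passed to rxq->poll and rxq->recv are assumed, not taken from
 * this diff.
 */
static inline int
rxq_poll_one(struct rxq *rxq, struct ibv_sge *sge)
{
	uint32_t flags = 0;
	int32_t len;

	/* Poll one completion through the cached Verbs function pointer. */
	len = rxq->poll(rxq->cq, NULL, NULL, &flags);
	if (len <= 0)
		return len; /* Nothing completed, or an error code. */
	/* Re-post the receive buffer through the cached Verbs function. */
	return rxq->recv(rxq->wq, sge, 1);
}

Keeping the function pointers in the queue descriptors appears intended to reduce the per-burst path to a single indirect call per operation, which is consistent with the if_wq/if_cq interface objects being relegated to the tail of struct rxq.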