BCMFS_RM_CPLQ
};
+/*
+ * Translate a h/w queue index into its register I/O base address:
+ * each queue occupies BCMFS_HW_QUEUE_IO_ADDR_LEN bytes of I/O space
+ * starting at (base).
+ */
+#define BCMFS_QP_IOBASE_XLATE(base, idx) \
+ ((base) + ((idx) * BCMFS_HW_QUEUE_IO_ADDR_LEN))
+
+/* Max pkts for preprocessing before submitting to h/w qp */
+#define BCMFS_MAX_REQS_BUFF 64
+
+/* qp stats */
struct bcmfs_qp_stats {
/* Count of all operations enqueued */
uint64_t enqueued_count;
uint16_t nb_descriptors;
/* Maximum number of h/w descriptors needed by a request */
uint16_t max_descs_req;
+ /* h/w ops associated with qp */
+ struct bcmfs_hw_queue_pair_ops *ops;
};
struct bcmfs_queue {
/* s/w pointer for completion h/w queue */
uint32_t cmpl_read_ptr;
};
+ /* number of inflight descriptor accumulated before next db ring */
+ uint16_t descs_inflight;
/* Memzone name */
char memz_name[RTE_MEMZONE_NAMESIZE];
};
struct bcmfs_qp_stats stats;
/* h/w ops associated with qp */
struct bcmfs_hw_queue_pair_ops *ops;
+ /* bcmfs requests pool */
+ struct rte_mempool *sr_mp;
+ /* a temporary buffer to keep message pointers */
+ struct bcmfs_qp_message *infl_msgs[BCMFS_MAX_REQS_BUFF];
} __rte_cache_aligned;
uint16_t queue_pair_id,
struct bcmfs_qp_config *bcmfs_conf);
+/* Stats functions */
+
+/*
+ * Aggregate the per-qp counters of the @num_qp queue pairs in the
+ * @qp array into @stats.
+ * NOTE(review): exact aggregation semantics live in the matching .c
+ * file, which is not visible here — confirm there.
+ */
+void bcmfs_qp_stats_get(struct bcmfs_qp **qp, int num_qp,
+ struct bcmfs_qp_stats *stats);
+
+/* Reset the stats counters of the @num_qp queue pairs in @qp. */
+void bcmfs_qp_stats_reset(struct bcmfs_qp **qp, int num_qp);
+
#endif /* _BCMFS_QP_H_ */