+/* PTP timestamping state shared between the datapath and control path. */
+struct cnxk_timesync_info {
+ uint8_t rx_ready; /* Rx timestamp ready flag — presumably set by Rx path, TODO confirm */
+ uint64_t rx_tstamp; /* Latest Rx timestamp value */
+ uint64_t rx_tstamp_dynflag; /* mbuf dynamic flag bit for Rx timestamp */
+ int tstamp_dynfield_offset; /* mbuf dynamic field offset for timestamp */
+ rte_iova_t tx_tstamp_iova; /* IOVA of Tx timestamp memory */
+ uint64_t *tx_tstamp; /* Virtual address of Tx timestamp memory */
+} __plt_cache_aligned;
+
+/* Software node tracking one meter (MTR) object and its chaining links. */
+struct cnxk_meter_node {
+#define MAX_PRV_MTR_NODES 10
+ TAILQ_ENTRY(cnxk_meter_node) next;
+ /**< Pointer to the next flow meter structure. */
+ uint32_t id; /**< Usr mtr id. */
+ struct cnxk_mtr_profile_node *profile; /**< Attached meter profile. */
+ struct cnxk_mtr_policy_node *policy; /**< Attached meter policy. */
+ uint32_t bpf_id; /**< Hw mtr id. */
+ uint32_t rq_num; /**< Number of entries in rq_id[]. */
+ uint32_t *rq_id; /**< Rx queue ids this meter is attached to. */
+ uint16_t level; /**< Meter hierarchy level. */
+ uint32_t prev_id[MAX_PRV_MTR_NODES]; /**< Prev mtr id for chaining */
+ uint32_t prev_cnt; /**< Valid entries in prev_id[]. */
+ uint32_t next_id; /**< Next mtr id for chaining */
+ bool is_prev; /**< Has a previous meter in the chain. */
+ bool is_next; /**< Has a next meter in the chain. */
+ struct rte_mtr_params params; /**< User supplied meter parameters. */
+ struct roc_nix_bpf_objs profs; /**< HW BPF objects backing this meter. */
+ bool is_used; /**< Node is in use. */
+ uint32_t ref_cnt; /**< Use count. */
+};
+
+/* Stored copy of an RSS action configuration (mirrors rte_flow_action_rss). */
+struct action_rss {
+ enum rte_eth_hash_function func; /**< RSS hash function. */
+ uint32_t level; /**< Encapsulation level of the RSS. */
+ uint64_t types; /**< RSS hash types (RTE_ETH_RSS_*). */
+ uint32_t key_len; /**< Hash key length in bytes. */
+ uint32_t queue_num; /**< Number of entries in queue[]. */
+ uint8_t *key; /**< RSS hash key. */
+ uint16_t *queue; /**< Destination queue indexes. */
+};
+
+/* Per-color action of a meter policy; union member selected by action_fate. */
+struct policy_actions {
+ uint32_t action_fate; /**< Fate action type for this color. */
+ union {
+  uint16_t queue; /**< Destination queue (queue fate). */
+  uint32_t mtr_id; /**< Next meter id (meter fate). */
+  struct action_rss *rss_desc; /**< RSS configuration (RSS fate). */
+ };
+};
+
+/* Software node tracking one meter policy and its per-color actions. */
+struct cnxk_mtr_policy_node {
+ TAILQ_ENTRY(cnxk_mtr_policy_node) next;
+ /**< Pointer to the next flow meter structure. */
+ uint32_t id; /**< Policy id. */
+ uint32_t mtr_id; /**< Meter id. */
+ struct rte_mtr_meter_policy_params policy; /**< Policy detail. */
+ struct policy_actions actions[RTE_COLORS]; /**< Actions per color. */
+ uint32_t ref_cnt; /**< Use count. */
+};
+
+/* Software node tracking one meter profile. */
+struct cnxk_mtr_profile_node {
+ TAILQ_ENTRY(cnxk_mtr_profile_node) next;
+ struct rte_mtr_meter_profile profile; /**< Profile detail. */
+ uint32_t ref_cnt; /**< Use count. */
+ uint32_t id; /**< Profile id. */
+};
+
+/* List head types for meter profiles, policies and meter nodes. */
+TAILQ_HEAD(cnxk_mtr_profiles, cnxk_mtr_profile_node);
+TAILQ_HEAD(cnxk_mtr_policy, cnxk_mtr_policy_node);
+TAILQ_HEAD(cnxk_mtr, cnxk_meter_node);
+
+/* Security session private data */
+struct cnxk_eth_sec_sess {
+ /* List entry */
+ TAILQ_ENTRY(cnxk_eth_sec_sess) entry;
+
+ /* Inbound SA is from NIX_RX_IPSEC_SA_BASE or
+  * Outbound SA from roc_nix_inl_outb_sa_base_get()
+  */
+ void *sa;
+
+ /* SA index within the SA base above */
+ uint32_t sa_idx;
+
+ /* IPsec SPI of this session */
+ uint32_t spi;
+
+ /* Back pointer to session */
+ struct rte_security_session *sess;
+
+ /* True for inbound session, false for outbound */
+ bool inb;
+
+ /* Inbound session on inl dev */
+ bool inl_dev;
+};
+
+/* List head type for security sessions. */
+TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);
+
+/* Inbound security data */
+struct cnxk_eth_dev_sec_inb {
+ /* IPSec inbound max SPI */
+ uint16_t max_spi;
+
+ /* Using inbound with inline device */
+ bool inl_dev;
+
+ /* Device argument to force inline device for inb */
+ bool force_inl_dev;
+
+ /* Number of active sessions */
+ uint16_t nb_sess;
+
+ /* List of inbound sessions */
+ struct cnxk_eth_sec_sess_list list;
+};
+
+/* Outbound security data */
+struct cnxk_eth_dev_sec_outb {
+ /* IPSec outbound max SA */
+ uint16_t max_sa;
+
+ /* Per CPT LF descriptor count */
+ uint32_t nb_desc;
+
+ /* Bitmap tracking SA index allocation */
+ struct plt_bitmap *sa_bmap;
+
+ /* Backing memory for sa_bmap */
+ void *sa_bmap_mem;
+
+ /* SA base */
+ uint64_t sa_base;
+
+ /* CPT LF base */
+ struct roc_cpt_lf *lf_base;
+
+ /* Crypto queues => CPT lf count */
+ uint16_t nb_crypto_qs;
+
+ /* Number of active sessions */
+ uint16_t nb_sess;
+
+ /* List of outbound sessions */
+ struct cnxk_eth_sec_sess_list list;
+};
+