/* Apply BP/DROP when CQ is 95% full (level counts the remaining 5% of 256) */
#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
-#define NIX_RQ_AURA_THRESH(x) (((x) * 95) / 100)
+#define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
+#define NIX_RQ_AURA_THRESH(x) (((x)*95) / 100)
/* IRQ triggered when NIX_LF_CINTX_CNT[QCOUNT] crosses this value */
#define CQ_CQE_THRESH_DEFAULT 0x1ULL
/* Traffic Manager */
#define NIX_TM_MAX_HW_TXSCHQ 512
#define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
/* TM flags */
#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
uint32_t priority;
uint32_t weight;
uint16_t lvl;
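+ /* Relative channel used when configuring backpressure for this node */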
+ uint16_t rel_chan;
uint32_t parent_id;
uint32_t shaper_profile_id;
void (*free_fn)(void *node);
uint32_t red_algo : 2;
uint32_t pkt_mode : 1;
uint32_t pkt_mode_set : 1;
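+ /* Set when backpressure can be enabled on this node */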
+ uint32_t bp_capa : 1;
bool child_realloc;
struct nix_tm_node *parent;
struct nix_tm_tb commit;
struct nix_tm_tb peak;
int32_t pkt_len_adj;
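+ /* Length adjustment to apply when the shaper is in packet mode */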
+ int32_t pkt_mode_adj;
bool pkt_mode;
uint32_t id;
void (*free_fn)(void *profile);
uint16_t msixoff;
uint8_t rx_pause;
uint8_t tx_pause;
+ uint16_t cev;
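+ /* RX config flags saved from LF alloc */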
+ uint64_t rx_cfg;
struct dev dev;
uint16_t cints;
uint16_t qints;
uint16_t tm_link_cfg_lvl;
uint16_t contig_rsvd[NIX_TXSCH_LVL_CNT];
uint16_t discontig_rsvd[NIX_TXSCH_LVL_CNT];
+
+ /* IPsec info */
+ uint16_t cpt_msixoff[MAX_RVU_BLKLF_CNT];
+ bool inl_inb_ena;
+ bool inl_outb_ena;
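+ /* Inbound/outbound SA table base addresses and per-SA sizes */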
+ void *inb_sa_base;
+ size_t inb_sa_sz;
+ void *outb_sa_base;
+ size_t outb_sa_sz;
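+ /* SSO PF_FUNC that receives outbound inline error events */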
+ uint16_t outb_err_sso_pffunc;
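+ /* CPT LFs attached for inline IPsec processing */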
+ struct roc_cpt_lf *cpt_lf_base;
+ uint16_t nb_cpt_lf;
+ /* Mode provided by driver */
+ bool inb_inl_dev;
} __plt_cache_aligned;
enum nix_err_status {
NIX_ERR_INVALID_RANGE,
NIX_ERR_INTERNAL,
NIX_ERR_OP_NOTSUP,
+ NIX_ERR_HW_NOTSUP,
NIX_ERR_QUEUE_INVALID_RANGE,
NIX_ERR_AQ_READ_FAILED,
NIX_ERR_AQ_WRITE_FAILED,
static inline uint64_t
nix_tm_weight_to_rr_quantum(uint64_t weight)
{
- uint64_t max = (roc_model_is_cn9k() ? NIX_CN9K_TM_RR_QUANTUM_MAX :
- NIX_TM_RR_QUANTUM_MAX);
+ uint64_t max = NIX_CN9K_TM_RR_QUANTUM_MAX;
- weight &= (uint64_t)ROC_NIX_TM_MAX_SCHED_WT;
- return (weight * max) / ROC_NIX_TM_MAX_SCHED_WT;
+ /* From CN10K onwards, we only configure RR weight */
+ if (!roc_model_is_cn9k())
+ return weight;
+
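+ /* On CN9K, mask and scale the weight into the RR quantum range */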
+ weight &= (uint64_t)max;
+ return (weight * max) / ROC_NIX_CN9K_TM_RR_WEIGHT_MAX;
}
static inline bool
int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
bool rr_quantum_only);
-int nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+
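+/* RQ config and TM backpressure helpers */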
+int nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
+ bool cfg, bool ena);
+int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
+ bool ena);
+int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
+int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ bool enable);
+void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
/*
 * TM priv utils.
 */
struct nix_tm_shaper_profile *nix_tm_shaper_profile_alloc(void);
void nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile);
+uint64_t nix_get_blkaddr(struct dev *dev);
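+/* Debug dump helpers */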
+void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx);
+int nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data);
+int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data,
+ uint8_t lf_tx_stats, uint8_t lf_rx_stats);
+int nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
+ uint16_t cints);
+int nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid,
+ __io void **ctx_p);
+
+/*
+ * Telemetry
+ */
+int nix_tel_node_add(struct roc_nix *roc_nix);
+void nix_tel_node_del(struct roc_nix *roc_nix);
+int nix_tel_node_add_rq(struct roc_nix_rq *rq);
+int nix_tel_node_add_cq(struct roc_nix_cq *cq);
+int nix_tel_node_add_sq(struct roc_nix_sq *sq);
+
#endif /* _ROC_NIX_PRIV_H_ */