'roc_nix_queue.c',
'roc_nix_rss.c',
'roc_nix_stats.c',
+ 'roc_nix_tm.c',
+ 'roc_nix_tm_ops.c',
+ 'roc_nix_tm_utils.c',
'roc_nix_vlan.c',
'roc_npa.c',
'roc_npa_debug.c',
if (rc)
goto lf_detach;
+ rc = nix_tm_conf_init(roc_nix);
+ if (rc)
+ goto unregister_irqs;
+
/* Get NIX HW info */
roc_nix_get_hw_info(roc_nix);
nix->dev.drv_inited = true;
return 0;
+unregister_irqs:
+ nix_unregister_irqs(nix);
lf_detach:
nix_lf_detach(nix);
dev_fini:
if (!nix->dev.drv_inited)
goto fini;
+ nix_tm_conf_fini(roc_nix);
nix_unregister_irqs(nix);
rc = nix_lf_detach(nix);
int __roc_api roc_nix_register_cq_irqs(struct roc_nix *roc_nix);
void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
+/* Traffic Management */
+#define ROC_NIX_TM_MAX_SCHED_WT ((uint8_t)~0)
+
+enum roc_nix_tm_tree {
+ ROC_NIX_TM_DEFAULT = 0,
+ ROC_NIX_TM_RLIMIT,
+ ROC_NIX_TM_USER,
+ ROC_NIX_TM_TREE_MAX,
+};
+
+enum roc_tm_node_level {
+ ROC_TM_LVL_ROOT = 0,
+ ROC_TM_LVL_SCH1,
+ ROC_TM_LVL_SCH2,
+ ROC_TM_LVL_SCH3,
+ ROC_TM_LVL_SCH4,
+ ROC_TM_LVL_QUEUE,
+ ROC_TM_LVL_MAX,
+};
+
+/*
+ * TM runtime hierarchy init API.
+ */
+int __roc_api roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable);
+int __roc_api roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq);
+
/* MAC */
int __roc_api roc_nix_mac_rxtx_start_stop(struct roc_nix *roc_nix, bool start);
int __roc_api roc_nix_mac_link_event_start_stop(struct roc_nix *roc_nix,
uint8_t qintx;
};
+/* Traffic Manager */
+#define NIX_TM_MAX_HW_TXSCHQ 512
+#define NIX_TM_HW_ID_INVALID UINT32_MAX
+
+/* TM flags */
+#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
+#define NIX_TM_TL1_NO_SP BIT_ULL(1)
+#define NIX_TM_TL1_ACCESS BIT_ULL(2)
+
+struct nix_tm_tb {
+ /** Token bucket rate (bytes per second) */
+ uint64_t rate;
+
+ /** Token bucket size (bytes), a.k.a. max burst size */
+ uint64_t size;
+};
+
+struct nix_tm_node {
+ TAILQ_ENTRY(nix_tm_node) node;
+
+ /* Input params */
+ enum roc_nix_tm_tree tree;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint16_t lvl;
+ uint32_t parent_id;
+ uint32_t shaper_profile_id;
+ void (*free_fn)(void *node);
+
+ /* Derived params */
+ uint32_t hw_id;
+ uint16_t hw_lvl;
+ uint32_t rr_prio;
+ uint32_t rr_num;
+ uint32_t max_prio;
+ uint32_t parent_hw_id;
+ uint32_t flags : 16;
+#define NIX_TM_NODE_HWRES BIT_ULL(0)
+#define NIX_TM_NODE_ENABLED BIT_ULL(1)
+ /* Shaper algorithm for RED state @NIX_REDALG_E */
+ uint32_t red_algo : 2;
+ uint32_t pkt_mode : 1;
+ uint32_t pkt_mode_set : 1;
+
+ bool child_realloc;
+ struct nix_tm_node *parent;
+
+ /* Non-leaf node SP (strict priority) count */
+ uint32_t n_sp_priorities;
+
+ /* Last stats */
+ uint64_t last_pkts;
+ uint64_t last_bytes;
+};
+
+struct nix_tm_shaper_profile {
+ TAILQ_ENTRY(nix_tm_shaper_profile) shaper;
+ struct nix_tm_tb commit;
+ struct nix_tm_tb peak;
+ int32_t pkt_len_adj;
+ bool pkt_mode;
+ uint32_t id;
+ void (*free_fn)(void *profile);
+
+ uint32_t ref_cnt;
+};
+
+TAILQ_HEAD(nix_tm_node_list, nix_tm_node);
+TAILQ_HEAD(nix_tm_shaper_profile_list, nix_tm_shaper_profile);
+
struct nix {
uint16_t reta[ROC_NIX_RSS_GRPS][ROC_NIX_RSS_RETA_MAX];
enum roc_nix_rss_reta_sz reta_sz;
bool ptp_en;
bool is_nix1;
+ /* Traffic manager info */
+
+ /* Contiguous resources per lvl */
+ struct plt_bitmap *schq_contig_bmp[NIX_TXSCH_LVL_CNT];
+ /* Discontiguous resources per lvl */
+ struct plt_bitmap *schq_bmp[NIX_TXSCH_LVL_CNT];
+ void *schq_bmp_mem;
+
+ struct nix_tm_shaper_profile_list shaper_profile_list;
+ struct nix_tm_node_list trees[ROC_NIX_TM_TREE_MAX];
+ enum roc_nix_tm_tree tm_tree;
+ uint64_t tm_rate_min;
+ uint16_t tm_root_lvl;
+ uint16_t tm_flags;
+ uint16_t tm_link_cfg_lvl;
+ uint16_t contig_rsvd[NIX_TXSCH_LVL_CNT];
+ uint16_t discontig_rsvd[NIX_TXSCH_LVL_CNT];
} __plt_cache_aligned;
enum nix_err_status {
NIX_ERR_QUEUE_INVALID_RANGE,
NIX_ERR_AQ_READ_FAILED,
NIX_ERR_AQ_WRITE_FAILED,
+ NIX_ERR_TM_LEAF_NODE_GET,
+ NIX_ERR_TM_INVALID_LVL,
+ NIX_ERR_TM_INVALID_PRIO,
+ NIX_ERR_TM_INVALID_PARENT,
+ NIX_ERR_TM_NODE_EXISTS,
+ NIX_ERR_TM_INVALID_NODE,
+ NIX_ERR_TM_INVALID_SHAPER_PROFILE,
+ NIX_ERR_TM_PKT_MODE_MISMATCH,
+ NIX_ERR_TM_WEIGHT_EXCEED,
+ NIX_ERR_TM_CHILD_EXISTS,
+ NIX_ERR_TM_INVALID_PEAK_SZ,
+ NIX_ERR_TM_INVALID_PEAK_RATE,
+ NIX_ERR_TM_INVALID_COMMIT_SZ,
+ NIX_ERR_TM_INVALID_COMMIT_RATE,
+ NIX_ERR_TM_SHAPER_PROFILE_IN_USE,
+ NIX_ERR_TM_SHAPER_PROFILE_EXISTS,
+ NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST,
+ NIX_ERR_TM_INVALID_TREE,
+ NIX_ERR_TM_PARENT_PRIO_UPDATE,
+ NIX_ERR_TM_PRIO_EXCEEDED,
+ NIX_ERR_TM_PRIO_ORDER,
+ NIX_ERR_TM_MULTIPLE_RR_GROUPS,
+ NIX_ERR_TM_SQ_UPDATE_FAIL,
NIX_ERR_NDC_SYNC,
};
int nix_register_irqs(struct nix *nix);
void nix_unregister_irqs(struct nix *nix);
+/* TM */
+#define NIX_TM_TREE_MASK_ALL \
+ (BIT(ROC_NIX_TM_DEFAULT) | BIT(ROC_NIX_TM_RLIMIT) | \
+ BIT(ROC_NIX_TM_USER))
+
+/* NIX_MAX_HW_FRS ==
+ * NIX_TM_DFLT_RR_WT * NIX_TM_RR_QUANTUM_MAX / ROC_NIX_TM_MAX_SCHED_WT
+ */
+#define NIX_TM_DFLT_RR_WT 71
+
+/* Default TL1 priority and Quantum from AF */
+#define NIX_TM_TL1_DFLT_RR_QTM ((1 << 24) - 1)
+#define NIX_TM_TL1_DFLT_RR_PRIO 1
+
+struct nix_tm_shaper_data {
+ uint64_t burst_exponent;
+ uint64_t burst_mantissa;
+ uint64_t div_exp;
+ uint64_t exponent;
+ uint64_t mantissa;
+ uint64_t burst;
+ uint64_t rate;
+};
+
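+/* Scale a relative scheduling weight (0..ROC_NIX_TM_MAX_SCHED_WT) linearly
+ * onto the HW round-robin quantum range; the maximum quantum differs
+ * between cn9k and later models.
+ */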
+static inline uint64_t
+nix_tm_weight_to_rr_quantum(uint64_t weight)
+{
+ uint64_t max = (roc_model_is_cn9k() ? NIX_CN9K_TM_RR_QUANTUM_MAX :
+ NIX_TM_RR_QUANTUM_MAX);
+
+ weight &= (uint64_t)ROC_NIX_TM_MAX_SCHED_WT;
+ return (weight * max) / ROC_NIX_TM_MAX_SCHED_WT;
+}
+
+static inline bool
+nix_tm_have_tl1_access(struct nix *nix)
+{
+ return !!(nix->tm_flags & NIX_TM_TL1_ACCESS);
+}
+
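+/* With TL1 access the SW tree spans ROOT..QUEUE and the leaf (SQ) sits at
+ * the QUEUE level; without it the tree is one level shorter and SCH4 is
+ * the leaf.
+ */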
+static inline bool
+nix_tm_is_leaf(struct nix *nix, int lvl)
+{
+ if (nix_tm_have_tl1_access(nix))
+ return (lvl == ROC_TM_LVL_QUEUE);
+ return (lvl == ROC_TM_LVL_SCH4);
+}
+
+static inline struct nix_tm_node_list *
+nix_tm_node_list(struct nix *nix, enum roc_nix_tm_tree tree)
+{
+ return &nix->trees[tree];
+}
+
+static inline const char *
+nix_tm_hwlvl2str(uint32_t hw_lvl)
+{
+ switch (hw_lvl) {
+ case NIX_TXSCH_LVL_MDQ:
+ return "SMQ/MDQ";
+ case NIX_TXSCH_LVL_TL4:
+ return "TL4";
+ case NIX_TXSCH_LVL_TL3:
+ return "TL3";
+ case NIX_TXSCH_LVL_TL2:
+ return "TL2";
+ case NIX_TXSCH_LVL_TL1:
+ return "TL1";
+ default:
+ break;
+ }
+
+ return "???";
+}
+
+static inline const char *
+nix_tm_tree2str(enum roc_nix_tm_tree tree)
+{
+ if (tree == ROC_NIX_TM_DEFAULT)
+ return "Default Tree";
+ else if (tree == ROC_NIX_TM_RLIMIT)
+ return "Rate Limit Tree";
+ else if (tree == ROC_NIX_TM_USER)
+ return "User Tree";
+ return "???";
+}
+
+/*
+ * TM priv ops.
+ */
+
+int nix_tm_conf_init(struct roc_nix *roc_nix);
+void nix_tm_conf_fini(struct roc_nix *roc_nix);
+int nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
+ uint16_t *smq);
+int nix_tm_sq_flush_pre(struct roc_nix_sq *sq);
+int nix_tm_sq_flush_post(struct roc_nix_sq *sq);
+int nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable);
+int nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node);
+
+/*
+ * TM priv utils.
+ */
+struct nix_tm_node *nix_tm_node_search(struct nix *nix, uint32_t node_id,
+ enum roc_nix_tm_tree tree);
+uint8_t nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
+ volatile uint64_t *reg, volatile uint64_t *regval);
+
#endif /* _ROC_NIX_PRIV_H_ */
if (rc)
goto nomem;
+ rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
+ if (rc) {
+ rc = NIX_ERR_TM_LEAF_NODE_GET;
+ goto nomem;
+ }
+
/* Init SQ context */
if (roc_model_is_cn9k())
sq_cn9k_init(nix, sq, rr_quantum, smq);
qid = sq->qid;
+ rc = nix_tm_sq_flush_pre(sq);
+
/* Release SQ context */
if (roc_model_is_cn9k())
rc |= sq_cn9k_fini(roc_nix_to_nix_priv(sq->roc_nix), sq);
if (mbox_process(mbox))
rc |= NIX_ERR_NDC_SYNC;
+ rc |= nix_tm_sq_flush_post(sq);
rc |= roc_npa_pool_destroy(sq->aura_handle);
plt_free(sq->fc);
plt_free(sq->sqe_mem);
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
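+/* Walk up from the given node and clear SW_XOFF on any disabled ancestor
+ * that still holds HW resources, so a subsequent SMQ flush can drain the
+ * path.
+ */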
+int
+nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
+{
+ struct mbox *mbox = (&nix->dev)->mbox;
+ struct nix_txschq_config *req;
+ struct nix_tm_node *p;
+ int rc;
+
+ /* Enable nodes in path for flush to succeed */
+ if (!nix_tm_is_leaf(nix, node->lvl))
+ p = node;
+ else
+ p = node->parent;
+ while (p) {
+ if (!(p->flags & NIX_TM_NODE_ENABLED) &&
+ (p->flags & NIX_TM_NODE_HWRES)) {
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = p->hw_lvl;
+ req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
+ req->regval);
+ rc = mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ p->flags |= NIX_TM_NODE_ENABLED;
+ }
+ p = p->parent;
+ }
+
+ return 0;
+}
+
+int
+nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
+{
+ struct mbox *mbox = (&nix->dev)->mbox;
+ struct nix_txschq_config *req;
+ uint16_t smq;
+ int rc;
+
+ smq = node->hw_id;
+ plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
+ enable ? "enable" : "disable");
+
+ rc = nix_tm_clear_path_xoff(nix, node);
+ if (rc)
+ return rc;
+
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->num_regs = 1;
+
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
+ req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
+ req->regval_mask[0] =
+ enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
+
+ return mbox_process(mbox);
+}
+
+int
+nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
+ uint16_t *smq)
+{
+ struct nix_tm_node *node;
+ int rc;
+
+ node = nix_tm_node_search(nix, sq, nix->tm_tree);
+
+ /* Check if we found a valid leaf node */
+ if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
+ node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
+ return -EIO;
+ }
+
+ /* Get SMQ Id of leaf node's parent */
+ *smq = node->parent->hw_id;
+ *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
+
+ rc = nix_tm_smq_xoff(nix, node->parent, false);
+ if (rc)
+ return rc;
+ node->flags |= NIX_TM_NODE_ENABLED;
+ return 0;
+}
+
+int
+roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
+{
+ struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
+ uint16_t sqb_cnt, head_off, tail_off;
+ uint64_t wdata, val, prev;
+ uint16_t qid = sq->qid;
+ int64_t *regaddr;
+ uint64_t timeout; /* In units of 10 us */
+
+ /* Wait long enough based on the shaper minimum rate: worst-case bytes
+ * in the SQ, converted to bits, in units of 10 us (hence the 1E5 factor).
+ */
+ timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
+ /* Worst case, this SQ is the lowest priority and has to wait for all
+ * other SQs to drain out on their own.
+ */
+ timeout = timeout * nix->nb_tx_queues;
+ timeout = timeout / nix->tm_rate_min;
+ if (!timeout)
+ timeout = 10000;
+
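+ /* Query NIX_LF_SQ_OP_STATUS for this SQ (qid goes in wdata[63:32]);
+ * the returned value carries the SQB count and head/tail offsets
+ * decoded below.
+ */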
+ wdata = ((uint64_t)qid << 32);
+ regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
+ val = roc_atomic64_add_nosync(wdata, regaddr);
+
+ /* Spin for multiple iterations as "sq->fc_cache_pkts" can still
+ * allow packets to be sent even though fc_mem is disabled.
+ */
+
+ while (true) {
+ prev = val;
+ plt_delay_us(10);
+ val = roc_atomic64_add_nosync(wdata, regaddr);
+ /* Continue on error */
+ if (val & BIT_ULL(63))
+ continue;
+
+ if (prev != val)
+ continue;
+
+ sqb_cnt = val & 0xFFFF;
+ head_off = (val >> 20) & 0x3F;
+ tail_off = (val >> 28) & 0x3F;
+
+ /* SQ reached quiescent state */
+ if (sqb_cnt <= 1 && head_off == tail_off &&
+ (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
+ break;
+ }
+
+ /* Timeout */
+ if (!timeout)
+ goto exit;
+ timeout--;
+ }
+
+ return 0;
+exit:
+ roc_nix_queues_ctx_dump(sq->roc_nix);
+ return -EFAULT;
+}
+
+/* Flush and disable tx queue and its parent SMQ */
+int
+nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
+{
+ struct roc_nix *roc_nix = sq->roc_nix;
+ struct nix_tm_node *node, *sibling;
+ struct nix_tm_node_list *list;
+ enum roc_nix_tm_tree tree;
+ struct mbox *mbox;
+ struct nix *nix;
+ uint16_t qid;
+ int rc;
+
+ nix = roc_nix_to_nix_priv(roc_nix);
+
+ /* Need not do anything if tree is in disabled state */
+ if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
+ return 0;
+
+ mbox = (&nix->dev)->mbox;
+ qid = sq->qid;
+
+ tree = nix->tm_tree;
+ list = nix_tm_node_list(nix, tree);
+
+ /* Find the node for this SQ */
+ node = nix_tm_node_search(nix, qid, tree);
+ if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
+ plt_err("Invalid node/state for sq %u", qid);
+ return -EFAULT;
+ }
+
+ /* Enable CGX RXTX to drain pkts */
+ if (!roc_nix->io_enabled) {
+ /* Though this enables both RX MCAM entries and the CGX link,
+ * all the Rx queues are assumed to have been stopped long before.
+ */
+ mbox_alloc_msg_nix_lf_start_rx(mbox);
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("cgx start failed, rc=%d", rc);
+ return rc;
+ }
+ }
+
+ /* Disable SMQ xoff in case it was enabled earlier */
+ rc = nix_tm_smq_xoff(nix, node->parent, false);
+ if (rc) {
+ plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
+ rc);
+ return rc;
+ }
+
+ /* As per the HRM, to disable an SQ, all other SQs
+ * that feed the same SMQ must be paused before the SMQ flush.
+ */
+ TAILQ_FOREACH(sibling, list, node) {
+ if (sibling->parent != node->parent)
+ continue;
+ if (!(sibling->flags & NIX_TM_NODE_ENABLED))
+ continue;
+
+ qid = sibling->id;
+ sq = nix->sqs[qid];
+ if (!sq)
+ continue;
+
+ rc = roc_nix_tm_sq_aura_fc(sq, false);
+ if (rc) {
+ plt_err("Failed to disable sqb aura fc, rc=%d", rc);
+ goto cleanup;
+ }
+
+ /* Wait for sq entries to be flushed */
+ rc = roc_nix_tm_sq_flush_spin(sq);
+ if (rc) {
+ plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
+ return rc;
+ }
+ }
+
+ node->flags &= ~NIX_TM_NODE_ENABLED;
+
+ /* Disable and flush */
+ rc = nix_tm_smq_xoff(nix, node->parent, true);
+ if (rc) {
+ plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
+ rc);
+ goto cleanup;
+ }
+cleanup:
+ /* Restore cgx state */
+ if (!roc_nix->io_enabled) {
+ mbox_alloc_msg_nix_lf_stop_rx(mbox);
+ rc |= mbox_process(mbox);
+ }
+
+ return rc;
+}
+
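+/* Restore state after an SQ flush: re-enable the parent SMQ if sibling SQs
+ * remain and turn their SQB aura flow control back on.
+ */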
+int
+nix_tm_sq_flush_post(struct roc_nix_sq *sq)
+{
+ struct roc_nix *roc_nix = sq->roc_nix;
+ struct nix_tm_node *node, *sibling;
+ struct nix_tm_node_list *list;
+ enum roc_nix_tm_tree tree;
+ struct roc_nix_sq *s_sq;
+ bool once = false;
+ uint16_t qid, s_qid;
+ struct nix *nix;
+ int rc;
+
+ nix = roc_nix_to_nix_priv(roc_nix);
+
+ /* Need not do anything if tree is in disabled state */
+ if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
+ return 0;
+
+ qid = sq->qid;
+ tree = nix->tm_tree;
+ list = nix_tm_node_list(nix, tree);
+
+ /* Find the node for this SQ */
+ node = nix_tm_node_search(nix, qid, tree);
+ if (!node) {
+ plt_err("Invalid node for sq %u", qid);
+ return -EFAULT;
+ }
+
+ /* Re-enable all the siblings */
+ TAILQ_FOREACH(sibling, list, node) {
+ if (sibling->parent != node->parent)
+ continue;
+
+ if (sibling->id == qid)
+ continue;
+
+ if (!(sibling->flags & NIX_TM_NODE_ENABLED))
+ continue;
+
+ s_qid = sibling->id;
+ s_sq = nix->sqs[s_qid];
+ if (!s_sq)
+ continue;
+
+ if (!once) {
+ /* Re-enable the parent SMQ once if any sibling SQ is still present */
+ rc = nix_tm_smq_xoff(nix, node->parent, false);
+ if (rc) {
+ plt_err("Failed to enable smq %u, rc=%d",
+ node->parent->hw_id, rc);
+ return rc;
+ }
+ once = true;
+ }
+
+ rc = roc_nix_tm_sq_aura_fc(s_sq, true);
+ if (rc) {
+ plt_err("Failed to enable sqb aura fc, rc=%d", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int
+nix_tm_conf_init(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint32_t bmp_sz, hw_lvl;
+ void *bmp_mem;
+ int rc, i;
+
+ nix->tm_flags = 0;
+ for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
+ TAILQ_INIT(&nix->trees[i]);
+
+ TAILQ_INIT(&nix->shaper_profile_list);
+ nix->tm_rate_min = 1E9; /* 1Gbps */
+
+ rc = -ENOMEM;
+ bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
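+ /* A single allocation backs two bitmaps (contiguous and discontiguous)
+ * per HW scheduler level.
+ */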
+ bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
+ if (!bmp_mem)
+ return rc;
+ nix->schq_bmp_mem = bmp_mem;
+
+ /* Init contiguous and discontiguous bitmap per lvl */
+ rc = -EIO;
+ for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
+ /* Bitmap for discontiguous resource */
+ nix->schq_bmp[hw_lvl] =
+ plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
+ if (!nix->schq_bmp[hw_lvl])
+ goto exit;
+
+ bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
+
+ /* Bitmap for contiguous resource */
+ nix->schq_contig_bmp[hw_lvl] =
+ plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
+ if (!nix->schq_contig_bmp[hw_lvl])
+ goto exit;
+
+ bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
+ }
+
+ /* Disable TL1 static priority when VFs are enabled, as otherwise
+ * the VF's TL2 would need to be reallocated at runtime to support
+ * a specific PF topology.
+ */
+ if (nix->pci_dev->max_vfs)
+ nix->tm_flags |= NIX_TM_TL1_NO_SP;
+
+ /* TL1 access is only for PFs */
+ if (roc_nix_is_pf(roc_nix)) {
+ nix->tm_flags |= NIX_TM_TL1_ACCESS;
+ nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
+ } else {
+ nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
+ }
+
+ return 0;
+exit:
+ nix_tm_conf_fini(roc_nix);
+ return rc;
+}
+
+void
+nix_tm_conf_fini(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint16_t hw_lvl;
+
+ for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
+ plt_bitmap_free(nix->schq_bmp[hw_lvl]);
+ plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
+ }
+ plt_free(nix->schq_bmp_mem);
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
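+/* Enable or disable flow control on the SQ's SQB aura and resync the SW
+ * flow-control counter from the aura context.
+ */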
+int
+roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
+{
+ struct npa_aq_enq_req *req;
+ struct npa_aq_enq_rsp *rsp;
+ uint64_t aura_handle;
+ struct npa_lf *lf;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
+ enable ? "enable" : "disable");
+
+ lf = idev_npa_obj_get();
+ if (!lf)
+ return NPA_ERR_DEVICE_NOT_BOUNDED;
+
+ mbox = lf->mbox;
+ /* Set/clear sqb aura fc_ena */
+ aura_handle = sq->aura_handle;
+ req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (req == NULL)
+ return rc;
+
+ req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_WRITE;
+ /* Not needed for aura writes, but the AF driver expects it;
+ * the AF translates it to the associated pool context.
+ */
+ req->aura.pool_addr = req->aura_id;
+
+ req->aura.fc_ena = enable;
+ req->aura_mask.fc_ena = 1;
+
+ rc = mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ /* Read back npa aura ctx */
+ req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (req == NULL)
+ return -ENOSPC;
+
+ req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_READ;
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ /* Init when enabled as there might be no triggers */
+ if (enable)
+ *(volatile uint64_t *)sq->fc = rsp->aura.count;
+ else
+ *(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
+ /* Sync write barrier */
+ plt_wmb();
+ return 0;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+struct nix_tm_node *
+nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
+{
+ struct nix_tm_node_list *list;
+ struct nix_tm_node *node;
+
+ list = nix_tm_node_list(nix, tree);
+ TAILQ_FOREACH(node, list, node) {
+ if (node->id == node_id)
+ return node;
+ }
+ return NULL;
+}
+
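+/* Prepare a SW_XOFF register/value pair for the node's HW level; returns
+ * the number of register entries filled.
+ */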
+uint8_t
+nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
+ volatile uint64_t *reg, volatile uint64_t *regval)
+{
+ uint32_t hw_lvl = node->hw_lvl;
+ uint32_t schq = node->hw_id;
+ uint8_t k = 0;
+
+ plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
+ nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
+ node);
+
+ regval[k] = enable;
+
+ switch (hw_lvl) {
+ case NIX_TXSCH_LVL_MDQ:
+ reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
+ k++;
+ break;
+ default:
+ break;
+ }
+
+ return k;
+}
RTE_LOG_REGISTER(cnxk_logtype_mbox, pmd.cnxk.mbox, NOTICE);
RTE_LOG_REGISTER(cnxk_logtype_npa, pmd.mempool.cnxk, NOTICE);
RTE_LOG_REGISTER(cnxk_logtype_nix, pmd.net.cnxk, NOTICE);
+RTE_LOG_REGISTER(cnxk_logtype_tm, pmd.net.cnxk.tm, NOTICE);
extern int cnxk_logtype_mbox;
extern int cnxk_logtype_npa;
extern int cnxk_logtype_nix;
+extern int cnxk_logtype_tm;
#define plt_err(fmt, args...) \
RTE_LOG(ERR, PMD, "%s():%u " fmt "\n", __func__, __LINE__, ##args)
#define plt_mbox_dbg(fmt, ...) plt_dbg(mbox, fmt, ##__VA_ARGS__)
#define plt_npa_dbg(fmt, ...) plt_dbg(npa, fmt, ##__VA_ARGS__)
#define plt_nix_dbg(fmt, ...) plt_dbg(nix, fmt, ##__VA_ARGS__)
+#define plt_tm_dbg(fmt, ...) plt_dbg(tm, fmt, ##__VA_ARGS__)
#ifdef __cplusplus
#define CNXK_PCI_ID(subsystem_dev, dev) \
cnxk_logtype_mbox;
cnxk_logtype_nix;
cnxk_logtype_npa;
+ cnxk_logtype_tm;
roc_clk_freq_get;
roc_error_msg_get;
roc_idev_lmt_base_addr_get;
roc_nix_xstats_names_get;
roc_nix_switch_hdr_set;
roc_nix_eeprom_info_get;
+ roc_nix_tm_sq_aura_fc;
+ roc_nix_tm_sq_flush_spin;
roc_nix_unregister_cq_irqs;
roc_nix_unregister_queue_irqs;
roc_nix_vlan_insert_ena_dis;