msg_rsp) \
M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+ M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req, \
npa_lf_alloc_rsp) \
uint8_t __io tx_pause;
};
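+/* Mbox request asking AF to configure 802.1Qbb priority flow control
+ * on the CGX link. pfc_en is a TC bitmap; e.g. BIT(0) | BIT(3)
+ * (illustrative values) covers traffic classes 0 and 3.
+ */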
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ uint8_t __io rx_pause;
+ uint8_t __io tx_pause;
+ uint16_t __io pfc_en; /* bitmap indicating enabled traffic classes */
+};
+
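+/* Mbox response echoing the pause state the AF applied. */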
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __io rx_pause;
+ uint8_t __io tx_pause;
+};
+
struct sfp_eeprom_s {
#define SFP_EEPROM_SIZE 256
uint16_t __io sff_id;
/* PF can be mapped to either CGX or LBK interface,
* so maximum 64 channels are possible.
*/
-#define NIX_MAX_CHAN 64
+#define NIX_MAX_CHAN     64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN 1
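+/* A CGX LMAC exposes one channel per PFC class (16); an LBK
+ * interface exposes a single channel.
+ */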
struct nix_bp_cfg_rsp {
struct mbox_msghdr hdr;
/* Channel and bpid mapping */
struct {
uint32_t rq;
+ uint16_t tc;
uint16_t cq_drop;
bool enable;
} cq_cfg;
struct {
+ uint32_t sq;
+ uint16_t tc;
bool enable;
} tm_cfg;
};
};
+struct roc_nix_pfc_cfg {
+ enum roc_nix_fc_mode mode;
+ /* For SET, tc is a traffic class index in [0, 15].
+ * For GET, tc is returned as a bitmap of TCs with PFC enabled.
+ */
+ uint16_t tc;
+};
+
struct roc_nix_eeprom_info {
#define ROC_NIX_EEPROM_SIZE 256
uint16_t sff_id;
enum roc_nix_tm_tree {
ROC_NIX_TM_DEFAULT = 0,
ROC_NIX_TM_RLIMIT,
+ ROC_NIX_TM_PFC,
ROC_NIX_TM_USER,
ROC_NIX_TM_TREE_MAX,
};
int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
enum roc_nix_fc_mode mode);
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+ struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+ struct roc_nix_pfc_cfg *pfc_cfg);
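+
+/* Usage sketch (hypothetical caller; the TC value is illustrative):
+ *
+ *	struct roc_nix_pfc_cfg cfg = {
+ *		.mode = ROC_NIX_FC_FULL,
+ *		.tc = 3,
+ *	};
+ *	rc = roc_nix_pfc_mode_set(roc_nix, &cfg);
+ *	if (!rc)
+ *		rc = roc_nix_pfc_mode_get(roc_nix, &cfg);
+ */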
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
struct mbox *mbox = get_mbox(roc_nix);
struct nix_bp_cfg_req *req;
struct nix_bp_cfg_rsp *rsp;
- int rc = -ENOSPC;
+ int rc = -ENOSPC, i;
if (roc_nix_is_sdp(roc_nix))
return 0;
req = mbox_alloc_msg_nix_bp_enable(mbox);
if (req == NULL)
return rc;
+
req->chan_base = 0;
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
+ if (roc_nix_is_lbk(roc_nix))
+ req->chan_cnt = NIX_LBK_MAX_CHAN;
+ else
+ req->chan_cnt = NIX_CGX_MAX_CHAN;
+
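+ /* Request a distinct bpid per channel so each TC can be
+ * backpressured independently.
+ */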
+ req->bpid_per_chan = true;
rc = mbox_process_msg(mbox, (void *)&rsp);
if (rc || (req->chan_cnt != rsp->chan_cnt))
goto exit;
- nix->bpid[0] = rsp->chan_bpid[0];
nix->chan_cnt = rsp->chan_cnt;
+ for (i = 0; i < rsp->chan_cnt; i++)
+ nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
} else {
req = mbox_alloc_msg_nix_bp_disable(mbox);
if (req == NULL)
return rc;
req->chan_base = 0;
- req->chan_cnt = 1;
+ req->chan_cnt = nix->chan_cnt;
rc = mbox_process(mbox);
if (rc)
aq->op = NIX_AQ_INSTOP_WRITE;
if (fc_cfg->cq_cfg.enable) {
- aq->cq.bpid = nix->bpid[0];
+ aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
aq->op = NIX_AQ_INSTOP_WRITE;
if (fc_cfg->cq_cfg.enable) {
- aq->cq.bpid = nix->bpid[0];
+ aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
return nix_fc_rxchan_bpid_set(roc_nix,
fc_cfg->rxchan_cfg.enable);
else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
- return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+ return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+ fc_cfg->tm_cfg.tc,
+ fc_cfg->tm_cfg.enable);
return -EINVAL;
}
mbox_process(mbox);
}
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct mbox *mbox = get_mbox(roc_nix);
+ uint8_t tx_pause, rx_pause;
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int rc = -ENOSPC;
+
+ if (roc_nix_is_lbk(roc_nix))
+ return NIX_ERR_OP_NOTSUP;
+
+ rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+ (pfc_cfg->mode == ROC_NIX_FC_RX);
+ tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+ (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+ req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+ if (req == NULL)
+ goto exit;
+
+ req->pfc_en = BIT(pfc_cfg->tc);
+ req->rx_pause = rx_pause;
+ req->tx_pause = tx_pause;
+
+ rc = mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto exit;
+
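+ /* Cache the applied pause state; nix->cev tracks which TCs have
+ * PFC enabled.
+ */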
+ nix->rx_pause = rsp->rx_pause;
+ nix->tx_pause = rsp->tx_pause;
+ if (rsp->tx_pause)
+ nix->cev |= BIT(pfc_cfg->tc);
+ else
+ nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+ return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if (roc_nix_is_lbk(roc_nix))
+ return NIX_ERR_OP_NOTSUP;
+
+ pfc_cfg->tc = nix->cev;
+
+ if (nix->rx_pause && nix->tx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_FULL;
+ else if (nix->rx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_RX;
+ else if (nix->tx_pause)
+ pfc_cfg->mode = ROC_NIX_FC_TX;
+ else
+ pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+ return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ return nix->chan_cnt;
+}
/* Traffic Manager */
#define NIX_TM_MAX_HW_TXSCHQ 512
#define NIX_TM_HW_ID_INVALID UINT32_MAX
+#define NIX_TM_CHAN_INVALID UINT16_MAX
/* TM flags */
#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
uint32_t priority;
uint32_t weight;
uint16_t lvl;
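+ /* TC currently backpressured via this node; NIX_TM_CHAN_INVALID
+ * when none.
+ */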
+ uint16_t rel_chan;
uint32_t parent_id;
uint32_t shaper_profile_id;
void (*free_fn)(void *node);
uint16_t msixoff;
uint8_t rx_pause;
uint8_t tx_pause;
+ uint16_t cev; /* bitmap of TCs with PFC enabled */
uint64_t rx_cfg;
struct dev dev;
uint16_t cints;
bool ena);
int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ bool enable);
void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
/*
if (is_pf_or_lbk && !skip_bp &&
node->hw_lvl == nix->tm_link_cfg_lvl) {
node->bp_capa = 1;
- skip_bp = true;
+ skip_bp = false;
}
rc = nix_tm_node_reg_conf(nix, node);
}
int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ bool enable)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
enum roc_nix_tm_tree tree = nix->tm_tree;
struct mbox *mbox = (&nix->dev)->mbox;
struct nix_txschq_config *req = NULL;
struct nix_tm_node_list *list;
+ struct nix_tm_node *sq_node;
+ struct nix_tm_node *parent;
struct nix_tm_node *node;
uint8_t k = 0;
uint16_t link;
int rc = 0;
+ sq_node = nix_tm_node_search(nix, sq, tree);
+ if (!sq_node)
+ return -ENOENT;
+
+ parent = sq_node->parent;
+ while (parent) {
+ if (parent->lvl == ROC_TM_LVL_SCH2)
+ break;
+
+ parent = parent->parent;
+ }
+
list = nix_tm_node_list(nix, tree);
link = nix->tx_link;
+ if (!parent ||
+ (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc)) {
+ rc = -EINVAL;
+ goto err;
+ }
+
TAILQ_FOREACH(node, list, node) {
if (node->hw_lvl != nix->tm_link_cfg_lvl)
continue;
if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
continue;
+ if (node->hw_id != parent->hw_id)
+ continue;
+
+ if (!req) {
req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ if (!req) {
+ rc = -ENOSPC;
+ goto err;
+ }
req->lvl = nix->tm_link_cfg_lvl;
}
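+ /* LINKX_CFG bits 7:0 select the relative channel (TC); bit 13 is
+ * the backpressure enable.
+ */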
req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
- req->regval[k] = enable ? BIT_ULL(13) : 0;
- req->regval_mask[k] = ~BIT_ULL(13);
+ req->regval[k] = enable ? tc : 0;
+ req->regval[k] |= enable ? BIT_ULL(13) : 0;
+ req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
k++;
if (k >= MAX_REGS_PER_MBOX_MSG) {
goto err;
}
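+ /* Record the TC this TL node now backpressures. */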
+ parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
return 0;
err:
plt_err("Failed to %s bp on link %u, rc=%d(%s)",
}
/* Disable backpressure */
- rc = nix_tm_bp_config_set(roc_nix, false);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
if (rc) {
plt_err("Failed to disable backpressure for flush, rc=%d", rc);
return rc;
return 0;
/* Restore backpressure */
- rc = nix_tm_bp_config_set(roc_nix, true);
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
if (rc) {
plt_err("Failed to restore backpressure, rc=%d", rc);
return rc;
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = lvl;
node->tree = ROC_NIX_TM_DEFAULT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = leaf_lvl;
node->tree = ROC_NIX_TM_DEFAULT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = lvl;
node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = lvl;
node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
node->lvl = leaf_lvl;
node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+error:
+ nix_tm_node_free(node);
+ return rc;
+}
+
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint32_t nonleaf_id = nix->nb_tx_queues;
+ struct nix_tm_node *node = NULL;
+ uint8_t leaf_lvl, lvl, lvl_end;
+ uint32_t tl2_node_id;
+ uint32_t parent, i;
+ int rc = -ENOMEM;
+
+ parent = ROC_NIX_TM_NODE_ID_INVALID;
+ lvl_end = ROC_TM_LVL_SCH3;
+ leaf_lvl = ROC_TM_LVL_QUEUE;
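+ /* Static PFC hierarchy, one SCH2..QUEUE chain per SQ:
+ *
+ *	ROOT -> SCH1 -> (per SQ) SCH2 -> SCH3 -> SCH4 -> QUEUE
+ */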
+
+ /* TL1 node */
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_ROOT;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ /* TL2 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_SCH1;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ tl2_node_id = nonleaf_id;
+ nonleaf_id++;
+
+ for (i = 0; i < nix->nb_tx_queues; i++) {
+ parent = tl2_node_id;
+ for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id =
+ ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+ }
+
+ lvl = ROC_TM_LVL_SCH4;
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = i;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = leaf_lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
rc = nix_tm_node_add(roc_nix, node);
if (rc)
/* Disable backpressure, it will be enabled back if needed on
* hierarchy enable
*/
- rc = nix_tm_bp_config_set(roc_nix, false);
- if (rc) {
- plt_err("Failed to disable backpressure for flush, rc=%d", rc);
- goto cleanup;
+ for (i = 0; i < sq_cnt; i++) {
+ sq = nix->sqs[i];
+ if (!sq)
+ continue;
+
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ if (rc) {
+ plt_err("Failed to disable backpressure, rc=%d", rc);
+ goto cleanup;
+ }
}
/* Flush all tx queues */
roc_nix_bpf_stats_reset;
roc_nix_bpf_stats_to_idx;
roc_nix_bpf_timeunit_get;
+ roc_nix_chan_count_get;
roc_nix_cq_dump;
roc_nix_cq_fini;
roc_nix_cq_head_tail_get;
roc_nix_npc_promisc_ena_dis;
roc_nix_npc_rx_ena_dis;
roc_nix_npc_mcast_config;
+ roc_nix_pfc_mode_get;
+ roc_nix_pfc_mode_set;
roc_nix_ptp_clock_read;
roc_nix_ptp_info_cb_register;
roc_nix_ptp_info_cb_unregister;
roc_nix_tm_node_stats_get;
roc_nix_tm_node_suspend_resume;
+ roc_nix_tm_pfc_prepare_tree;
roc_nix_tm_prealloc_res;
roc_nix_tm_prepare_rate_limited_tree;
roc_nix_tm_rlimit_sq;
roc_nix_tm_root_has_sp;