#define ROC_NIX_LF_RX_CFG_LEN_OL3 BIT_ULL(41)
/* Group 0 will be used for RSS, 1-7 will be used for npc_flow RSS action */
-#define ROC_NIX_RSS_GROUP_DEFAULT 0
-#define ROC_NIX_RSS_GRPS 8
-#define ROC_NIX_RSS_RETA_MAX ROC_NIX_RSS_RETA_SZ_256
-#define ROC_NIX_RSS_KEY_LEN 48 /* 352 Bits */
+#define ROC_NIX_RSS_GROUP_DEFAULT 0
+#define ROC_NIX_RSS_GRPS 8
+#define ROC_NIX_RSS_RETA_MAX ROC_NIX_RSS_RETA_SZ_256
+#define ROC_NIX_RSS_KEY_LEN 48 /* 352 Bits */
+#define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1)
#define ROC_NIX_DEFAULT_HW_FRS 1514
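For context, a minimal sketch of how the RSS defines above are typically consumed. The helper name setup_default_rss and the nb_rxq parameter are illustrative, and the roc_nix_rss_reta_set() wrapper signature is assumed from the group dispatcher shown further below in this patch; this is not part of the change itself.

#include "roc_api.h"

/* Illustrative only: spread flows round-robin across nb_rxq queues using
 * the default RSS group. Assumes roc_nix_rss_reta_set(roc_nix, group, reta)
 * as implied by the cn9k/cn10k dispatcher later in this patch.
 */
static int
setup_default_rss(struct roc_nix *roc_nix, uint16_t nb_rxq)
{
	uint16_t reta[ROC_NIX_RSS_RETA_MAX];
	unsigned int i;

	for (i = 0; i < ROC_NIX_RSS_RETA_MAX; i++)
		reta[i] = i % nb_rxq;

	return roc_nix_rss_reta_set(roc_nix, ROC_NIX_RSS_GROUP_DEFAULT, reta);
}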
enum roc_nix_sq_max_sqe_sz max_sqe_sz;
uint32_t nb_desc;
uint16_t qid;
+ bool sso_ena;
/* End of Input parameters */
uint16_t sqes_per_sqb_log2;
struct roc_nix *roc_nix;
uint16_t max_sqb_count;
enum roc_nix_rss_reta_sz reta_sz;
bool enable_loop;
+ bool hw_vlan_ins;
+ uint8_t lock_rx_ctx;
/* End of input parameters */
	/* LMT line base for "Per Core Tx LMT line" mode */
uintptr_t lmt_base;
void (*free_fn)(void *profile);
};
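A short sketch of where the new input flags added above are intended to be set. The helper name and the surrounding call flow are assumptions; the field names and their effects follow the hunks elsewhere in this patch (RSS context locking, SMQ VLAN-insertion bits, SQ context sso_ena).

#include "roc_api.h"

/* Hypothetical helper: set the new input flags before the LF/SQ init calls.
 * Field names come from this patch; everything around them is assumed.
 */
static void
cnxk_enable_new_flags(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
{
	/* Lock Rx contexts, e.g. RSS AQ entries via NIX_AQ_INSTOP_LOCK */
	roc_nix->lock_rx_ctx = 1;
	/* Reserve SMQ config bits 38:36 for HW VLAN tag insertion */
	roc_nix->hw_vlan_ins = true;
	/* Hand SQEs of this SQ to SSO (mirrors aq->sq.sso_ena below) */
	sq->sso_ena = true;
}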
+enum roc_nix_tm_node_stats_type {
+ ROC_NIX_TM_NODE_PKTS_DROPPED,
+ ROC_NIX_TM_NODE_BYTES_DROPPED,
+ ROC_NIX_TM_NODE_GREEN_PKTS,
+ ROC_NIX_TM_NODE_GREEN_BYTES,
+ ROC_NIX_TM_NODE_YELLOW_PKTS,
+ ROC_NIX_TM_NODE_YELLOW_BYTES,
+ ROC_NIX_TM_NODE_RED_PKTS,
+ ROC_NIX_TM_NODE_RED_BYTES,
+ ROC_NIX_TM_NODE_STATS_MAX,
+};
+
+struct roc_nix_tm_node_stats {
+ uint64_t stats[ROC_NIX_TM_NODE_STATS_MAX];
+};
+
int __roc_api roc_nix_tm_node_add(struct roc_nix *roc_nix,
struct roc_nix_tm_node *roc_node);
int __roc_api roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
struct roc_nix_tm_shaper_profile *__roc_api roc_nix_tm_shaper_profile_next(
struct roc_nix *roc_nix, struct roc_nix_tm_shaper_profile *__prev);
+int __roc_api roc_nix_tm_node_stats_get(struct roc_nix *roc_nix,
+ uint32_t node_id, bool clear,
+ struct roc_nix_tm_node_stats *stats);
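A hedged usage sketch for the new roc_nix_tm_node_stats_get() API declared above: it reads and clears the per-color counters of a user-created TL1-level node. The caller's roc_nix handle and node_id are assumed to already exist, and plain printf is used rather than any driver logger.

#include <inttypes.h>
#include <stdio.h>

#include "roc_api.h"

/* Illustrative only: read-and-clear TM node stats for a TL1-level node. */
static int
show_tl1_node_stats(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct roc_nix_tm_node_stats stats;
	int rc;

	/* clear=true resets the HW counters once they have been read */
	rc = roc_nix_tm_node_stats_get(roc_nix, node_id, true, &stats);
	if (rc)
		return rc;

	printf("dropped %" PRIu64 " pkts / %" PRIu64 " bytes\n",
	       stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED],
	       stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED]);
	return 0;
}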
/*
* TM ratelimit tree API.
*/
aq->sq.default_chan = nix->tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF;
aq->sq.ena = 1;
+ aq->sq.sso_ena = !!sq->sso_ena;
if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
aq->sq.sqe_stype = NIX_STYPE_STP;
aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
aq->sq.default_chan = nix->tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF;
aq->sq.ena = 1;
+ aq->sq.sso_ena = !!sq->sso_ena;
if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
aq->sq.sqe_stype = NIX_STYPE_STP;
aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
static int
nix_cn9k_rss_reta_set(struct nix *nix, uint8_t group,
- uint16_t reta[ROC_NIX_RSS_RETA_MAX])
+ uint16_t reta[ROC_NIX_RSS_RETA_MAX], uint8_t lock_rx_ctx)
{
struct mbox *mbox = (&nix->dev)->mbox;
struct nix_aq_enq_req *req;
req->qidx = (group * nix->reta_sz) + idx;
req->ctype = NIX_AQ_CTYPE_RSS;
req->op = NIX_AQ_INSTOP_INIT;
+
+ if (!lock_rx_ctx)
+ continue;
+
+ req = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ rc = mbox_process(mbox);
+ if (rc < 0)
+ return rc;
+ req = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!req)
+ return NIX_ERR_NO_MEM;
+ }
+ req->rss.rq = reta[idx];
+ /* Fill AQ info */
+ req->qidx = (group * nix->reta_sz) + idx;
+ req->ctype = NIX_AQ_CTYPE_RSS;
+ req->op = NIX_AQ_INSTOP_LOCK;
}
rc = mbox_process(mbox);
static int
nix_rss_reta_set(struct nix *nix, uint8_t group,
- uint16_t reta[ROC_NIX_RSS_RETA_MAX])
+ uint16_t reta[ROC_NIX_RSS_RETA_MAX], uint8_t lock_rx_ctx)
{
struct mbox *mbox = (&nix->dev)->mbox;
struct nix_cn10k_aq_enq_req *req;
req->qidx = (group * nix->reta_sz) + idx;
req->ctype = NIX_AQ_CTYPE_RSS;
req->op = NIX_AQ_INSTOP_INIT;
+
+ if (!lock_rx_ctx)
+ continue;
+
+ req = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ rc = mbox_process(mbox);
+ if (rc < 0)
+ return rc;
+ req = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!req)
+ return NIX_ERR_NO_MEM;
+ }
+ req->rss.rq = reta[idx];
+ /* Fill AQ info */
+ req->qidx = (group * nix->reta_sz) + idx;
+ req->ctype = NIX_AQ_CTYPE_RSS;
+ req->op = NIX_AQ_INSTOP_LOCK;
}
rc = mbox_process(mbox);
return NIX_ERR_PARAM;
if (roc_model_is_cn9k())
- rc = nix_cn9k_rss_reta_set(nix, group, reta);
+ rc = nix_cn9k_rss_reta_set(nix, group, reta,
+ roc_nix->lock_rx_ctx);
else
- rc = nix_rss_reta_set(nix, group, reta);
+ rc = nix_rss_reta_set(nix, group, reta, roc_nix->lock_rx_ctx);
if (rc)
return rc;
volatile uint64_t *reg, volatile uint64_t *regval,
volatile uint64_t *regval_mask)
{
+ struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
uint8_t k = 0, hw_lvl, parent_lvl;
uint64_t parent = 0, child = 0;
enum roc_nix_tm_tree tree;
reg[k] = NIX_AF_SMQX_CFG(schq);
regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
((nix->mtu & 0xFFFF) << 8));
- regval_mask[k] =
- ~(BIT_ULL(50) | GENMASK_ULL(6, 0) | GENMASK_ULL(23, 8));
+ /* Maximum Vtag insertion size as a multiple of four bytes */
+ if (roc_nix->hw_vlan_ins)
+ regval[k] |= (0x2ULL << 36);
+ regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
+ GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
k++;
/* Parent and schedule conf */
(profile->free_fn)(profile);
}
+
+int
+roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
+ struct roc_nix_tm_node_stats *n_stats)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct mbox *mbox = (&nix->dev)->mbox;
+ struct nix_txschq_config *req, *rsp;
+ struct nix_tm_node *node;
+ uint32_t schq;
+ int rc, i;
+
+ node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
+ if (!node)
+ return NIX_ERR_TM_INVALID_NODE;
+
+ if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
+ return NIX_ERR_OP_NOTSUP;
+
+ schq = node->hw_id;
+ /* Skip fetch if not requested */
+ if (!n_stats)
+ goto clear_stats;
+
+ memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));
+ /* Check if node has HW resource */
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ return 0;
+
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->read = 1;
+ req->lvl = NIX_TXSCH_LVL_TL1;
+
+ i = 0;
+ req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
+ req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
+ req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
+ req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
+ req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
+ req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
+ req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
+ req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
+ req->num_regs = i;
+
+ rc = mbox_process_msg(mbox, (void **)&rsp);
+ if (rc)
+ return rc;
+
+ /* Return stats */
+ n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
+ n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
+ n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
+ n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
+ n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
+ n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
+ n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
+ n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];
+
+clear_stats:
+ if (!clear)
+ return 0;
+
+ /* Clear all the stats */
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = NIX_TXSCH_LVL_TL1;
+ i = 0;
+ req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
+ req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
+ req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
+ req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
+ req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
+ req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
+ req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
+ req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
+ req->num_regs = i;
+
+ return mbox_process_msg(mbox, (void **)&rsp);
+}
#define plt_memzone_reserve_cache_align(name, sz) \
rte_memzone_reserve_aligned(name, sz, 0, 0, RTE_CACHE_LINE_SIZE)
#define plt_memzone_free rte_memzone_free
+#define plt_memzone_reserve_aligned(name, len, flags, align) \
+ rte_memzone_reserve_aligned((name), (len), 0, (flags), (align))
#define plt_tsc_hz rte_get_tsc_hz
#define plt_delay_ms rte_delay_ms
roc_nix_tm_node_parent_update;
roc_nix_tm_node_pkt_mode_update;
roc_nix_tm_node_shaper_update;
+ roc_nix_tm_node_stats_get;
roc_nix_tm_node_suspend_resume;
roc_nix_tm_prealloc_res;
roc_nix_tm_rlimit_sq;