struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = rq->qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_WRITE;
struct nix_cn10k_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = rq->qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_WRITE;
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = rq->qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
struct mbox *mbox = dev->mbox;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = rq->qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
aq->rq.qint_idx = rq->qid % qints;
- aq->rq.xqe_drop_ena = 1;
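+ /* Disable XQE drop; LPB and SPB drops follow the RQ configuration */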
+ aq->rq.xqe_drop_ena = 0;
+ aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
+ aq->rq.spb_drop_ena = rq->spb_drop_ena;
/* If RED enabled, then fill enable for all cases */
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
+ aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
+ aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = cq->qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_INIT;
struct nix_cn10k_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = cq->qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_INIT;
/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
cq_ctx->cint_idx = cq->qid;
- if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
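+ /* Affected silicon requires a minimum CQ size of 4K entries and extra
+ * drop skid to work around the CQ_FULL errata
+ */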
+ if (roc_errata_nix_has_cq_min_size_4k()) {
const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
uint16_t min_rx_drop;
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = cq->qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_WRITE;
struct nix_cn10k_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = cq->qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_WRITE;
memset(&aura, 0, sizeof(aura));
aura.fc_ena = 1;
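+ /* Use the STF store type for flow-control updates where STSTP is not
+ * available (cn9k, or silicon hit by the NPA STSTP errata)
+ */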
- if (roc_model_is_cn9k() || roc_model_is_cn10ka_a0())
+ if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp())
aura.fc_stype = 0x0; /* STF */
else
aura.fc_stype = 0x3; /* STSTP */
roc_npa_aura_op_free(sq->aura_handle, 0, iova);
iova += blk_sz;
}
+
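+ /* Ensure all NIX_MAX_SQB pointers freed above are visible in the aura
+ * before programming the pool range and limit
+ */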
+ if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
+ NIX_MAX_SQB) {
+ plt_err("Failed to free all pointers to the pool");
+ rc = NIX_ERR_NO_MEM;
+ goto npa_fail;
+ }
+
roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
sq->aura_sqb_bufs = NIX_MAX_SQB;
return rc;
+npa_fail:
+ plt_free(sq->sqe_mem);
nomem:
roc_npa_pool_destroy(sq->aura_handle);
fail:
return rc;
}
-static void
+static int
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
uint16_t smq)
{
+ struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
struct mbox *mbox = (&nix->dev)->mbox;
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_INIT;
aq->sq.max_sqe_size = sq->max_sqe_sz;
aq->sq.smq = smq;
aq->sq.smq_rr_quantum = rr_quantum;
- aq->sq.default_chan = nix->tx_chan_base;
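+ /* SDP interfaces expose multiple TX channels; spread SQs across them */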
+ if (roc_nix_is_sdp(roc_nix))
+ aq->sq.default_chan =
+ nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
+ else
+ aq->sq.default_chan = nix->tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF;
aq->sq.ena = 1;
aq->sq.sso_ena = !!sq->sso_ena;
aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
/* Many to one reduction */
- aq->sq.qint_idx = sq->qid % nix->qints;
+ /* Assign QINT 0 to all SQs. An errata exists where NIXTX can send an
+ * incorrect QINT_IDX when reporting a queue interrupt (QINT), which
+ * might result in software missing the interrupt.
+ */
+ aq->sq.qint_idx = 0;
+ return 0;
}
static int
int rc, count;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_READ;
/* Disable sq */
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_WRITE;
/* Read SQ and free sqb's */
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_READ;
return 0;
}
-static void
+static int
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
uint16_t smq)
{
struct nix_cn10k_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_INIT;
aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
- /* Many to one reduction */
- aq->sq.qint_idx = sq->qid % nix->qints;
+ /* Assign QINT 0 to all SQs. An errata exists where NIXTX can send an
+ * incorrect QINT_IDX when reporting a queue interrupt (QINT), which
+ * might result in software missing the interrupt.
+ */
+ aq->sq.qint_idx = 0;
+ return 0;
}
static int
int rc, count;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_READ;
/* Disable sq */
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_WRITE;
/* Read SQ and free sqb's */
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_READ;
/* Init SQ context */
if (roc_model_is_cn9k())
- sq_cn9k_init(nix, sq, rr_quantum, smq);
+ rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
else
- sq_init(nix, sq, rr_quantum, smq);
+ rc = sq_init(nix, sq, rr_quantum, smq);
+
+ if (rc)
+ goto nomem;
rc = mbox_process(mbox);
if (rc)
return rc;
}
+
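+/* Report the current head and tail indexes of a CQ.
+ * NIX_LF_CQ_OP_STATUS returns the tail in bits [19:0] and the head in
+ * bits [39:20]; both are reported as zero on OP_ERR or CQ_ERR.
+ */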
+void
+roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
+ uint32_t *tail)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint64_t reg, val;
+ int64_t *addr;
+
+ if (head == NULL || tail == NULL)
+ return;
+
+ reg = (((uint64_t)qid) << 32);
+ addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
+ val = roc_atomic64_add_nosync(reg, addr);
+ if (val &
+ (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
+ val = 0;
+
+ *tail = (uint32_t)(val & 0xFFFFF);
+ *head = (uint32_t)((val >> 20) & 0xFFFFF);
+}
+
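+/* Report the current head and tail SQE indexes of an SQ.
+ * NIX_LF_SQ_OP_STATUS returns the SQB count in bits [15:0], the head
+ * offset in bits [25:20] and the tail offset in bits [33:28]; the tail is
+ * then advanced by the SQEs held in the already used SQBs.
+ */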
+void
+roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
+ uint32_t *tail)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct roc_nix_sq *sq = nix->sqs[qid];
+ uint16_t sqes_per_sqb, sqb_cnt;
+ uint64_t reg, val;
+ int64_t *addr;
+
+ if (head == NULL || tail == NULL)
+ return;
+
+ reg = (((uint64_t)qid) << 32);
+ addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
+ val = roc_atomic64_add_nosync(reg, addr);
+ if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) {
+ *head = 0;
+ *tail = 0;
+ return;
+ }
+
+ *tail = (uint32_t)((val >> 28) & 0x3F);
+ *head = (uint32_t)((val >> 20) & 0x3F);
+ sqb_cnt = (uint16_t)(val & 0xFFFF);
+
+ sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
+
+ /* Update tail index as per used sqb count */
+ if (sqb_cnt)
+ *tail += (sqes_per_sqb * (sqb_cnt - 1));
+}