return i;
}
+/* Wait out any in-flight vectorized WQE (VWQE) aggregation for @rq.
+ * CN10K only; returns immediately on other models.  Instead of forcing a
+ * flush through the VWQE_FLUSH register (which a HW erratum makes liable
+ * to hang), delay for the worst-case VWQE timeout so pending aggregation
+ * completes on its own.
+ */
+void
+nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
+{
+ uint64_t wait_ns;
+
+ if (!roc_model_is_cn10k())
+ return;
+ /* Due to HW errata writes to VWQE_FLUSH might hang, so instead
+ * wait for max vwqe timeout interval.
+ */
+ if (rq->vwqe_ena) {
+ /* wait_tmo * (interval + 1) * 100 — presumably a 100 ns HW
+ * tick; TODO confirm against the NIX register spec.
+ */
+ wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
+ plt_delay_us((wait_ns / 1E3) + 1);
+ }
+}
+
int
nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
{
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = rq->qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_WRITE;
struct nix_cn10k_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = rq->qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_WRITE;
int rc;
rc = nix_rq_ena_dis(&nix->dev, rq, enable);
+ nix_rq_vwqe_flush(rq, nix->vwqe_interval);
- if (roc_model_is_cn10k())
- plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
return rc;
}
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = rq->qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
struct mbox *mbox = dev->mbox;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = rq->qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
if (rc)
return rc;
- return mbox_process(mbox);
+ rc = mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ return nix_tel_node_add_rq(rq);
}
int
if (rc)
return rc;
- return mbox_process(mbox);
+ rc = mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ return nix_tel_node_add_rq(rq);
}
int
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = cq->qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_INIT;
struct nix_cn10k_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = cq->qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_INIT;
if (rc)
goto free_mem;
- return 0;
+ return nix_tel_node_add_cq(cq);
free_mem:
plt_free(cq->desc_base);
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = cq->qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_WRITE;
struct nix_cn10k_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = cq->qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_WRITE;
roc_npa_aura_op_free(sq->aura_handle, 0, iova);
iova += blk_sz;
}
+
+ if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
+ NIX_MAX_SQB) {
+ plt_err("Failed to free all pointers to the pool");
+ rc = NIX_ERR_NO_MEM;
+ goto npa_fail;
+ }
+
roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
sq->aura_sqb_bufs = NIX_MAX_SQB;
return rc;
+npa_fail:
+ plt_free(sq->sqe_mem);
nomem:
roc_npa_pool_destroy(sq->aura_handle);
fail:
return rc;
}
-static void
+static int
sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
uint16_t smq)
{
struct nix_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_INIT;
aq->sq.sqe_stype = NIX_STYPE_STF;
aq->sq.ena = 1;
aq->sq.sso_ena = !!sq->sso_ena;
+ aq->sq.cq_ena = !!sq->cq_ena;
+ aq->sq.cq = sq->cqid;
if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
aq->sq.sqe_stype = NIX_STYPE_STP;
aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
/* Many to one reduction */
- aq->sq.qint_idx = sq->qid % nix->qints;
+ /* Assigning QINT 0 to all the SQs, an errata exists where NIXTX can
+ * send incorrect QINT_IDX when reporting queue interrupt (QINT). This
+ * might result in software missing the interrupt.
+ */
+ aq->sq.qint_idx = 0;
+ return 0;
}
static int
int rc, count;
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_READ;
/* Disable sq */
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_WRITE;
/* Read SQ and free sqb's */
aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_READ;
return 0;
}
-static void
+static int
sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
uint16_t smq)
{
struct nix_cn10k_aq_enq_req *aq;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_INIT;
aq->sq.sqe_stype = NIX_STYPE_STF;
aq->sq.ena = 1;
aq->sq.sso_ena = !!sq->sso_ena;
+ aq->sq.cq_ena = !!sq->cq_ena;
+ aq->sq.cq = sq->cqid;
if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
aq->sq.sqe_stype = NIX_STYPE_STP;
aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
- /* Many to one reduction */
- aq->sq.qint_idx = sq->qid % nix->qints;
+ /* Assigning QINT 0 to all the SQs, an errata exists where NIXTX can
+ * send incorrect QINT_IDX when reporting queue interrupt (QINT). This
+ * might result in software missing the interrupt.
+ */
+ aq->sq.qint_idx = 0;
+ return 0;
}
static int
int rc, count;
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_READ;
/* Disable sq */
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_WRITE;
/* Read SQ and free sqb's */
aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
aq->qidx = sq->qid;
aq->ctype = NIX_AQ_CTYPE_SQ;
aq->op = NIX_AQ_INSTOP_READ;
/* Init SQ context */
if (roc_model_is_cn9k())
- sq_cn9k_init(nix, sq, rr_quantum, smq);
+ rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
else
- sq_init(nix, sq, rr_quantum, smq);
+ rc = sq_init(nix, sq, rr_quantum, smq);
+
+ if (rc)
+ goto nomem;
rc = mbox_process(mbox);
if (rc)
((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
}
+ rc = nix_tel_node_add_sq(sq);
return rc;
nomem:
plt_free(sq->fc);
return rc;
}
+
+/* Fetch the current head and tail indices of completion queue @qid by an
+ * atomic read of NIX_LF_CQ_OP_STATUS (qid selected via bits 63:32 of the
+ * written value).  If the status reports OP_ERR or CQ_ERR, both indices
+ * are returned as 0.  Silently does nothing if either output pointer is
+ * NULL.
+ */
+void
+roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
+ uint32_t *tail)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint64_t reg, val;
+ int64_t *addr;
+
+ if (head == NULL || tail == NULL)
+ return;
+
+ reg = (((uint64_t)qid) << 32);
+ addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
+ val = roc_atomic64_add_nosync(reg, addr);
+ if (val &
+ (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
+ val = 0;
+
+ /* Tail in bits 19:0, head in bits 39:20 of the status word. */
+ *tail = (uint32_t)(val & 0xFFFFF);
+ *head = (uint32_t)((val >> 20) & 0xFFFFF);
+}
+
+/* Fetch the current head and tail indices of send queue @qid by an atomic
+ * read of NIX_LF_SQ_OP_STATUS (qid selected via bits 63:32 of the written
+ * value).  The raw tail is scaled by the consumed-SQB count so it is
+ * expressed in SQE units.  On OP_ERR both indices are returned as 0,
+ * matching roc_nix_cq_head_tail_get().  Silently does nothing if either
+ * output pointer is NULL.
+ */
+void
+roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
+ uint32_t *tail)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct roc_nix_sq *sq = nix->sqs[qid];
+ uint16_t sqes_per_sqb, sqb_cnt;
+ uint64_t reg, val;
+ int64_t *addr;
+
+ if (head == NULL || tail == NULL)
+ return;
+
+ reg = (((uint64_t)qid) << 32);
+ addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
+ val = roc_atomic64_add_nosync(reg, addr);
+ if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) {
+ /* Report empty indices on error; the previous code left the
+ * outputs uninitialized (it only zeroed the local 'val' and
+ * returned), so callers read garbage.
+ */
+ *head = 0;
+ *tail = 0;
+ return;
+ }
+
+ /* SQB count in bits 15:0, head in 25:20, tail in 33:28. */
+ *tail = (uint32_t)((val >> 28) & 0x3F);
+ *head = (uint32_t)((val >> 20) & 0x3F);
+ sqb_cnt = (uint16_t)(val & 0xFFFF);
+
+ sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
+
+ /* Scale tail by the used SQB count so it is in SQE units. */
+ *tail += (sqes_per_sqb * (sqb_cnt - 1));
+}