X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcommon%2Fcnxk%2Froc_nix_queue.c;h=a283d96a01203f810ac8998116ea1e7007f0666f;hb=da718c1919bd49faa83ded38b0549500c7e77f91;hp=41e8f2ce210fc22e0e7c731c626e798fe9367472;hpb=ee48f711f3b0a4ec14ce5377bc3f831dbc32025f;p=dpdk.git

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 41e8f2ce21..a283d96a01 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -28,6 +28,22 @@ nix_qsize_clampup(uint32_t val)
 	return i;
 }
 
+void
+nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
+{
+	uint64_t wait_ns;
+
+	if (!roc_model_is_cn10k())
+		return;
+	/* Due to HW errata, writes to VWQE_FLUSH might hang, so instead
+	 * wait for the max VWQE timeout interval.
+	 */
+	if (rq->vwqe_ena) {
+		wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
+		plt_delay_us((wait_ns / 1E3) + 1);
+	}
+}
+
 int
 nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
 {
@@ -38,6 +54,9 @@ nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
 		struct nix_aq_enq_req *aq;
 
 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
 		aq->qidx = rq->qid;
 		aq->ctype = NIX_AQ_CTYPE_RQ;
 		aq->op = NIX_AQ_INSTOP_WRITE;
@@ -48,6 +67,9 @@ nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
 		struct nix_cn10k_aq_enq_req *aq;
 
 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
 		aq->qidx = rq->qid;
 		aq->ctype = NIX_AQ_CTYPE_RQ;
 		aq->op = NIX_AQ_INSTOP_WRITE;
@@ -66,9 +88,8 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
 	int rc;
 
 	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
+	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
 
-	if (roc_model_is_cn10k())
-		plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
 	return rc;
 }
 
@@ -80,6 +101,9 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 	struct nix_aq_enq_req *aq;
 
 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = rq->qid;
 	aq->ctype = NIX_AQ_CTYPE_RQ;
 	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
@@ -195,6 +219,9 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	struct mbox *mbox = dev->mbox;
 
 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = rq->qid;
 	aq->ctype = NIX_AQ_CTYPE_RQ;
 	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
@@ -381,7 +408,11 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
-	return mbox_process(mbox);
+	rc = mbox_process(mbox);
+	if (rc)
+		return rc;
+
+	return nix_tel_node_add_rq(rq);
 }
 
 int
@@ -409,7 +440,11 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
-	return mbox_process(mbox);
+	rc = mbox_process(mbox);
+	if (rc)
+		return rc;
+
+	return nix_tel_node_add_rq(rq);
 }
 
 int
@@ -455,6 +490,9 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 		struct nix_aq_enq_req *aq;
 
 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
 		aq->qidx = cq->qid;
 		aq->ctype = NIX_AQ_CTYPE_CQ;
 		aq->op = NIX_AQ_INSTOP_INIT;
@@ -463,6 +501,9 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 		struct nix_cn10k_aq_enq_req *aq;
 
 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
 		aq->qidx = cq->qid;
 		aq->ctype = NIX_AQ_CTYPE_CQ;
 		aq->op = NIX_AQ_INSTOP_INIT;
@@ -492,15 +533,20 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 		cq->drop_thresh = min_rx_drop;
 	} else {
 		cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
-		cq_ctx->drop = cq->drop_thresh;
-		cq_ctx->drop_ena = 1;
+		/* Drop processing or red drop cannot be enabled due to
+		 * packets coming for second pass from CPT.
+		 */
+		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
+			cq_ctx->drop = cq->drop_thresh;
+			cq_ctx->drop_ena = 1;
+		}
 	}
 
 	/* TX pause frames enable flow ctrl on RX side */
 	if (nix->tx_pause) {
 		/* Single BPID is allocated for all rx channels for now */
 		cq_ctx->bpid = nix->bpid[0];
-		cq_ctx->bp = cq_ctx->drop;
+		cq_ctx->bp = cq->drop_thresh;
 		cq_ctx->bp_ena = 1;
 	}
 
@@ -508,7 +554,7 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 	if (rc)
 		goto free_mem;
 
-	return 0;
+	return nix_tel_node_add_cq(cq);
 
 free_mem:
 	plt_free(cq->desc_base);
@@ -534,6 +580,9 @@ roc_nix_cq_fini(struct roc_nix_cq *cq)
 		struct nix_aq_enq_req *aq;
 
 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
 		aq->qidx = cq->qid;
 		aq->ctype = NIX_AQ_CTYPE_CQ;
 		aq->op = NIX_AQ_INSTOP_WRITE;
@@ -545,6 +594,9 @@ roc_nix_cq_fini(struct roc_nix_cq *cq)
 		struct nix_cn10k_aq_enq_req *aq;
 
 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
 		aq->qidx = cq->qid;
 		aq->ctype = NIX_AQ_CTYPE_CQ;
 		aq->op = NIX_AQ_INSTOP_WRITE;
@@ -625,18 +677,28 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
 		iova += blk_sz;
 	}
+
+	if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
+	    NIX_MAX_SQB) {
+		plt_err("Failed to free all pointers to the pool");
+		rc = NIX_ERR_NO_MEM;
+		goto npa_fail;
+	}
+
 	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
 	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
 	sq->aura_sqb_bufs = NIX_MAX_SQB;
 
 	return rc;
+npa_fail:
+	plt_free(sq->sqe_mem);
nomem:
 	roc_npa_pool_destroy(sq->aura_handle);
fail:
 	return rc;
 }
 
-static void
+static int
 sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	     uint16_t smq)
 {
@@ -644,6 +706,9 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	struct nix_aq_enq_req *aq;
 
 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = sq->qid;
 	aq->ctype = NIX_AQ_CTYPE_SQ;
 	aq->op = NIX_AQ_INSTOP_INIT;
@@ -656,6 +721,8 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sqe_stype = NIX_STYPE_STF;
 	aq->sq.ena = 1;
 	aq->sq.sso_ena = !!sq->sso_ena;
+	aq->sq.cq_ena = !!sq->cq_ena;
+	aq->sq.cq = sq->cqid;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
@@ -665,7 +732,12 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
 
 	/* Many to one reduction */
-	aq->sq.qint_idx = sq->qid % nix->qints;
+	/* Assign QINT 0 to all SQs: an errata exists where NIXTX can
+	 * send an incorrect QINT_IDX when reporting a queue interrupt
+	 * (QINT), which might result in software missing the interrupt.
+	 */
+	aq->sq.qint_idx = 0;
+	return 0;
 }
 
 static int
@@ -679,6 +751,9 @@ sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
 	int rc, count;
 
 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = sq->qid;
 	aq->ctype = NIX_AQ_CTYPE_SQ;
 	aq->op = NIX_AQ_INSTOP_READ;
@@ -692,6 +767,9 @@ sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
 
 	/* Disable sq */
 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = sq->qid;
 	aq->ctype = NIX_AQ_CTYPE_SQ;
 	aq->op = NIX_AQ_INSTOP_WRITE;
@@ -703,6 +781,9 @@ sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
 
 	/* Read SQ and free sqb's */
 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = sq->qid;
 	aq->ctype = NIX_AQ_CTYPE_SQ;
 	aq->op = NIX_AQ_INSTOP_READ;
@@ -734,7 +815,7 @@ sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
 	return 0;
 }
 
-static void
+static int
 sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	uint16_t smq)
 {
@@ -742,6 +823,9 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	struct nix_cn10k_aq_enq_req *aq;
 
 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = sq->qid;
 	aq->ctype = NIX_AQ_CTYPE_SQ;
 	aq->op = NIX_AQ_INSTOP_INIT;
@@ -754,6 +838,8 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sqe_stype = NIX_STYPE_STF;
 	aq->sq.ena = 1;
 	aq->sq.sso_ena = !!sq->sso_ena;
+	aq->sq.cq_ena = !!sq->cq_ena;
+	aq->sq.cq = sq->cqid;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
@@ -762,8 +848,12 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
 
-	/* Many to one reduction */
-	aq->sq.qint_idx = sq->qid % nix->qints;
+	/* Assign QINT 0 to all SQs: an errata exists where NIXTX can
+	 * send an incorrect QINT_IDX when reporting a queue interrupt
+	 * (QINT), which might result in software missing the interrupt.
+	 */
+	aq->sq.qint_idx = 0;
+	return 0;
 }
 
 static int
@@ -777,6 +867,9 @@ sq_fini(struct nix *nix, struct roc_nix_sq *sq)
 	int rc, count;
 
 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = sq->qid;
 	aq->ctype = NIX_AQ_CTYPE_SQ;
 	aq->op = NIX_AQ_INSTOP_READ;
@@ -790,6 +883,9 @@ sq_fini(struct nix *nix, struct roc_nix_sq *sq)
 
 	/* Disable sq */
 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = sq->qid;
 	aq->ctype = NIX_AQ_CTYPE_SQ;
 	aq->op = NIX_AQ_INSTOP_WRITE;
@@ -801,6 +897,9 @@ sq_fini(struct nix *nix, struct roc_nix_sq *sq)
 
 	/* Read SQ and free sqb's */
 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
 	aq->qidx = sq->qid;
 	aq->ctype = NIX_AQ_CTYPE_SQ;
 	aq->op = NIX_AQ_INSTOP_READ;
@@ -871,9 +970,12 @@ roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 
 	/* Init SQ context */
 	if (roc_model_is_cn9k())
-		sq_cn9k_init(nix, sq, rr_quantum, smq);
+		rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
 	else
-		sq_init(nix, sq, rr_quantum, smq);
+		rc = sq_init(nix, sq, rr_quantum, smq);
+
+	if (rc)
+		goto nomem;
 
 	rc = mbox_process(mbox);
 	if (rc)
@@ -888,6 +990,7 @@ roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 			  ((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
 	}
 
+	rc = nix_tel_node_add_sq(sq);
 	return rc;
nomem:
 	plt_free(sq->fc);
@@ -929,6 +1032,11 @@ roc_nix_sq_fini(struct roc_nix_sq *sq)
 		rc |= NIX_ERR_NDC_SYNC;
 
 	rc |= nix_tm_sq_flush_post(sq);
+
+	/* Restore the limit to the max SQB count that the pool was
+	 * created with, for the aura drain to succeed.
+	 */
+	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
 	rc |= roc_npa_pool_destroy(sq->aura_handle);
 	plt_free(sq->fc);
 	plt_free(sq->sqe_mem);
@@ -936,3 +1044,56 @@ roc_nix_sq_fini(struct roc_nix_sq *sq)
 
 	return rc;
 }
+
+void
+roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
+			 uint32_t *tail)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint64_t reg, val;
+	int64_t *addr;
+
+	if (head == NULL || tail == NULL)
+		return;
+
+	reg = (((uint64_t)qid) << 32);
+	addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
+	val = roc_atomic64_add_nosync(reg, addr);
+	if (val &
+	    (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
+		val = 0;
+
+	*tail = (uint32_t)(val & 0xFFFFF);
+	*head = (uint32_t)((val >> 20) & 0xFFFFF);
+}
+
+void
+roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
+			 uint32_t *tail)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_nix_sq *sq = nix->sqs[qid];
+	uint16_t sqes_per_sqb, sqb_cnt;
+	uint64_t reg, val;
+	int64_t *addr;
+
+	if (head == NULL || tail == NULL)
+		return;
+
+	reg = (((uint64_t)qid) << 32);
+	addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
+	val = roc_atomic64_add_nosync(reg, addr);
+	if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) {
+		val = 0;
+		return;
+	}
+
+	*tail = (uint32_t)((val >> 28) & 0x3F);
+	*head = (uint32_t)((val >> 20) & 0x3F);
+	sqb_cnt = (uint16_t)(val & 0xFFFF);
+
+	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
+
+	/* Update the tail index as per the used SQB count */
+	*tail += (sqes_per_sqb * (sqb_cnt - 1));
+}
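
A note on the new nix_rq_vwqe_flush(): rather than writing the RQ to NIX_LF_OP_VWQE_FLUSH, which can hang because of the hardware erratum, the patch sleeps for the worst-case vector-WQE aggregation window. Below is a minimal sketch of the same arithmetic; the values for the timeout and interval are made up for illustration, the real ones come from the RQ context and NIX configuration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vwqe_wait_tmo = 8;  /* assumed stand-in for rq->vwqe_wait_tmo */
	uint16_t vwqe_interval = 9;  /* assumed stand-in for nix->vwqe_interval */

	/* Same formula as the patch: timeout units scale with
	 * (interval + 1), and each unit is 100 ns.
	 */
	uint64_t wait_ns = vwqe_wait_tmo * (vwqe_interval + 1) * 100;
	/* Integer approximation of plt_delay_us((wait_ns / 1E3) + 1) */
	uint64_t wait_us = wait_ns / 1000 + 1;

	printf("worst-case VWQE wait: %" PRIu64 " ns -> delay %" PRIu64 " us\n",
	       wait_ns, wait_us);
	return 0;
}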
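The many `if (!aq) return -ENOSPC;` additions are all one fix: a mailbox message allocation can return NULL when the mailbox region is exhausted, and the old code wrote to the message unconditionally. A self-contained sketch of the pattern, with a hypothetical fixed-slot allocator standing in for mbox_alloc_msg_nix_aq_enq():

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct aq_req { int qidx; };

/* Hypothetical stand-in allocator: returns NULL once its fixed
 * message area is exhausted, like a full mailbox region.
 */
static struct aq_req slots[2];
static size_t next_slot;

static struct aq_req *alloc_req(void)
{
	return next_slot < 2 ? &slots[next_slot++] : NULL;
}

static int queue_write(int qid)
{
	struct aq_req *aq = alloc_req();

	/* The check the patch adds before every field write: fail with
	 * -ENOSPC instead of dereferencing a NULL message pointer.
	 */
	if (!aq)
		return -ENOSPC;
	aq->qidx = qid;
	return 0;
}

int main(void)
{
	for (int qid = 0; qid < 3; qid++)
		printf("queue_write(%d) = %d\n", qid, queue_write(qid));
	return 0;
}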
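roc_nix_cq_head_tail_get() issues an atomic read of NIX_LF_CQ_OP_STATUS with the CQ index in the upper 32 bits, then unpacks two 20-bit ring indices from the result. A sketch of just the unpacking, against a fabricated register value (field positions as used in the patch; the error-bit handling is elided):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Fabricated NIX_LF_CQ_OP_STATUS result: head = 0x123, tail = 0x100 */
	uint64_t val = ((uint64_t)0x123 << 20) | 0x100;

	uint32_t tail = (uint32_t)(val & 0xFFFFF);         /* bits 19:0 */
	uint32_t head = (uint32_t)((val >> 20) & 0xFFFFF); /* bits 39:20 */

	printf("head=0x%" PRIX32 " tail=0x%" PRIX32 "\n", head, tail);
	return 0;
}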
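roc_nix_sq_head_tail_get() only receives a 6-bit head/tail offset within the current SQ buffer from NIX_LF_SQ_OP_STATUS, so it widens the tail by the number of SQBs in use. Worked numbers under assumed values (sqes_per_sqb_log2 = 5, i.e. 32 SQEs per SQB):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration only */
	uint16_t sqes_per_sqb_log2 = 5; /* 32 SQEs per SQB */
	uint16_t sqb_cnt = 3;           /* SQBs currently in use */
	uint32_t tail = 7;              /* raw 6-bit offset in current SQB */

	uint16_t sqes_per_sqb = 1 << sqes_per_sqb_log2;

	/* Same adjustment as the patch: the tail spans (sqb_cnt - 1)
	 * full SQBs plus the offset within the current one.
	 */
	tail += (uint32_t)sqes_per_sqb * (sqb_cnt - 1);

	printf("effective tail index = %" PRIu32 "\n", tail); /* 7 + 32*2 = 71 */
	return 0;
}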