+/* Flush/xoff an SMQ via an AF NIX_TXSCH_CFG mailbox message.
+ *
+ * @param dev     device whose AF mailbox is used
+ * @param smq     SMQ hardware queue index
+ * @param enable  true to flush and xoff the SMQ, false to leave it running
+ * @return 0 on success, otherwise the mailbox processing error code
+ */
+static int
+nix_smq_xoff(struct otx2_eth_dev *dev, uint16_t smq, bool enable)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_txschq_config *req;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->num_regs = 1;
+
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
+ /* Unmodified fields: VTAG insert count, max/min HW frame sizes */
+ req->regval[0] = ((uint64_t)NIX_MAX_VTAG_INS << 36) |
+ (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;
+
+ /* NOTE(review): bits 50/49 presumed to be SMQ flush/xoff controls
+ * in NIX_AF_SMQX_CFG -- confirm against the register specification.
+ * The former `else` branch OR'ed in 0 (a no-op) and was dropped.
+ */
+ if (enable)
+ req->regval[0] |= BIT_ULL(50) | BIT_ULL(49);
+
+ return otx2_mbox_process(mbox);
+}
+
+/* Enable/disable flow control (fc_ena) on the SQ's SQB aura and seed the
+ * queue's flow-control memory word from the aura state.
+ *
+ * @param __txq   pointer to a struct otx2_eth_txq
+ * @param enable  true to enable aura flow control, false to disable
+ * @return 0 on success, -EFAULT if the NPA LF is unavailable, or a
+ *         mailbox processing error code
+ */
+int
+otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
+{
+ struct otx2_eth_txq *txq = __txq;
+ struct npa_aq_enq_req *req;
+ struct npa_aq_enq_rsp *rsp;
+ struct otx2_npa_lf *lf;
+ struct otx2_mbox *mbox;
+ uint64_t aura_handle;
+ int rc;
+
+ lf = otx2_npa_lf_obj_get();
+ if (!lf)
+ return -EFAULT;
+ mbox = lf->mbox;
+ /* Set/clear sqb aura fc_ena */
+ aura_handle = txq->sqb_pool->pool_id;
+ req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+ req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_WRITE;
+ /* Below is not needed for aura writes but AF driver needs it */
+ /* AF will translate to associated poolctx */
+ req->aura.pool_addr = req->aura_id;
+
+ /* Only the fc_ena bit is written; the mask limits the update */
+ req->aura.fc_ena = enable;
+ req->aura_mask.fc_ena = 1;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ /* Read back npa aura ctx */
+ req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+ req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ /* Init when enabled as there might be no triggers */
+ if (enable)
+ *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
+ else
+ *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
+ /* Sync write barrier */
+ rte_wmb();
+
+ return 0;
+}
+
+/* Busy-poll until the SQ reaches quiescence: at most one SQB in use,
+ * head offset equal to tail offset, and the flow-control word showing
+ * all SQB buffers returned (fc_mem == nb_sqb_bufs).
+ *
+ * @param txq  transmit queue to drain; function returns only once the
+ *             SQ is quiescent (no timeout -- spins indefinitely)
+ */
+static void
+nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
+{
+ uint16_t sqb_cnt, head_off, tail_off;
+ struct otx2_eth_dev *dev = txq->dev;
+ uint16_t sq = txq->sq;
+ uint64_t reg, val;
+ int64_t *regaddr;
+
+ while (true) {
+ reg = ((uint64_t)sq << 32);
+ /* Issue the OP_PKTS read for its hardware side effect; the
+ * returned value is unused, so discard it explicitly rather
+ * than dead-storing it into `val`.
+ */
+ regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
+ (void)otx2_atomic64_add_nosync(reg, regaddr);
+
+ regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
+ val = otx2_atomic64_add_nosync(reg, regaddr);
+ /* NOTE(review): field offsets assumed from existing usage --
+ * confirm against the NIX_LF_SQ_OP_STATUS layout.
+ */
+ sqb_cnt = val & 0xFFFF;
+ head_off = (val >> 20) & 0x3F;
+ tail_off = (val >> 28) & 0x3F;
+
+ /* SQ reached quiescent state */
+ if (sqb_cnt <= 1 && head_off == tail_off &&
+ (*txq->fc_mem == txq->nb_sqb_bufs)) {
+ break;
+ }
+
+ rte_pause();
+ }
+}
+
+/* Software xoff a SQ: disable its SQB aura flow control, drain in-flight
+ * packets, then flush and xoff its SMQ. When the device is not yet
+ * started, CGX RX/TX is temporarily enabled so pending packets can drain
+ * and is restored before returning.
+ *
+ * @param __txq         pointer to a struct otx2_eth_txq
+ * @param dev_started   true if the ethdev has already been started
+ * @return 0 on success (or if the SQ is not enabled), negative/mailbox
+ *         error code otherwise
+ */
+int
+otx2_nix_tm_sw_xoff(void *__txq, bool dev_started)
+{
+ struct otx2_eth_txq *txq = __txq;
+ struct otx2_eth_dev *dev = txq->dev;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *req;
+ struct nix_aq_enq_rsp *rsp;
+ uint16_t smq;
+ int rc;
+
+ /* Get smq from sq */
+ req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ req->qidx = txq->sq;
+ req->ctype = NIX_AQ_CTYPE_SQ;
+ req->op = NIX_AQ_INSTOP_READ;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get smq, rc=%d", rc);
+ return -EIO;
+ }
+
+ /* Check if sq is enabled; nothing to drain otherwise */
+ if (!rsp->sq.ena)
+ return 0;
+
+ smq = rsp->sq.smq;
+
+ /* Enable CGX RXTX to drain pkts */
+ if (!dev_started) {
+ rc = otx2_cgx_rxtx_start(dev);
+ if (rc)
+ return rc;
+ }
+
+ rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+ if (rc < 0) {
+ otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
+ goto cleanup;
+ }
+
+ /* Disable smq xoff for case it was enabled earlier */
+ rc = nix_smq_xoff(dev, smq, false);
+ if (rc) {
+ otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc);
+ goto cleanup;
+ }
+
+ /* Wait for sq entries to be flushed */
+ nix_txq_flush_sq_spin(txq);
+
+ /* Flush and enable smq xoff */
+ rc = nix_smq_xoff(dev, smq, true);
+ if (rc) {
+ otx2_err("Failed to disable smq for sq %u, rc=%d", txq->sq, rc);
+ /* Fall into cleanup so a temporarily started CGX RX/TX is
+ * restored on this path too (the old `return rc` leaked it
+ * when !dev_started).
+ */
+ goto cleanup;
+ }
+
+cleanup:
+ /* Restore cgx state */
+ if (!dev_started)
+ rc |= otx2_cgx_rxtx_stop(dev);
+
+ return rc;
+}
+
+/* Software xon: attach the SQ to the given SMQ with the supplied
+ * round-robin quantum, enable SQB aura flow control, and clear SMQ xoff
+ * so traffic can flow.
+ *
+ * @param txq         transmit queue to bring up
+ * @param smq         SMQ hardware queue index to attach the SQ to
+ * @param rr_quantum  round-robin quantum for the SQ
+ * @return 0 on success, -EIO on mailbox failure, or a negative error
+ *         code from the helpers
+ */
+static int
+nix_tm_sw_xon(struct otx2_eth_txq *txq,
+ uint16_t smq, uint32_t rr_quantum)
+{
+ struct otx2_eth_dev *dev = txq->dev;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *req;
+ int rc;
+
+ /* Log the target smq (previous code passed txq->sq twice) */
+ otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum %u",
+ txq->sq, smq, rr_quantum);
+ /* Set smq from sq */
+ req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ req->qidx = txq->sq;
+ req->ctype = NIX_AQ_CTYPE_SQ;
+ req->op = NIX_AQ_INSTOP_WRITE;
+ req->sq.smq = smq;
+ req->sq.smq_rr_quantum = rr_quantum;
+ /* Masks limit the write to just the smq and rr_quantum fields */
+ req->sq_mask.smq = ~req->sq_mask.smq;
+ req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Failed to set smq, rc=%d", rc);
+ return -EIO;
+ }
+
+ /* Enable sqb_aura fc */
+ rc = otx2_nix_sq_sqb_aura_fc(txq, true);
+ if (rc < 0) {
+ otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
+ return rc;
+ }
+
+ /* Disable smq xoff */
+ rc = nix_smq_xoff(dev, smq, false);
+ if (rc) {
+ otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc);
+ return rc;
+ }
+
+ return 0;
+}
+