1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_ethdev.h"
/* Allocate (enb=true) or free (enb=false) backpressure IDs (BPIDs) for the
 * port's RX channels by messaging the AF over the shared mailbox, and cache
 * the result in dev->fc_info so CQ backpressure config can reference it.
 * NOTE(review): this view of the file is missing several lines (return type,
 * braces, alloc-failure checks and returns) — comments below describe only
 * the visible statements.
 */
8 otx2_nix_rxchan_bpid_cfg(struct rte_eth_dev *eth_dev, bool enb)
10 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
11 struct otx2_fc_info *fc = &dev->fc_info;
12 struct otx2_mbox *mbox = dev->mbox;
13 struct nix_bp_cfg_req *req;
14 struct nix_bp_cfg_rsp *rsp;
/* Enable path: request BPID allocation from the AF. */
18 req = otx2_mbox_alloc_msg_nix_bp_enable(mbox);
/* One BPID shared across channels rather than one per channel. */
21 req->bpid_per_chan = 0;
23 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
/* AF may allocate fewer BPIDs than requested; treat that as failure. */
24 if (rc || req->chan_cnt != rsp->chan_cnt) {
25 otx2_err("Insufficient BPIDs, alloc=%u < req=%u rc=%d",
26 rsp->chan_cnt, req->chan_cnt, rc);
/* Cache the first allocated BPID for later CQ backpressure setup. */
30 fc->bpid[0] = rsp->chan_bpid[0];
/* Disable path: release the BPIDs and clear the cached values. */
32 req = otx2_mbox_alloc_msg_nix_bp_disable(mbox);
36 rc = otx2_mbox_process(mbox);
38 memset(fc->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
/* rte_eth_dev flow_ctrl_get callback: query the current pause-frame state
 * from the CGX block via the AF mailbox and translate the rx/tx pause bits
 * into an rte_eth_fc_conf mode.
 * NOTE(review): error-handling and return lines are not visible in this
 * view of the file.
 */
45 otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
46 struct rte_eth_fc_conf *fc_conf)
48 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
49 struct cgx_pause_frm_cfg *req, *rsp;
50 struct otx2_mbox *mbox = dev->mbox;
/* Loopback (LBK) ports have no CGX link, hence no flow control. */
53 if (otx2_dev_is_lbk(dev)) {
54 fc_conf->mode = RTE_FC_NONE;
/* Query current pause-frame config; "set" field presumably left clear so
 * the AF returns the current state — TODO confirm against mbox contract. */
58 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
61 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
/* Map the two pause bits onto the four RTE_FC_* modes. */
65 if (rsp->rx_pause && rsp->tx_pause)
66 fc_conf->mode = RTE_FC_FULL;
67 else if (rsp->rx_pause)
68 fc_conf->mode = RTE_FC_RX_PAUSE;
69 else if (rsp->tx_pause)
70 fc_conf->mode = RTE_FC_TX_PAUSE;
72 fc_conf->mode = RTE_FC_NONE;
/* Program backpressure on every RX queue's CQ context: for each queue,
 * issue a NIX AQ WRITE that sets the BPID (cached by
 * otx2_nix_rxchan_bpid_cfg), the backpressure drop level, and the bp_ena
 * flag according to 'enb'.
 * NOTE(review): alloc-failure branches, loop-exit handling and returns are
 * not visible in this view of the file.
 */
79 otx2_nix_cq_bp_cfg(struct rte_eth_dev *eth_dev, bool enb)
81 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
82 struct otx2_fc_info *fc = &dev->fc_info;
83 struct otx2_mbox *mbox = dev->mbox;
84 struct nix_aq_enq_req *aq;
85 struct otx2_eth_rxq *rxq;
88 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
89 rxq = eth_dev->data->rx_queues[i];
91 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
/* Mailbox region exhausted: flush queued messages and wait before
 * retrying the allocation for this queue. */
93 /* The shared memory buffer can be full.
96 otx2_mbox_msg_send(mbox, 0);
97 rc = otx2_mbox_wait_for_rsp(mbox, 0);
/* Retry the allocation now that the mailbox has been drained. */
101 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
106 aq->ctype = NIX_AQ_CTYPE_CQ;
107 aq->op = NIX_AQ_INSTOP_WRITE;
/* Write BPID and drop threshold; the ~0 masks select these fields for
 * update in the read-modify-write AQ operation. */
110 aq->cq.bpid = fc->bpid[0];
111 aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
112 aq->cq.bp = rxq->cq_drop;
113 aq->cq_mask.bp = ~(aq->cq_mask.bp);
116 aq->cq.bp_ena = !!enb;
117 aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
/* Flush any remaining queued AQ writes and wait for completion. */
120 otx2_mbox_msg_send(mbox, 0);
121 rc = otx2_mbox_wait_for_rsp(mbox, 0);
/* Thin wrapper: RX-side flow control is realized purely via CQ
 * backpressure, so just delegate to otx2_nix_cq_bp_cfg(). */
129 otx2_nix_rx_fc_cfg(struct rte_eth_dev *eth_dev, bool enb)
131 return otx2_nix_cq_bp_cfg(eth_dev, enb);
/* rte_eth_dev flow_ctrl_set callback: apply the requested RTE_FC_* mode by
 * (a) toggling CQ backpressure for the TX-pause direction and (b) asking
 * the AF to program CGX pause-frame generation/honoring, then cache the
 * applied state in dev->fc_info.
 * NOTE(review): returns, closing braces and some error paths are not
 * visible in this view of the file.
 */
135 otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
136 struct rte_eth_fc_conf *fc_conf)
138 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
139 struct otx2_fc_info *fc = &dev->fc_info;
140 struct otx2_mbox *mbox = dev->mbox;
141 struct cgx_pause_frm_cfg *req;
142 uint8_t tx_pause, rx_pause;
/* LBK ports have no CGX link: flow control is unsupported. */
145 if (otx2_dev_is_lbk(dev)) {
146 otx2_info("No flow control support for LBK bound ethports");
/* Only the 'mode' field is honored; reject any other non-zero knob. */
150 if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
151 fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
152 otx2_info("Flowctrl parameter is not supported");
/* Nothing to do if the requested mode is already active. */
156 if (fc_conf->mode == fc->mode)
/* Decompose the RTE_FC_* mode into independent rx/tx pause flags. */
159 rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
160 (fc_conf->mode == RTE_FC_RX_PAUSE);
161 tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
162 (fc_conf->mode == RTE_FC_TX_PAUSE);
164 /* Check if TX pause frame is already enabled or not */
165 if (fc->tx_pause ^ tx_pause) {
166 if (otx2_dev_is_Ax(dev) && eth_dev->data->dev_started) {
167 /* on Ax, CQ should be in disabled state
168 * while setting flow control configuration.
170 otx2_info("Stop the port=%d for setting flow control\n",
171 eth_dev->data->port_id);
/* TX pause is generated from RX-side CQ backpressure, hence the
 * tx_pause flag drives otx2_nix_rx_fc_cfg(). */
174 /* TX pause frames, enable/disable flowctrl on RX side. */
175 rc = otx2_nix_rx_fc_cfg(eth_dev, tx_pause);
/* Program CGX pause-frame behavior through the AF mailbox. */
180 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
182 req->rx_pause = rx_pause;
183 req->tx_pause = tx_pause;
185 rc = otx2_mbox_process(mbox);
/* Success: record the newly applied state for future get/set calls. */
189 fc->tx_pause = tx_pause;
190 fc->rx_pause = rx_pause;
191 fc->mode = fc_conf->mode;
/* Synchronize the PMD with the flow-control mode the AF/firmware left
 * programmed in hardware, downgrading TX flow control on Ax silicon to
 * avoid a link-credit deadlock, then re-apply via otx2_nix_flow_ctrl_set().
 * NOTE(review): an early-return for LBK and some assignment lines are not
 * visible in this view of the file.
 */
197 otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
199 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
200 struct rte_eth_fc_conf fc_conf;
/* LBK ports do not support flow control; skip entirely. */
202 if (otx2_dev_is_lbk(dev))
205 memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
206 /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
207 * by AF driver, update those info in PMD structure.
209 otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
/* On Ax, keep only the TX-pause direction (or none) when the queried
 * mode includes RX pause, to sidestep the credit deadlock. */
211 /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
212 if (otx2_dev_is_Ax(dev) &&
213 (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
215 (fc_conf.mode == RTE_FC_FULL ||
216 fc_conf.mode == RTE_FC_TX_PAUSE) ?
217 RTE_FC_TX_PAUSE : RTE_FC_NONE;
/* Push the (possibly adjusted) mode back into HW and PMD state. */
220 return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);