/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
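
/* Allocate or release the backpressure ID (BPID) for the NIX Rx channel
 * through the AF mailbox. The allocated BPID is cached in fc_info so it
 * can later be programmed into the CQ contexts.
 */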
int
otx2_nix_rxchan_bpid_cfg(struct rte_eth_dev *eth_dev, bool enb)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_fc_info *fc = &dev->fc_info;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_bp_cfg_req *req;
	struct nix_bp_cfg_rsp *rsp;
	int rc;

	if (otx2_dev_is_vf(dev))
		return 0;
	if (enb) {
		req = otx2_mbox_alloc_msg_nix_bp_enable(mbox);
		req->chan_base = 0;
		req->chan_cnt = 1;
		req->bpid_per_chan = 0;
		rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (rc || req->chan_cnt != rsp->chan_cnt) {
			otx2_err("Insufficient BPIDs, alloc=%u < req=%u rc=%d",
				 rsp->chan_cnt, req->chan_cnt, rc);
			return rc;
		}
		fc->bpid[0] = rsp->chan_bpid[0];
	} else {
		req = otx2_mbox_alloc_msg_nix_bp_disable(mbox);
		req->chan_base = 0;
		req->chan_cnt = 1;
		rc = otx2_mbox_process(mbox);
		memset(fc->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
	}

	return rc;
}
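
/* Retrieve the current pause frame configuration from CGX via the AF
 * mailbox and translate it into an rte_eth_fc_conf mode.
 */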
int
otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct cgx_pause_frm_cfg *req, *rsp;
	struct otx2_mbox *mbox = dev->mbox;
	int rc;

	if (otx2_dev_is_vf(dev))
		return -ENOTSUP;

	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
	req->set = 0; /* Query only, do not modify the configuration */

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->rx_pause && rsp->tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rsp->rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (rsp->tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}
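
/* Enable or disable backpressure on every Rx completion queue by updating
 * each CQ context (bpid, bp level, bp_ena) with NIX AQ write instructions
 * batched on the mailbox.
 */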
static int
otx2_nix_cq_bp_cfg(struct rte_eth_dev *eth_dev, bool enb)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_fc_info *fc = &dev->fc_info;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	struct otx2_eth_rxq *rxq;
	int i, rc;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			/* The shared memory buffer can be full.
			 * Flush it and retry.
			 */
			otx2_mbox_msg_send(mbox, 0);
			rc = otx2_mbox_wait_for_rsp(mbox, 0);
			if (rc < 0)
				return rc;
			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq)
				return -ENOMEM;
		}

		aq->qidx = rxq->rq;
		aq->ctype = NIX_AQ_CTYPE_CQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		if (enb) {
			aq->cq.bpid = fc->bpid[0];
			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
			aq->cq.bp = rxq->cq_drop;
			aq->cq_mask.bp = ~(aq->cq_mask.bp);
		}
		aq->cq.bp_ena = !!enb;
		aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
	}

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_wait_for_rsp(mbox, 0);
	if (rc < 0)
		return rc;
	return 0;
}
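
/* On this device Rx flow control maps directly to CQ backpressure. */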
static int
otx2_nix_rx_fc_cfg(struct rte_eth_dev *eth_dev, bool enb)
{
	return otx2_nix_cq_bp_cfg(eth_dev, enb);
}
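
/* Apply a new flow control mode: reprogram CQ backpressure when the Tx
 * pause state changes, then update the CGX pause frame configuration.
 * Only the mode field of rte_eth_fc_conf is supported.
 */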
int
otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_fc_info *fc = &dev->fc_info;
	struct otx2_mbox *mbox = dev->mbox;
	struct cgx_pause_frm_cfg *req;
	uint8_t tx_pause, rx_pause;
	int rc;

	if (otx2_dev_is_vf(dev))
		return -ENOTSUP;

	if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
	    fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
		otx2_info("Flow control parameters other than mode are not supported");
		return -EINVAL;
	}

	if (fc_conf->mode == fc->mode)
		return 0;

	rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
		   (fc_conf->mode == RTE_FC_RX_PAUSE);
	tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
		   (fc_conf->mode == RTE_FC_TX_PAUSE);

	/* Check if TX pause frame is already enabled or not */
	if (fc->tx_pause ^ tx_pause) {
		if (otx2_dev_is_Ax(dev) && eth_dev->data->dev_started) {
			/* On Ax, the CQ should be in disabled state
			 * while setting flow control configuration.
			 */
			otx2_info("Stop the port=%d for setting flow control\n",
				  eth_dev->data->port_id);
			return 0;
		}

		/* TX pause frames, enable/disable flowctrl on RX side. */
		rc = otx2_nix_rx_fc_cfg(eth_dev, tx_pause);
		if (rc)
			return rc;
	}

	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
	req->set = 1;
	req->rx_pause = rx_pause;
	req->tx_pause = tx_pause;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	fc->tx_pause = tx_pause;
	fc->rx_pause = rx_pause;
	fc->mode = fc_conf->mode;

	return rc;
}
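
/* Sync the PMD flow control state with what the AF driver programmed in
 * hardware at init time; on Ax silicon, Tx-side flow control is dropped
 * to avoid a link credit deadlock.
 */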
int
otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_fc_conf fc_conf;

	if (otx2_dev_is_vf(dev))
		return 0;

	memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	/* Both Rx & Tx flow ctrl get enabled (RTE_FC_FULL) in HW
	 * by the AF driver; update that info in the PMD structure.
	 */
	otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);

	/* To avoid link credit deadlock on Ax, disable Tx FC if it's enabled */
	if (otx2_dev_is_Ax(dev) &&
	    (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
		fc_conf.mode =
				(fc_conf.mode == RTE_FC_FULL ||
				 fc_conf.mode == RTE_FC_TX_PAUSE) ?
				RTE_FC_TX_PAUSE : RTE_FC_NONE;
	}

	return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
}