/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_common.h>
#include <rte_cycles.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>

#include "otx_ep_common.h"
#include "otx_ep_vf.h"

static void
otx_ep_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
{
	volatile uint64_t reg_val = 0ull;

	/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for IQs.
	 * IS_64B is enabled by default.
	 */
	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_CONTROL(q_no));

	reg_val |= OTX_EP_R_IN_CTL_RDSIZE;
	reg_val |= OTX_EP_R_IN_CTL_IS_64B;
	reg_val |= OTX_EP_R_IN_CTL_ESR;

	otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_IN_CONTROL(q_no));
	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_CONTROL(q_no));

	/* Wait until the queue reports IDLE before returning */
	if (!(reg_val & OTX_EP_R_IN_CTL_IDLE)) {
		do {
			reg_val = rte_read64(otx_ep->hw_addr +
					     OTX_EP_R_IN_CONTROL(q_no));
		} while (!(reg_val & OTX_EP_R_IN_CTL_IDLE));
	}
}

static void
otx_ep_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
{
	volatile uint64_t reg_val = 0ull;

	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_OUT_CONTROL(q_no));

	reg_val &= ~(OTX_EP_R_OUT_CTL_IMODE);
	reg_val &= ~(OTX_EP_R_OUT_CTL_ROR_P);
	reg_val &= ~(OTX_EP_R_OUT_CTL_NSR_P);
	reg_val &= ~(OTX_EP_R_OUT_CTL_ROR_I);
	reg_val &= ~(OTX_EP_R_OUT_CTL_NSR_I);
	reg_val &= ~(OTX_EP_R_OUT_CTL_ES_I);
	reg_val &= ~(OTX_EP_R_OUT_CTL_ROR_D);
	reg_val &= ~(OTX_EP_R_OUT_CTL_NSR_D);
	reg_val &= ~(OTX_EP_R_OUT_CTL_ES_D);

	/* INFO/DATA pointer swap is required */
	reg_val |= (OTX_EP_R_OUT_CTL_ES_P);

	otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_OUT_CONTROL(q_no));
}

static void
otx_ep_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
	uint32_t q_no;

	for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++)
		otx_ep_setup_global_iq_reg(otx_ep, q_no);
}

static void
otx_ep_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
	uint32_t q_no;

	for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++)
		otx_ep_setup_global_oq_reg(otx_ep, q_no);
}

static int
otx_ep_setup_device_regs(struct otx_ep_device *otx_ep)
{
	otx_ep_setup_global_input_regs(otx_ep);
	otx_ep_setup_global_output_regs(otx_ep);

	return 0;
}

static void
otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
	struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
	volatile uint64_t reg_val = 0ull;

	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_CONTROL(iq_no));

	/* Wait for IDLE to be set to 1; BADDR must not be configured
	 * as long as IDLE is 0.
	 */
	if (!(reg_val & OTX_EP_R_IN_CTL_IDLE)) {
		do {
			reg_val = rte_read64(otx_ep->hw_addr +
					     OTX_EP_R_IN_CONTROL(iq_no));
		} while (!(reg_val & OTX_EP_R_IN_CTL_IDLE));
	}

	/* Write the start address of the input queue's ring and its size */
	otx_ep_write64(iq->base_addr_dma, otx_ep->hw_addr,
		       OTX_EP_R_IN_INSTR_BADDR(iq_no));
	otx_ep_write64(iq->nb_desc, otx_ep->hw_addr,
		       OTX_EP_R_IN_INSTR_RSIZE(iq_no));

	/* Remember the doorbell and instruction count register addresses
	 * for this queue.
	 */
	iq->doorbell_reg = (uint8_t *)otx_ep->hw_addr +
			   OTX_EP_R_IN_INSTR_DBELL(iq_no);
	iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +
			   OTX_EP_R_IN_CNTS(iq_no);

	otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		   iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Clear the instruction count register by writing back the value read */
	do {
		reg_val = rte_read32(iq->inst_cnt_reg);
		rte_write32(reg_val, iq->inst_cnt_reg);
	} while (reg_val != 0);

	/* IN INTR_THRESHOLD is set to the maximum (0xFFFFFFFF), which
	 * disables the IN interrupt.
	 */
	otx_ep_write64(OTX_EP_CLEAR_IN_INT_LVLS, otx_ep->hw_addr,
		       OTX_EP_R_IN_INT_LEVELS(iq_no));
}

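/* Illustrative sketch (not part of the driver): once setup_iq_regs() has
 * cached the register addresses, the transmit path is expected to post
 * commands to the ring and then ring the doorbell with the number of new
 * entries, roughly along these lines ("nb_new_cmds" is a hypothetical name):
 *
 *	rte_io_wmb();
 *	rte_write64(nb_new_cmds, iq->doorbell_reg);
 *	in_flight = rte_read32(iq->inst_cnt_reg);
 *
 * The actual datapath lives elsewhere in the driver and may differ in detail.
 */
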
static void
otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
	volatile uint64_t reg_val = 0ull;
	uint64_t oq_ctl = 0ull;

	struct otx_ep_droq *droq = otx_ep->droq[oq_no];

	/* Wait for IDLE to be set to 1; BADDR must not be configured
	 * as long as IDLE is 0.
	 */
	otx_ep_write64(0ULL, otx_ep->hw_addr, OTX_EP_R_OUT_ENABLE(oq_no));

	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_OUT_CONTROL(oq_no));

	while (!(reg_val & OTX_EP_R_OUT_CTL_IDLE)) {
		reg_val = rte_read64(otx_ep->hw_addr +
				     OTX_EP_R_OUT_CONTROL(oq_no));
	}

	/* Write the start address of the output queue's ring and its size */
	otx_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr,
		       OTX_EP_R_OUT_SLIST_BADDR(oq_no));
	otx_ep_write64(droq->nb_desc, otx_ep->hw_addr,
		       OTX_EP_R_OUT_SLIST_RSIZE(oq_no));

	oq_ctl = rte_read64(otx_ep->hw_addr + OTX_EP_R_OUT_CONTROL(oq_no));

	/* Clear the ISIZE and BSIZE fields (bits 22-0) */
	oq_ctl &= ~(OTX_EP_CLEAR_ISIZE_BSIZE);

	/* Populate the BSIZE field (bits 15-0) */
	oq_ctl |= (droq->buffer_size & OTX_EP_DROQ_BUFSZ_MASK);

	otx_ep_write64(oq_ctl, otx_ep->hw_addr, OTX_EP_R_OUT_CONTROL(oq_no));

	/* Mapped addresses of the pkts_sent and pkts_credit registers */
	droq->pkts_sent_reg = (uint8_t *)otx_ep->hw_addr +
			      OTX_EP_R_OUT_CNTS(oq_no);
	droq->pkts_credit_reg = (uint8_t *)otx_ep->hw_addr +
				OTX_EP_R_OUT_SLIST_DBELL(oq_no);

	otx_ep_write64(OTX_EP_CLEAR_OUT_INT_LVLS, otx_ep->hw_addr,
		       OTX_EP_R_OUT_INT_LEVELS(oq_no));

	/* Clear the OQ doorbell */
	rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
	while (rte_read32(droq->pkts_credit_reg) != 0) {
		rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
	}

	otx_ep_dbg("OTX_EP_R[%d]_credit:%x\n", oq_no,
		   rte_read32(droq->pkts_credit_reg));

	/* Clear the OQ_OUT_CNTS doorbell */
	reg_val = rte_read32(droq->pkts_sent_reg);
	rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);

	otx_ep_dbg("OTX_EP_R[%d]_sent: %x\n", oq_no,
		   rte_read32(droq->pkts_sent_reg));

	while (rte_read32(droq->pkts_sent_reg) != 0) {
		reg_val = rte_read32(droq->pkts_sent_reg);
		rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
	}
}

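/* Illustrative sketch (not part of the driver): with the register addresses
 * cached above, the receive path is typically expected to read the number of
 * delivered packets from pkts_sent_reg, acknowledge them by writing the count
 * back, and return buffer credits through pkts_credit_reg, roughly like this
 * ("nb_pkts" and "nb_refilled" are hypothetical names):
 *
 *	nb_pkts = rte_read32(droq->pkts_sent_reg);
 *	// ... process nb_pkts descriptors, refill nb_refilled buffers ...
 *	rte_write32(nb_pkts, droq->pkts_sent_reg);
 *	rte_write32(nb_refilled, droq->pkts_credit_reg);
 *
 * The actual datapath lives elsewhere in the driver and may differ in detail.
 */
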
static int
otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
	uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
	uint64_t reg_val = 0ull;

	/* Reset the doorbell during IQ enable as well, to handle an abrupt
	 * guest reboot. An IQ reset does not clear the doorbell.
	 */
	otx_ep_write64(0xFFFFFFFF, otx_ep->hw_addr,
		       OTX_EP_R_IN_INSTR_DBELL(q_no));

	while ((rte_read64(otx_ep->hw_addr +
			   OTX_EP_R_IN_INSTR_DBELL(q_no)) != 0ull) && --loop) {
		rte_delay_ms(1);
	}

	if (loop == 0) {
		otx_ep_err("dbell reset failed\n");
		return -EIO;
	}

	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_ENABLE(q_no));
	reg_val |= 0x1ull;

	otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_IN_ENABLE(q_no));

	otx_ep_info("IQ[%d] enable done\n", q_no);

	return 0;
}

static int
otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
	uint64_t reg_val = 0ull;
	uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;

	/* Reset the doorbell during OQ enable as well, to handle an abrupt
	 * guest reboot. An OQ reset does not clear the doorbell.
	 */
	otx_ep_write64(0xFFFFFFFF, otx_ep->hw_addr,
		       OTX_EP_R_OUT_SLIST_DBELL(q_no));
	while ((rte_read64(otx_ep->hw_addr +
			   OTX_EP_R_OUT_SLIST_DBELL(q_no)) != 0ull) && --loop) {
		rte_delay_ms(1);
	}

	if (loop == 0) {
		otx_ep_err("dbell reset failed\n");
		return -EIO;
	}

	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_OUT_ENABLE(q_no));
	reg_val |= 0x1ull;
	otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_OUT_ENABLE(q_no));

	otx_ep_info("OQ[%d] enable done\n", q_no);

	return 0;
}

static int
otx_ep_enable_io_queues(struct otx_ep_device *otx_ep)
{
	uint32_t q_no;
	int ret;

	for (q_no = 0; q_no < otx_ep->nb_tx_queues; q_no++) {
		ret = otx_ep_enable_iq(otx_ep, q_no);
		if (ret)
			return ret;
	}

	for (q_no = 0; q_no < otx_ep->nb_rx_queues; q_no++) {
		ret = otx_ep_enable_oq(otx_ep, q_no);
		if (ret)
			return ret;
	}

	return 0;
}

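/* Illustrative sketch (not part of the driver): this function enables only
 * the nb_tx_queues IQs and nb_rx_queues OQs configured by the application,
 * which may be fewer than rings_per_vf. The ethdev start path is expected to
 * reach it through the fn_list hook and propagate failures, e.g.:
 *
 *	if (otx_ep->fn_list.enable_io_queues(otx_ep))
 *		return -EIO;
 *
 * The actual call site is in the ethdev glue code and may differ.
 */
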
static void
otx_ep_disable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
	uint64_t reg_val = 0ull;

	/* Clear the enable bit for this input queue */
	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_ENABLE(q_no));
	reg_val &= ~0x1ull;

	otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_IN_ENABLE(q_no));
}

static void
otx_ep_disable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
	uint64_t reg_val = 0ull;

	/* Clear the enable bit for this output queue */
	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_OUT_ENABLE(q_no));
	reg_val &= ~0x1ull;
	otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_OUT_ENABLE(q_no));
}

static void
otx_ep_disable_io_queues(struct otx_ep_device *otx_ep)
{
	uint32_t q_no;

	for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++) {
		otx_ep_disable_iq(otx_ep, q_no);
		otx_ep_disable_oq(otx_ep, q_no);
	}
}

/* OTX_EP default configuration */
static const struct otx_ep_config default_otx_ep_conf = {
	/* IQ attributes */
	.iq = {
		.max_iqs = OTX_EP_CFG_IO_QUEUES,
		.instr_type = OTX_EP_64BYTE_INSTR,
		.pending_list_size = (OTX_EP_MAX_IQ_DESCRIPTORS *
				      OTX_EP_CFG_IO_QUEUES),
	},

	/* OQ attributes */
	.oq = {
		.max_oqs = OTX_EP_CFG_IO_QUEUES,
		.info_ptr = OTX_EP_OQ_INFOPTR_MODE,
		.refill_threshold = OTX_EP_OQ_REFIL_THRESHOLD,
	},

	.num_iqdef_descs = OTX_EP_MAX_IQ_DESCRIPTORS,
	.num_oqdef_descs = OTX_EP_MAX_OQ_DESCRIPTORS,
	.oqdef_buf_size = OTX_EP_OQ_BUF_SIZE,
};

static const struct otx_ep_config *
otx_ep_get_defconf(struct otx_ep_device *otx_ep_dev __rte_unused)
{
	const struct otx_ep_config *default_conf = NULL;

	default_conf = &default_otx_ep_conf;

	return default_conf;
}

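/* Illustrative sketch (not part of the driver): otx_ep_get_defconf() is only
 * consulted when otx_ep->conf is still NULL, so a caller that wants different
 * queue parameters could install its own table before device setup
 * ("my_conf" is a hypothetical application-provided otx_ep_config):
 *
 *	otx_ep->conf = &my_conf;
 *	otx_ep_vf_setup_device(otx_ep);
 *
 * Otherwise default_otx_ep_conf above is used as-is.
 */
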
int
otx_ep_vf_setup_device(struct otx_ep_device *otx_ep)
{
	uint64_t reg_val = 0ull;

	/* If the application does not provide its own conf, use the driver default */
	if (otx_ep->conf == NULL) {
		otx_ep->conf = otx_ep_get_defconf(otx_ep);
		if (otx_ep->conf == NULL) {
			otx_ep_err("OTX_EP VF default config not found\n");
			return -ENOENT;
		}
		otx_ep_info("Default config is used\n");
	}

	/* Get the IOQ (RPVF) count */
	reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_CONTROL(0));

	otx_ep->sriov_info.rings_per_vf = (reg_val >> OTX_EP_R_IN_CTL_RPVF_POS)
					  & OTX_EP_R_IN_CTL_RPVF_MASK;

	otx_ep_info("OTX_EP RPVF: %d\n", otx_ep->sriov_info.rings_per_vf);

	otx_ep->fn_list.setup_iq_regs = otx_ep_setup_iq_regs;
	otx_ep->fn_list.setup_oq_regs = otx_ep_setup_oq_regs;

	otx_ep->fn_list.setup_device_regs = otx_ep_setup_device_regs;

	otx_ep->fn_list.enable_io_queues = otx_ep_enable_io_queues;
	otx_ep->fn_list.disable_io_queues = otx_ep_disable_io_queues;

	otx_ep->fn_list.enable_iq = otx_ep_enable_iq;
	otx_ep->fn_list.disable_iq = otx_ep_disable_iq;

	otx_ep->fn_list.enable_oq = otx_ep_enable_oq;
	otx_ep->fn_list.disable_oq = otx_ep_disable_oq;

	return 0;
}

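/* Illustrative sketch (not part of the driver): a rough view of how the
 * fn_list hooks populated above are expected to be exercised during device
 * bring-up (the real call sites are in the ethdev glue code and may differ):
 *
 *	otx_ep_vf_setup_device(otx_ep);              // read RPVF, fill fn_list
 *	otx_ep->fn_list.setup_device_regs(otx_ep);   // global IQ/OQ control regs
 *	otx_ep->fn_list.setup_iq_regs(otx_ep, q);    // per queue, at queue setup
 *	otx_ep->fn_list.setup_oq_regs(otx_ep, q);
 *	otx_ep->fn_list.enable_io_queues(otx_ep);    // at dev_start
 *	otx_ep->fn_list.disable_io_queues(otx_ep);   // at dev_stop/close
 */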