/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
5 #include <rte_common.h>
6 #include <rte_rawdev.h>
7 #include <rte_rawdev_pmd.h>
9 #include "otx2_common.h"
10 #include "otx2_ep_rawdev.h"
11 #include "otx2_ep_vf.h"
14 sdp_vf_reset_iq(struct sdp_device *sdpvf, int q_no)
16 uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
17 volatile uint64_t d64 = 0ull;
19 /* There is no RST for a ring.
20 * Clear all registers one by one after disabling the ring
23 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
24 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_BADDR(q_no));
25 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_RSIZE(q_no));
27 d64 = 0xFFFFFFFF; /* ~0ull */
28 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
29 d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
31 while ((d64 != 0) && loop--) {
32 otx2_write64(d64, sdpvf->hw_addr +
33 SDP_VF_R_IN_INSTR_DBELL(q_no));
37 d64 = otx2_read64(sdpvf->hw_addr +
38 SDP_VF_R_IN_INSTR_DBELL(q_no));
41 loop = SDP_VF_BUSY_LOOP_COUNT;
42 d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no));
43 while ((d64 != 0) && loop--) {
44 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no));
48 d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no));
52 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INT_LEVELS(q_no));
53 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_PKT_CNT(q_no));
54 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_BYTE_CNT(q_no));
60 sdp_vf_reset_oq(struct sdp_device *sdpvf, int q_no)
62 uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
63 volatile uint64_t d64 = 0ull;
65 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
67 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(q_no));
69 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_RSIZE(q_no));
72 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
73 d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
75 while ((d64 != 0) && loop--) {
76 otx2_write64(d64, sdpvf->hw_addr +
77 SDP_VF_R_OUT_SLIST_DBELL(q_no));
81 d64 = otx2_read64(sdpvf->hw_addr +
82 SDP_VF_R_OUT_SLIST_DBELL(q_no));
85 loop = SDP_VF_BUSY_LOOP_COUNT;
86 d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
87 while ((d64 != 0) && (loop--)) {
88 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
92 d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
96 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_INT_LEVELS(q_no));
97 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_PKT_CNT(q_no));
98 otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_BYTE_CNT(q_no));
104 sdp_vf_setup_global_iq_reg(struct sdp_device *sdpvf, int q_no)
106 volatile uint64_t reg_val = 0ull;
108 /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for IQs
109 * IS_64B is by default enabled.
111 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(q_no));
113 reg_val |= SDP_VF_R_IN_CTL_RDSIZE;
114 reg_val |= SDP_VF_R_IN_CTL_IS_64B;
115 reg_val |= SDP_VF_R_IN_CTL_ESR;
117 otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(q_no));
122 sdp_vf_setup_global_oq_reg(struct sdp_device *sdpvf, int q_no)
124 volatile uint64_t reg_val = 0ull;
126 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
128 reg_val |= (SDP_VF_R_OUT_CTL_IMODE);
130 reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_P);
131 reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_P);
132 reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_I);
133 reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_I);
134 reg_val &= ~(SDP_VF_R_OUT_CTL_ES_I);
135 reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_D);
136 reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_D);
137 reg_val &= ~(SDP_VF_R_OUT_CTL_ES_D);
139 /* INFO/DATA ptr swap is required */
140 reg_val |= (SDP_VF_R_OUT_CTL_ES_P);
142 otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
147 sdp_vf_reset_input_queues(struct sdp_device *sdpvf)
151 otx2_sdp_dbg("%s :", __func__);
153 for (q_no = 0; q_no < sdpvf->sriov_info.rings_per_vf; q_no++)
154 sdp_vf_reset_iq(sdpvf, q_no);
160 sdp_vf_reset_output_queues(struct sdp_device *sdpvf)
162 uint64_t q_no = 0ull;
164 otx2_sdp_dbg(" %s :", __func__);
166 for (q_no = 0; q_no < sdpvf->sriov_info.rings_per_vf; q_no++)
167 sdp_vf_reset_oq(sdpvf, q_no);
173 sdp_vf_setup_global_input_regs(struct sdp_device *sdpvf)
175 uint64_t q_no = 0ull;
177 sdp_vf_reset_input_queues(sdpvf);
179 for (q_no = 0; q_no < (sdpvf->sriov_info.rings_per_vf); q_no++)
180 sdp_vf_setup_global_iq_reg(sdpvf, q_no);
184 sdp_vf_setup_global_output_regs(struct sdp_device *sdpvf)
188 sdp_vf_reset_output_queues(sdpvf);
190 for (q_no = 0; q_no < (sdpvf->sriov_info.rings_per_vf); q_no++)
191 sdp_vf_setup_global_oq_reg(sdpvf, q_no);
/* One-shot global register setup: input side first, then output. */
static void
sdp_vf_setup_device_regs(struct sdp_device *sdpvf)
{
	sdp_vf_setup_global_input_regs(sdpvf);
	sdp_vf_setup_global_output_regs(sdpvf);
}
205 sdp_vf_setup_iq_regs(struct sdp_device *sdpvf, uint32_t iq_no)
207 struct sdp_instr_queue *iq = sdpvf->instr_queue[iq_no];
208 volatile uint64_t reg_val = 0ull;
210 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
212 /* Wait till IDLE to set to 1, not supposed to configure BADDR
213 * as long as IDLE is 0
215 if (!(reg_val & SDP_VF_R_IN_CTL_IDLE)) {
217 reg_val = otx2_read64(sdpvf->hw_addr +
218 SDP_VF_R_IN_CONTROL(iq_no));
219 } while (!(reg_val & SDP_VF_R_IN_CTL_IDLE));
222 /* Write the start of the input queue's ring and its size */
223 otx2_write64(iq->base_addr_dma, sdpvf->hw_addr +
224 SDP_VF_R_IN_INSTR_BADDR(iq_no));
225 otx2_write64(iq->nb_desc, sdpvf->hw_addr +
226 SDP_VF_R_IN_INSTR_RSIZE(iq_no));
228 /* Remember the doorbell & instruction count register addr
231 iq->doorbell_reg = (uint8_t *) sdpvf->hw_addr +
232 SDP_VF_R_IN_INSTR_DBELL(iq_no);
233 iq->inst_cnt_reg = (uint8_t *) sdpvf->hw_addr +
234 SDP_VF_R_IN_CNTS(iq_no);
236 otx2_sdp_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p",
237 iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
239 /* Store the current instrn counter(used in flush_iq calculation) */
240 iq->reset_instr_cnt = rte_read32(iq->inst_cnt_reg);
242 /* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
245 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
246 reg_val = 0xffffffff;
248 otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
253 sdp_vf_setup_oq_regs(struct sdp_device *sdpvf, uint32_t oq_no)
255 volatile uint64_t reg_val = 0ull;
256 uint64_t oq_ctl = 0ull;
258 struct sdp_droq *droq = sdpvf->droq[oq_no];
260 /* Wait on IDLE to set to 1, supposed to configure BADDR
261 * as log as IDLE is 0
263 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
265 while (!(reg_val & SDP_VF_R_OUT_CTL_IDLE)) {
266 reg_val = otx2_read64(sdpvf->hw_addr +
267 SDP_VF_R_OUT_CONTROL(oq_no));
270 otx2_write64(droq->desc_ring_dma, sdpvf->hw_addr +
271 SDP_VF_R_OUT_SLIST_BADDR(oq_no));
272 otx2_write64(droq->nb_desc, sdpvf->hw_addr +
273 SDP_VF_R_OUT_SLIST_RSIZE(oq_no));
275 oq_ctl = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
277 /* Clear the ISIZE and BSIZE (22-0) */
278 oq_ctl &= ~(0x7fffffull);
280 /* Populate the BSIZE (15-0) */
281 oq_ctl |= (droq->buffer_size & 0xffff);
283 /* Populate ISIZE(22-16) */
284 oq_ctl |= ((SDP_RH_SIZE << 16) & 0x7fffff);
285 otx2_write64(oq_ctl, sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
287 /* Mapped address of the pkt_sent and pkts_credit regs */
288 droq->pkts_sent_reg = (uint8_t *) sdpvf->hw_addr +
289 SDP_VF_R_OUT_CNTS(oq_no);
290 droq->pkts_credit_reg = (uint8_t *) sdpvf->hw_addr +
291 SDP_VF_R_OUT_SLIST_DBELL(oq_no);
293 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_INT_LEVELS(oq_no));
295 /* Clear PKT_CNT register */
296 rte_write64(0xFFFFFFFFF, (uint8_t *)sdpvf->hw_addr +
297 SDP_VF_R_OUT_PKT_CNT(oq_no));
299 /* Clear the OQ doorbell */
300 rte_write32(0xFFFFFFFF, droq->pkts_credit_reg);
301 while ((rte_read32(droq->pkts_credit_reg) != 0ull)) {
302 rte_write32(0xFFFFFFFF, droq->pkts_credit_reg);
305 otx2_sdp_dbg("SDP_R[%d]_credit:%x", oq_no,
306 rte_read32(droq->pkts_credit_reg));
308 /* Clear the OQ_OUT_CNTS doorbell */
309 reg_val = rte_read32(droq->pkts_sent_reg);
310 rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
312 otx2_sdp_dbg("SDP_R[%d]_sent: %x", oq_no,
313 rte_read32(droq->pkts_sent_reg));
315 while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
316 reg_val = rte_read32(droq->pkts_sent_reg);
317 rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
324 sdp_vf_enable_iq(struct sdp_device *sdpvf, uint32_t q_no)
326 volatile uint64_t reg_val = 0ull;
327 uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
329 /* Resetting doorbells during IQ enabling also to handle abrupt
330 * guest reboot. IQ reset does not clear the doorbells.
332 otx2_write64(0xFFFFFFFF, sdpvf->hw_addr +
333 SDP_VF_R_IN_INSTR_DBELL(q_no));
335 while (((otx2_read64(sdpvf->hw_addr +
336 SDP_VF_R_IN_INSTR_DBELL(q_no))) != 0ull) && loop--) {
341 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
344 otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
346 otx2_info("IQ[%d] enable done", q_no);
351 sdp_vf_enable_oq(struct sdp_device *sdpvf, uint32_t q_no)
353 volatile uint64_t reg_val = 0ull;
355 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
357 otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
359 otx2_info("OQ[%d] enable done", q_no);
363 sdp_vf_enable_io_queues(struct sdp_device *sdpvf)
367 for (q_no = 0; q_no < sdpvf->num_iqs; q_no++)
368 sdp_vf_enable_iq(sdpvf, q_no);
370 for (q_no = 0; q_no < sdpvf->num_oqs; q_no++)
371 sdp_vf_enable_oq(sdpvf, q_no);
375 sdp_vf_disable_iq(struct sdp_device *sdpvf, uint32_t q_no)
377 volatile uint64_t reg_val = 0ull;
379 /* Reset the doorbell register for this Input Queue. */
380 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
383 otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
387 sdp_vf_disable_oq(struct sdp_device *sdpvf, uint32_t q_no)
389 volatile uint64_t reg_val = 0ull;
391 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
394 otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
399 sdp_vf_disable_io_queues(struct sdp_device *sdpvf)
403 /* Disable Input Queues. */
404 for (q_no = 0; q_no < sdpvf->num_iqs; q_no++)
405 sdp_vf_disable_iq(sdpvf, q_no);
407 /* Disable Output Queues. */
408 for (q_no = 0; q_no < sdpvf->num_oqs; q_no++)
409 sdp_vf_disable_oq(sdpvf, q_no);
413 sdp_vf_update_read_index(struct sdp_instr_queue *iq)
415 uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
417 /* The new instr cnt reg is a 32-bit counter that can roll over.
418 * We have noted the counter's initial value at init time into
421 if (iq->reset_instr_cnt < new_idx)
422 new_idx -= iq->reset_instr_cnt;
424 new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
426 /* Modulo of the new index with the IQ size will give us
429 new_idx %= iq->nb_desc;
435 sdp_vf_setup_device(struct sdp_device *sdpvf)
437 uint64_t reg_val = 0ull;
439 /* If application doesn't provide its conf, use driver default conf */
440 if (sdpvf->conf == NULL) {
441 sdpvf->conf = sdp_get_defconf(sdpvf);
442 if (sdpvf->conf == NULL) {
443 otx2_err("SDP VF default config not found");
446 otx2_info("Default config is used");
449 /* Get IOQs (RPVF] count */
450 reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(0));
452 sdpvf->sriov_info.rings_per_vf = ((reg_val >> SDP_VF_R_IN_CTL_RPVF_POS)
453 & SDP_VF_R_IN_CTL_RPVF_MASK);
455 otx2_info("SDP RPVF: %d", sdpvf->sriov_info.rings_per_vf);
457 sdpvf->fn_list.setup_iq_regs = sdp_vf_setup_iq_regs;
458 sdpvf->fn_list.setup_oq_regs = sdp_vf_setup_oq_regs;
460 sdpvf->fn_list.setup_device_regs = sdp_vf_setup_device_regs;
461 sdpvf->fn_list.update_iq_read_idx = sdp_vf_update_read_index;
463 sdpvf->fn_list.enable_io_queues = sdp_vf_enable_io_queues;
464 sdpvf->fn_list.disable_io_queues = sdp_vf_disable_io_queues;
466 sdpvf->fn_list.enable_iq = sdp_vf_enable_iq;
467 sdpvf->fn_list.disable_iq = sdp_vf_disable_iq;
469 sdpvf->fn_list.enable_oq = sdp_vf_enable_oq;
470 sdpvf->fn_list.disable_oq = sdp_vf_disable_oq;