/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <string.h>

#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_23xx_reg.h"
#include "lio_mbox.h"
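
/* Reset every IQ/OQ pair owned by the VF: set the RST bit, wait for the
 * QUIET bit, then clear RST again. Returns 0 on success, -1 on failure.
 */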
static int
cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)
{
	uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
	uint64_t d64, q_no;
	int ret_val = 0;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < num_queues; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = lio_read_csr64(lio_dev,
				     CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);
	}

	/* wait until the RST bit is clear or the RST and QUIET bits are set */
	for (q_no = 0; q_no < num_queues; q_no++) {
		volatile uint64_t reg_val;
		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
		       !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
			reg_val = lio_read_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop--;
		}
		if (loop == 0) {
			lio_dev_err(lio_dev,
				    "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
				    (unsigned long)q_no);
			return -1;
		}
		reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);
		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(lio_dev,
				    "clearing the reset failed for qno: %lu\n",
				    (unsigned long)q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}
static int
cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev)
{
	uint64_t q_no, d64;
	PMD_INIT_FUNC_TRACE();

	if (cn23xx_vf_reset_io_queues(lio_dev,
				      lio_dev->sriov_info.rings_per_vf))
		return -1;

	for (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) {
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no),
				0xFFFFFFFF);
		d64 = lio_read_csr64(lio_dev,
				     CN23XX_SLI_IQ_INSTR_COUNT64(q_no));
		d64 &= 0xEFFFFFFFFFFFFFFFL;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no),
				d64);
		/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
		 * the Input Queues
		 */
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				CN23XX_PKT_INPUT_CTL_MASK);
	}
	return 0;
}
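
/* Program the per-queue output (DROQ) defaults: credits, IPTR/DPTR mode
 * and the ordering/snoop/byte-swap bits for scatter-list and data
 * transfers.
 */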
static void
cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)
{
	uint32_t reg_val;
	uint32_t q_no;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
		lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
			      0xFFFFFFFF);
		reg_val =
		    lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no));
		reg_val &= 0xEFFFFFFFFFFFFFFFL;
		reg_val =
		    lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no));

		/* set IPTR & DPTR */
		reg_val |=
		    (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Scatter List
		 * reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data
		 * reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
			      reg_val);
	}
}
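
/* fn_list entry point for register setup: global input regs first, then
 * global output regs.
 */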
static int
cn23xx_vf_setup_device_regs(struct lio_device *lio_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (cn23xx_vf_setup_global_input_regs(lio_dev))
		return -1;
	cn23xx_vf_setup_global_output_regs(lio_dev);
	return 0;
}
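
/* Per-IQ setup: program ring base and size, and cache the doorbell and
 * instruction-count register addresses used on the fast path.
 */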
static void
cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
	uint64_t pkt_in_done = 0;

	PMD_INIT_FUNC_TRACE();

	/* Write the start of the input queue's ring and its size */
	lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			iq->base_addr_dma);
	lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	lio_dev_dbg(lio_dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		    iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = rte_read64(iq->inst_cnt_reg);

	/* Clear the count by writing back what we read, but don't
	 * enable data traffic here
	 */
	rte_write64(pkt_in_done, iq->inst_cnt_reg);
}
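
/* Per-OQ setup: program ring base, size and buffer info, and cache the
 * pkts_sent and pkts_credit register addresses.
 */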
static void
cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no)
{
	struct lio_droq *droq = lio_dev->droq[oq_no];

	PMD_INIT_FUNC_TRACE();

	lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
			droq->desc_ring_dma);
	lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);

	lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
		      (droq->buffer_size | (OCTEON_RH_SIZE << 16)));

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg = (uint8_t *)lio_dev->hw_addr +
					CN23XX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg = (uint8_t *)lio_dev->hw_addr +
					CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
}
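
/* Free the mailbox context allocated by cn23xx_vf_setup_mbox(). */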
static void
cn23xx_vf_free_mbox(struct lio_device *lio_dev)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(lio_dev->mbox[0]);
	lio_dev->mbox[0] = NULL;

	rte_free(lio_dev->mbox);
	lio_dev->mbox = NULL;
}
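
/* Allocate the VF mailbox context and map the mailbox interrupt and
 * SIG0/SIG1 signalling registers used to talk to the PF.
 */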
static int
cn23xx_vf_setup_mbox(struct lio_device *lio_dev)
{
	struct lio_mbox *mbox;

	PMD_INIT_FUNC_TRACE();

	if (lio_dev->mbox == NULL) {
		lio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0);
		if (lio_dev->mbox == NULL)
			return -ENOMEM;
	}

	mbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0);
	if (mbox == NULL) {
		rte_free(lio_dev->mbox);
		lio_dev->mbox = NULL;
		return -ENOMEM;
	}

	rte_spinlock_init(&mbox->lock);
	mbox->lio_dev = lio_dev;
	mbox->state = LIO_MBOX_STATE_IDLE;

	/* VF mbox interrupt reg */
	mbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_VF_SLI_PKT_MBOX_INT(0);
	/* VF reads from SIG0 reg */
	mbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
	/* VF writes into SIG1 reg */
	mbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);

	lio_dev->mbox[0] = mbox;

	rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);

	return 0;
}
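
/* Enable the rings selected in io_qmask: set IS_64B where requested and
 * the RING_ENB bit for every active IQ and OQ.
 */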
static int
cn23xx_vf_enable_io_queues(struct lio_device *lio_dev)
{
	uint64_t reg_val;
	uint32_t q_no;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->num_iqs; q_no++) {
		/* set the corresponding IQ IS_64B bit */
		if (lio_dev->io_qmask.iq64B & (1ULL << q_no)) {
			reg_val = lio_read_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
			lio_write_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (lio_dev->io_qmask.iq & (1ULL << q_no)) {
			reg_val = lio_read_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
			lio_write_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
	}

	for (q_no = 0; q_no < lio_dev->num_oqs; q_no++) {
		/* set the corresponding OQ ENB bit */
		if (lio_dev->io_qmask.oq & (1ULL << q_no)) {
			reg_val = lio_read_csr(lio_dev,
					CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			lio_write_csr(lio_dev,
				      CN23XX_SLI_OQ_PKT_CONTROL(q_no),
				      reg_val);
		}
	}

	return 0;
}
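
/* Disable all rings by resetting them; see the HRM note below. */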
static void
cn23xx_vf_disable_io_queues(struct lio_device *lio_dev)
{
	uint32_t num_queues;

	PMD_INIT_FUNC_TRACE();

	/* per HRM, rings can only be disabled via reset operation,
	 * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
	 */
	num_queues = lio_dev->num_iqs;
	if (num_queues < lio_dev->num_oqs)
		num_queues = lio_dev->num_oqs;

	cn23xx_vf_reset_io_queues(lio_dev, num_queues);
}
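
/* Ask the PF to perform a function level reset on behalf of this VF;
 * no mailbox response is expected.
 */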
void
cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev)
{
	struct lio_mbox_cmd mbox_cmd;

	memset(&mbox_cmd, 0, sizeof(struct lio_mbox_cmd));
	mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 0;
	mbox_cmd.msg.s.cmd = LIO_VF_FLR_REQUEST;
	mbox_cmd.msg.s.len = 1;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;

	lio_mbox_write(lio_dev, &mbox_cmd);
}
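
/* Handshake mailbox callback: copy the PF-provided handshake word and
 * publish the PF major version (and a completion flag) through the
 * atomic status word.
 */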
static void
cn23xx_pfvf_hs_callback(struct lio_device *lio_dev,
			struct lio_mbox_cmd *cmd, void *arg)
{
	uint32_t major = 0;
	PMD_INIT_FUNC_TRACE();

	rte_memcpy((uint8_t *)&lio_dev->pfvf_hsword, cmd->msg.s.params, 6);
	if (cmd->recv_len > 1) {
		struct lio_version *lio_ver = (struct lio_version *)cmd->data;
		major = lio_ver->major;
		major = major << 16;
	}

	rte_atomic64_set((rte_atomic64_t *)arg, major | 1);
}
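
/* Send VF_ACTIVE to the PF, poll for the handshake response, propagate
 * the negotiated pkind to every IQ and check PF/VF major version
 * compatibility.
 */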
int
cn23xx_pfvf_handshake(struct lio_device *lio_dev)
{
	struct lio_mbox_cmd mbox_cmd;
	struct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];
	uint32_t q_no, count = 0;
	rte_atomic64_t status;
	uint32_t pfmajor, vfmajor;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* Sending VF_ACTIVE indication to the PF driver */
	lio_dev_dbg(lio_dev, "requesting info from PF\n");

	mbox_cmd.msg.mbox_msg64 = 0;
	mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 1;
	mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;
	mbox_cmd.msg.s.len = 2;
	mbox_cmd.data[0] = 0;
	lio_ver->major = LIO_BASE_MAJOR_VERSION;
	lio_ver->minor = LIO_BASE_MINOR_VERSION;
	lio_ver->micro = LIO_BASE_MICRO_VERSION;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;
	mbox_cmd.fn_arg = (void *)&status;

	if (lio_mbox_write(lio_dev, &mbox_cmd)) {
		lio_dev_err(lio_dev, "Write to mailbox failed\n");
		return -1;
	}

	rte_atomic64_set(&status, 0);

	do {
		rte_delay_ms(1);
	} while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));

	ret = rte_atomic64_read(&status);
	if (ret == 0) {
		lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n");
		return -1;
	}

	for (q_no = 0; q_no < lio_dev->num_iqs; q_no++)
		lio_dev->instr_queue[q_no]->txpciq.s.pkind =
			lio_dev->pfvf_hsword.pkind;

	vfmajor = LIO_BASE_MAJOR_VERSION;
	pfmajor = ret >> 16;
	if (pfmajor != vfmajor) {
		lio_dev_err(lio_dev,
			    "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = -EPERM;
	} else {
		lio_dev_dbg(lio_dev,
			    "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = 0;
	}

	lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n",
		    lio_dev->pfvf_hsword.pkind);

	return ret;
}
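
/* Mailbox interrupt handler: acknowledge the interrupt and process any
 * pending mailbox message.
 */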
void
cn23xx_vf_handle_mbox(struct lio_device *lio_dev)
{
	uint64_t mbox_int_val;

	/* read and clear by writing 1 */
	mbox_int_val = rte_read64(lio_dev->mbox[0]->mbox_int_reg);
	rte_write64(mbox_int_val, lio_dev->mbox[0]->mbox_int_reg);
	if (lio_mbox_read(lio_dev->mbox[0]))
		lio_mbox_process_message(lio_dev->mbox[0]);
}
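
/* Device setup: read PF/VF numbers and rings_per_vf from INPUT_CONTROL(0),
 * load the default configuration and register the fn_list callbacks.
 */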
int
cn23xx_vf_setup_device(struct lio_device *lio_dev)
{
	uint64_t reg_val;

	PMD_INIT_FUNC_TRACE();

	/* INPUT_CONTROL[RPVF] gives the VF IOq count */
	reg_val = lio_read_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(0));

	lio_dev->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
				CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
	lio_dev->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
				CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;

	reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;

	lio_dev->sriov_info.rings_per_vf =
				reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;

	lio_dev->default_config = lio_get_conf(lio_dev);
	if (lio_dev->default_config == NULL)
		return -1;

	lio_dev->fn_list.setup_iq_regs = cn23xx_vf_setup_iq_regs;
	lio_dev->fn_list.setup_oq_regs = cn23xx_vf_setup_oq_regs;
	lio_dev->fn_list.setup_mbox = cn23xx_vf_setup_mbox;
	lio_dev->fn_list.free_mbox = cn23xx_vf_free_mbox;

	lio_dev->fn_list.setup_device_regs = cn23xx_vf_setup_device_regs;

	lio_dev->fn_list.enable_io_queues = cn23xx_vf_enable_io_queues;
	lio_dev->fn_list.disable_io_queues = cn23xx_vf_disable_io_queues;

	return 0;
}
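
/* Quiesce all rings of this VF: if RST is set, wait for QUIET, then clear
 * RST and verify that it cleared.
 */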
int
cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev)
{
	uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
	uint64_t q_no;

	/* Disable the i/p and o/p queues for this Octeon.
	 * IOQs will already be in reset.
	 * If RST bit is set, wait for Quiet bit to be set.
	 * Once Quiet bit is set, clear the RST bit.
	 */
	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
		volatile uint64_t reg_val;

		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && !(reg_val &
				CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
			reg_val = lio_read_csr64(lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop--;
		}

		if (loop == 0) {
			lio_dev_err(lio_dev,
				    "clearing the reset reg failed or setting the quiet reg failed for qno %lu\n",
				    (unsigned long)q_no);
			return -1;
		}

		reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(lio_dev, "unable to reset qno %lu\n",
				    (unsigned long)q_no);
			return -1;
		}
	}

	return 0;
}