/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
5 #include <rte_common.h>
6 #include <rte_cycles.h>
7 #include <rte_memory.h>
8 #include <rte_byteorder.h>
10 #include "nitrox_hal.h"
11 #include "nitrox_csr.h"
13 #define MAX_VF_QUEUES 8
14 #define MAX_PF_QUEUES 64
15 #define NITROX_TIMER_THOLD 0x3FFFFF
16 #define NITROX_COUNT_THOLD 0xFFFFFFFF
19 nps_pkt_input_ring_disable(uint8_t *bar_addr, uint16_t ring)
21 union nps_pkt_in_instr_ctl pkt_in_instr_ctl;
25 reg_addr = NPS_PKT_IN_INSTR_CTLX(ring);
26 pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
27 pkt_in_instr_ctl.s.enb = 0;
28 nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_ctl.u64);
29 rte_delay_us_block(100);
31 /* wait for enable bit to be cleared */
32 pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
33 while (pkt_in_instr_ctl.s.enb && max_retries--) {
35 pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
40 nps_pkt_solicited_port_disable(uint8_t *bar_addr, uint16_t port)
42 union nps_pkt_slc_ctl pkt_slc_ctl;
46 /* clear enable bit */
47 reg_addr = NPS_PKT_SLC_CTLX(port);
48 pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
49 pkt_slc_ctl.s.enb = 0;
50 nitrox_write_csr(bar_addr, reg_addr, pkt_slc_ctl.u64);
51 rte_delay_us_block(100);
53 pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
54 while (pkt_slc_ctl.s.enb && max_retries--) {
56 pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
61 setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
64 union nps_pkt_in_instr_ctl pkt_in_instr_ctl;
65 union nps_pkt_in_instr_rsize pkt_in_instr_rsize;
66 union nps_pkt_in_instr_baoff_dbell pkt_in_instr_baoff_dbell;
67 union nps_pkt_in_done_cnts pkt_in_done_cnts;
68 uint64_t base_addr, reg_addr;
71 nps_pkt_input_ring_disable(bar_addr, ring);
73 /* write base address */
74 reg_addr = NPS_PKT_IN_INSTR_BADDRX(ring);
76 nitrox_write_csr(bar_addr, reg_addr, base_addr);
77 rte_delay_us_block(CSR_DELAY);
80 reg_addr = NPS_PKT_IN_INSTR_RSIZEX(ring);
81 pkt_in_instr_rsize.u64 = 0;
82 pkt_in_instr_rsize.s.rsize = rsize;
83 nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_rsize.u64);
84 rte_delay_us_block(CSR_DELAY);
87 reg_addr = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
88 pkt_in_instr_baoff_dbell.u64 = 0;
89 pkt_in_instr_baoff_dbell.s.dbell = 0xFFFFFFFF;
90 nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_baoff_dbell.u64);
91 rte_delay_us_block(CSR_DELAY);
93 /* clear done count */
94 reg_addr = NPS_PKT_IN_DONE_CNTSX(ring);
95 pkt_in_done_cnts.u64 = nitrox_read_csr(bar_addr, reg_addr);
96 nitrox_write_csr(bar_addr, reg_addr, pkt_in_done_cnts.u64);
97 rte_delay_us_block(CSR_DELAY);
99 /* Setup PKT IN RING Interrupt Threshold */
100 reg_addr = NPS_PKT_IN_INT_LEVELSX(ring);
101 nitrox_write_csr(bar_addr, reg_addr, 0xFFFFFFFF);
102 rte_delay_us_block(CSR_DELAY);
105 reg_addr = NPS_PKT_IN_INSTR_CTLX(ring);
106 pkt_in_instr_ctl.u64 = 0;
107 pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
108 pkt_in_instr_ctl.s.is64b = 1;
109 pkt_in_instr_ctl.s.enb = 1;
110 nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_ctl.u64);
111 rte_delay_us_block(100);
113 pkt_in_instr_ctl.u64 = 0;
114 pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
115 /* wait for ring to be enabled */
116 while (!pkt_in_instr_ctl.s.enb && max_retries--) {
118 pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
123 setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port)
125 union nps_pkt_slc_ctl pkt_slc_ctl;
126 union nps_pkt_slc_cnts pkt_slc_cnts;
127 union nps_pkt_slc_int_levels pkt_slc_int_levels;
131 nps_pkt_solicited_port_disable(bar_addr, port);
133 /* clear pkt counts */
134 reg_addr = NPS_PKT_SLC_CNTSX(port);
135 pkt_slc_cnts.u64 = nitrox_read_csr(bar_addr, reg_addr);
136 nitrox_write_csr(bar_addr, reg_addr, pkt_slc_cnts.u64);
137 rte_delay_us_block(CSR_DELAY);
139 /* slc interrupt levels */
140 reg_addr = NPS_PKT_SLC_INT_LEVELSX(port);
141 pkt_slc_int_levels.u64 = 0;
142 pkt_slc_int_levels.s.bmode = 0;
143 pkt_slc_int_levels.s.timet = NITROX_TIMER_THOLD;
145 if (NITROX_COUNT_THOLD > 0)
146 pkt_slc_int_levels.s.cnt = NITROX_COUNT_THOLD - 1;
148 nitrox_write_csr(bar_addr, reg_addr, pkt_slc_int_levels.u64);
149 rte_delay_us_block(CSR_DELAY);
152 reg_addr = NPS_PKT_SLC_CTLX(port);
153 pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
154 pkt_slc_ctl.s.rh = 1;
156 pkt_slc_ctl.s.enb = 1;
157 nitrox_write_csr(bar_addr, reg_addr, pkt_slc_ctl.u64);
158 rte_delay_us_block(100);
160 pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
161 while (!pkt_slc_ctl.s.enb && max_retries--) {
163 pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
168 vf_get_vf_config_mode(uint8_t *bar_addr)
170 union aqmq_qsz aqmq_qsz;
175 aqmq_qsz.s.host_queue_size = 0xDEADBEEF;
176 reg_addr = AQMQ_QSZX(0);
177 nitrox_write_csr(bar_addr, reg_addr, aqmq_qsz.u64);
178 rte_delay_us_block(CSR_DELAY);
181 for (q = 1; q < MAX_VF_QUEUES; q++) {
182 reg_addr = AQMQ_QSZX(q);
183 aqmq_qsz.u64 = nitrox_read_csr(bar_addr, reg_addr);
184 if (aqmq_qsz.s.host_queue_size == 0xDEADBEEF)
190 vf_mode = NITROX_MODE_VF128;
193 vf_mode = NITROX_MODE_VF64;
196 vf_mode = NITROX_MODE_VF32;
199 vf_mode = NITROX_MODE_VF16;
210 vf_config_mode_to_nr_queues(enum nitrox_vf_mode vf_mode)
216 nr_queues = MAX_PF_QUEUES;
218 case NITROX_MODE_VF16:
221 case NITROX_MODE_VF32:
224 case NITROX_MODE_VF64:
227 case NITROX_MODE_VF128: