net/thunderx: cleanup
drivers/net/thunderx/base/nicvf_hw.c
/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <math.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "nicvf_plat.h"

struct nicvf_reg_info {
	uint32_t offset;
	const char *name;
};

#define NICVF_REG_POLL_ITER_NR   (10)
#define NICVF_REG_POLL_DELAY_US  (2000)
#define NICVF_REG_INFO(reg) {reg, #reg}

static const struct nicvf_reg_info nicvf_reg_tbl[] = {
	NICVF_REG_INFO(NIC_VF_CFG),
	NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
	NICVF_REG_INFO(NIC_VF_INT),
	NICVF_REG_INFO(NIC_VF_INT_W1S),
	NICVF_REG_INFO(NIC_VF_ENA_W1C),
	NICVF_REG_INFO(NIC_VF_ENA_W1S),
	NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
	NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
};

static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
	{NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
	{NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
	{NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
	{NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
	{NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
	{NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
	{NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
	{NIC_VNIC_RX_STAT_0_13 + 0,  "NIC_VNIC_STAT_RX_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 8,  "NIC_VNIC_STAT_RX_UCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
	{NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
	{NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
	{NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
	{NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
};

static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
};

static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
};

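/*
 * Detect hardware capabilities from the PCI subsystem device ID:
 * CN88xx pass2 and CN81xx support tunnel parsing and the CQE_RX2 format.
 */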
int
nicvf_base_init(struct nicvf *nic)
{
	nic->hwcap = 0;
	if (nic->subsystem_device_id == 0)
		return NICVF_ERR_BASE_INIT;

	if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF ||
	    nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

	return NICVF_OK;
}

/* Dump registers on stdout if data is NULL */
int
nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
{
	uint32_t i, q;
	bool dump_stdout;

	dump_stdout = (data == NULL);

	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
		if (dump_stdout)
			nicvf_log("%24s  = 0x%" PRIx64 "\n",
				nicvf_reg_tbl[i].name,
				nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
		else
			*data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);

	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
		if (dump_stdout)
			nicvf_log("%24s  = 0x%" PRIx64 "\n",
				nicvf_multi_reg_tbl[i].name,
				nicvf_reg_read(nic,
					nicvf_multi_reg_tbl[i].offset));
		else
			*data++ = nicvf_reg_read(nic,
					nicvf_multi_reg_tbl[i].offset);

	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_cq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_cq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_cq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_rq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_rq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_rq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_sq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_sq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_sq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_rbdr_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_rbdr_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_rbdr_reg_tbl[i].offset, q);
	return 0;
}

int
nicvf_reg_get_count(void)
{
	int nr_regs;

	nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
	nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
			MAX_CMP_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
			MAX_RCV_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
			MAX_SND_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
			MAX_RCV_BUF_DESC_RINGS_PER_QS;

	return nr_regs;
}

static int
nicvf_qset_config_internal(struct nicvf *nic, bool enable)
{
	int ret;
	struct pf_qs_cfg pf_qs_cfg = {.value = 0};

	pf_qs_cfg.ena = enable ? 1 : 0;
	pf_qs_cfg.vnic = nic->vf_id;
	ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
	return ret ? NICVF_ERR_SET_QS : 0;
}

/* Requests PF to assign and enable Qset */
int
nicvf_qset_config(struct nicvf *nic)
{
	/* Enable Qset */
	return nicvf_qset_config_internal(nic, true);
}

int
nicvf_qset_reclaim(struct nicvf *nic)
{
	/* Disable Qset */
	return nicvf_qset_config_internal(nic, false);
}

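/* qsort() comparator for uint32_t values, written to avoid wraparound */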
static int
cmpfunc(const void *a, const void *b)
{
	const uint32_t x = *(const uint32_t *)a;
	const uint32_t y = *(const uint32_t *)b;

	return (x > y) - (x < y);
}

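/*
 * Return the smallest entry in list[] that is >= val, or 0 when val
 * exceeds every entry. Note: sorts list[] in place.
 */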
static uint32_t
nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
{
	uint32_t i;

	qsort(list, entries, sizeof(uint32_t), cmpfunc);
	for (i = 0; i < entries; i++)
		if (val <= list[i])
			break;
	/* Not in the list */
	if (i >= entries)
		return 0;
	else
		return list[i];
}

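/*
 * Queue-set error interrupts are fatal: dump all registers, log the
 * per-queue error cause, clear the status registers and abort.
 */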
static void
nicvf_handle_qset_err_intr(struct nicvf *nic)
{
	uint16_t qidx;
	uint64_t status;

	nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
	nicvf_reg_dump(nic, NULL);

	for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(
				nic, NIC_QSET_CQ_0_7_STATUS, qidx);
		if (!(status & NICVF_CQ_ERR_MASK))
			continue;

		if (status & NICVF_CQ_WR_FULL)
			nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
		if (status & NICVF_CQ_WR_DISABLE)
			nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
		if (status & NICVF_CQ_WR_FAULT)
			nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
	}

	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(
				nic, NIC_QSET_SQ_0_7_STATUS, qidx);
		if (!(status & NICVF_SQ_ERR_MASK))
			continue;

		if (status & NICVF_SQ_ERR_STOPPED)
			nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
		if (status & NICVF_SQ_ERR_SEND)
			nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
		if (status & NICVF_SQ_ERR_DPE)
			nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
	}

	for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_STATUS0, qidx);
		status &= NICVF_RBDR_FIFO_STATE_MASK;
		status >>= NICVF_RBDR_FIFO_STATE_SHIFT;

		if (status == RBDR_FIFO_STATE_FAIL)
			nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
	}

	nicvf_disable_all_interrupts(nic);
	abort();
}

/*
 * Handle the "mbox" and "queue-set error" interrupts that the poll mode
 * driver is interested in. This function is not re-entrant; the caller
 * must provide proper serialization.
 */
int
nicvf_reg_poll_interrupts(struct nicvf *nic)
{
	int msg = 0;
	uint64_t intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	if (intr & NICVF_INTR_MBOX_MASK) {
		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
		msg = nicvf_handle_mbx_intr(nic);
	}
	if (intr & NICVF_INTR_QS_ERR_MASK) {
		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
		nicvf_handle_qset_err_intr(nic);
	}
	return msg;
}

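/*
 * Poll the 'bits'-wide field at 'bit_pos' of a queue register until it
 * reads 'val', giving up after NICVF_REG_POLL_ITER_NR iterations spaced
 * NICVF_REG_POLL_DELAY_US apart.
 */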
static int
nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
		    uint32_t bit_pos, uint32_t bits, uint64_t val)
{
	uint64_t bit_mask;
	uint64_t reg_val;
	int timeout = NICVF_REG_POLL_ITER_NR;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, offset, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return NICVF_OK;
		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
		timeout--;
	}
	return NICVF_ERR_REG_POLL;
}

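/*
 * Quiesce an RBDR: save head/tail so stale buffers can be freed later,
 * disable the ring, wait for the two 32-bit halves of PRFCH_STATUS to
 * match (prefetch FIFO drained), then reset and verify the FIFO state
 * reported in STATUS0 bits 63:62.
 */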
int
nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t status;
	int timeout = NICVF_REG_POLL_ITER_NR;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	/* Save head and tail pointers for freeing up buffers */
	if (rbdr) {
		rbdr->head = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
		rbdr->tail = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
		rbdr->next_tail = rbdr->tail;
	}

	/* Reset RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
				NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
				62, 2, 0x00))
		return NICVF_ERR_RBDR_DISABLE;

	while (1) {
		status = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
		if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
			break;
		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
		timeout--;
		if (!timeout)
			return NICVF_ERR_RBDR_PREFETCH;
	}

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
			NICVF_RBDR_RESET);
	if (nicvf_qset_poll_reg(nic, qidx,
			NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return NICVF_ERR_RBDR_RESET1;

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_qset_poll_reg(nic, qidx,
			NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return NICVF_ERR_RBDR_RESET2;

	return NICVF_OK;
}

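/*
 * Convert a power-of-two queue length into its register encoding:
 * qsize = log2(len) - len_shift.
 */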
static int
nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
{
	int val;

	val = ((uint32_t)log2(len) - len_shift);
	assert(val >= NICVF_QSIZE_MIN_VAL);
	assert(val <= NICVF_QSIZE_MAX_VAL);
	return val;
}

int
nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
{
	int ret;
	uint64_t head, tail;
	struct nicvf_rbdr *rbdr = nic->rbdr;
	struct rbdr_cfg rbdr_cfg = {.value = 0};

	ret = nicvf_qset_rbdr_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);

	/* Enable RBDR & set queue size */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
						RBDR_SIZE_SHIFT);
	rbdr_cfg.avg_con = 0;
	/* Buffer size in multiples of 128 bytes */
	rbdr_cfg.lines = rbdr->buffsz / 128;

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);

	/* Verify proper RBDR reset */
	head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);

	if (head | tail)
		return NICVF_ERR_RBDR_RESET;

	return NICVF_OK;
}

uint32_t
nicvf_qsize_rbdr_roundup(uint32_t val)
{
	uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
			RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
			RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
			RBDR_QUEUE_SZ_512K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

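/*
 * Fill the RBDR with buffers obtained from 'handler' (up to max_buffs,
 * never beyond qlen_mask descriptors) and ring the doorbell to hand
 * them over to the hardware.
 */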
int
nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
			  uint16_t ridx, rbdr_pool_get_handler handler,
			  uint32_t max_buffs)
{
	struct rbdr_entry_t *desc, *desc0;
	struct nicvf_rbdr *rbdr = nic->rbdr;
	uint32_t count;
	nicvf_phys_addr_t phy;

	assert(rbdr != NULL);
	desc = rbdr->desc;
	count = 0;
	/* Don't fill beyond the maximum number of descriptors */
	while (count < rbdr->qlen_mask) {
		if (count >= max_buffs)
			break;
		desc0 = desc + count;
		phy = handler(dev, nic);
		if (phy) {
			desc0->full_addr = phy;
			count++;
		} else {
			break;
		}
	}
	nicvf_smp_wmb();
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
	rbdr->tail = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
	rbdr->next_tail = rbdr->tail;
	nicvf_smp_rmb();
	return 0;
}

int
nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
{
	return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
}

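/*
 * Disable and reset a send queue. After reset, HEAD and TAIL (read back
 * shifted right by 4) must both be zero.
 */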
int
nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t head, tail;
	struct sq_cfg sq_cfg;

	sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);

	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);

	/* Check if SQ is stopped */
	if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
				NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
		return NICVF_ERR_SQ_DISABLE;

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	if (head | tail)
		return NICVF_ERR_SQ_RESET;

	return 0;
}

int
nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
{
	int ret;
	struct sq_cfg sq_cfg = {.value = 0};

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Send a mailbox msg to PF to config SQ */
	if (nicvf_mbox_sq_config(nic, qidx))
		return NICVF_ERR_SQ_PF_CFG;

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);

	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);

	return 0;
}

uint32_t
nicvf_qsize_sq_roundup(uint32_t val)
{
	uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
			SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
			SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
			SND_QUEUE_SZ_64K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

int
nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
	return nicvf_mbox_rq_sync(nic);
}

int
nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
	struct pf_rq_cfg pf_rq_cfg = {.value = 0};
	struct rq_cfg rq_cfg = {.value = 0};

	if (nicvf_qset_rq_reclaim(nic, qidx))
		return NICVF_ERR_RQ_CLAIM;

	pf_rq_cfg.strip_pre_l2 = 0;
	/* First cache line of RBDR data will be allocated into L2C */
	pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
	pf_rq_cfg.cq_qs = nic->vf_id;
	pf_rq_cfg.cq_idx = qidx;
	pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
	pf_rq_cfg.rbdr_cont_idx = 0;
	pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
	pf_rq_cfg.rbdr_strt_idx = 0;

	/* Send a mailbox msg to PF to config RQ */
	if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
		return NICVF_ERR_RQ_PF_CFG;

	/* Select Rx backpressure */
	if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
		return NICVF_ERR_RQ_BP_CFG;

	/* Send a mailbox msg to PF to config RQ drop */
	if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
		return NICVF_ERR_RQ_DROP_CFG;

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);

	return 0;
}

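/*
 * Disable and reset a completion queue, polling CFG until the hardware
 * reports the queue disabled. After reset, HEAD and TAIL (read back
 * shifted right by 9) must both be zero.
 */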
int
nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t tail, head;

	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
		return NICVF_ERR_CQ_DISABLE;

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
	tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
	head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
	if (head | tail)
		return NICVF_ERR_CQ_RESET;

	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	return 0;
}

int
nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
	int ret;
	struct cq_cfg cq_cfg = {.value = 0};

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);

	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	/* Writes of CQE will be allocated into L2C */
	cq_cfg.caching = 1;
	cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	return 0;
}

uint32_t
nicvf_qsize_cq_roundup(uint32_t val)
{
	uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
			CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
			CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
			CMP_QUEUE_SZ_64K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

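/*
 * VLAN stripping is controlled by the STRIP_*_VLAN bits at position 25 of
 * NIC_VNIC_RQ_GEN_CFG; only first-VLAN stripping is ever enabled here.
 */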
void
nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
{
	uint64_t val;

	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
	if (enable)
		val |= (STRIP_FIRST_VLAN << 25);
	else
		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);

	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

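/*
 * The RSS key is programmed as RSS_HASH_KEY_SIZE consecutive 64-bit
 * big-endian words starting at NIC_VNIC_RSS_KEY_0_4.
 */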
void
nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
{
	int idx;
	uint64_t addr, val;
	uint64_t *keyptr = (uint64_t *)key;

	addr = NIC_VNIC_RSS_KEY_0_4;
	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		val = nicvf_cpu_to_be_64(*keyptr);
		nicvf_reg_write(nic, addr, val);
		addr += sizeof(uint64_t);
		keyptr++;
	}
}

void
nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
{
	int idx;
	uint64_t addr, val;
	uint64_t *keyptr = (uint64_t *)key;

	addr = NIC_VNIC_RSS_KEY_0_4;
	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		val = nicvf_reg_read(nic, addr);
		*keyptr = nicvf_be_to_cpu_64(val);
		addr += sizeof(uint64_t);
		keyptr++;
	}
}

void
nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
{
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
}

uint64_t
nicvf_rss_get_cfg(struct nicvf *nic)
{
	return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
}

int
nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
	uint32_t idx;
	struct nicvf_rss_reta_info *rss = &nic->rss_info;

	/* Result will be stored in nic->rss_info.rss_size */
	if (nicvf_mbox_get_rss_size(nic))
		return NICVF_ERR_RSS_GET_SZ;

	assert(rss->rss_size > 0);
	rss->hash_bits = (uint8_t)log2(rss->rss_size);
	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
		rss->ind_tbl[idx] = tbl[idx];

	if (nicvf_mbox_config_rss(nic))
		return NICVF_ERR_RSS_TBL_UPDATE;

	return NICVF_OK;
}

int
nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
	uint32_t idx;
	struct nicvf_rss_reta_info *rss = &nic->rss_info;

	/* Result will be stored in nic->rss_info.rss_size */
	if (nicvf_mbox_get_rss_size(nic))
		return NICVF_ERR_RSS_GET_SZ;

	assert(rss->rss_size > 0);
	rss->hash_bits = (uint8_t)log2(rss->rss_size);
	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
		tbl[idx] = rss->ind_tbl[idx];

	return NICVF_OK;
}

int
nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
{
	uint32_t idx;
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
	uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
	};

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	if (cfg == 0)
		return -EINVAL;

	/* Update default RSS key and cfg */
	nicvf_rss_set_key(nic, default_key);
	nicvf_rss_set_cfg(nic, cfg);

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = idx % qcnt;

	return nicvf_rss_reta_update(nic, default_reta,
			NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_rss_term(struct nicvf *nic)
{
	uint32_t idx;
	uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];

	nicvf_rss_set_cfg(nic, 0);
	/* Redirect all output to queue 0 */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		disable_rss[idx] = 0;

	return nicvf_rss_reta_update(nic, disable_rss,
			NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_loopback_config(struct nicvf *nic, bool enable)
{
	if (enable && nic->loopback_supported == 0)
		return NICVF_ERR_LOOPBACK_CFG;

	return nicvf_mbox_loopback_config(nic, enable);
}

void
nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
{
	stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
	stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
}

void
nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
		       uint16_t qidx)
{
	qstats->q_rx_bytes =
		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
	qstats->q_rx_packets =
		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
}

void
nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
		       uint16_t qidx)
{
	qstats->q_tx_bytes =
		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
	qstats->q_tx_packets =
		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
}