/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <math.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "nicvf_plat.h"

struct nicvf_reg_info {
        uint32_t offset;
        const char *name;
};

#define NICVF_REG_POLL_ITER_NR   (10)
#define NICVF_REG_POLL_DELAY_US  (2000)
#define NICVF_REG_INFO(reg) {reg, #reg}

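/*
 * Register dump tables. Each entry pairs a register offset with a printable
 * name; NICVF_REG_INFO uses the preprocessor stringizing operator so a name
 * can never drift out of sync with the macro it describes. Multi-instance
 * registers (RSS key words and statistics counters) are listed explicitly
 * in nicvf_multi_reg_tbl with their 8-byte strides.
 */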
static const struct nicvf_reg_info nicvf_reg_tbl[] = {
        NICVF_REG_INFO(NIC_VF_CFG),
        NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
        NICVF_REG_INFO(NIC_VF_INT),
        NICVF_REG_INFO(NIC_VF_INT_W1S),
        NICVF_REG_INFO(NIC_VF_ENA_W1C),
        NICVF_REG_INFO(NIC_VF_ENA_W1S),
        NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
        NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
};

static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
        {NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
        {NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
        {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
        {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
        {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
        {NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
        {NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
        {NIC_VNIC_RX_STAT_0_13 + 0,  "NIC_VNIC_STAT_RX_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 8,  "NIC_VNIC_STAT_RX_UCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
        {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
        {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
        {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
        {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
};

static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
};

static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
};

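/*
 * Detect the silicon revision and record its capabilities. CN88XX pass 2,
 * CN81XX and CN83XX all support tunnel parsing and the CQE_RX2 completion
 * entry format; CN83XX can additionally disable alignment padding (APAD)
 * on received packets.
 */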
int
nicvf_base_init(struct nicvf *nic)
{
        nic->hwcap = 0;
        if (nic->subsystem_device_id == 0)
                return NICVF_ERR_BASE_INIT;

        if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
                nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

        if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
                nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

        if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
                nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
                                NICVF_CAP_DISABLE_APAD;

        return NICVF_OK;
}

/* dump on stdout if data is NULL */
int
nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
{
        uint32_t i, q;
        bool dump_stdout;

        dump_stdout = (data == NULL);

        for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
                if (dump_stdout)
                        nicvf_log("%24s  = 0x%" PRIx64 "\n",
                                nicvf_reg_tbl[i].name,
                                nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
                else
                        *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);

        for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
                if (dump_stdout)
                        nicvf_log("%24s  = 0x%" PRIx64 "\n",
                                nicvf_multi_reg_tbl[i].name,
                                nicvf_reg_read(nic,
                                        nicvf_multi_reg_tbl[i].offset));
                else
                        *data++ = nicvf_reg_read(nic,
                                        nicvf_multi_reg_tbl[i].offset);

        for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_cq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_cq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_cq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_rq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_rq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_rq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_sq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_sq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_sq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_rbdr_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_rbdr_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_rbdr_reg_tbl[i].offset, q);
        return 0;
}

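/* Number of 64-bit words nicvf_reg_dump() emits when given a buffer */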
int
nicvf_reg_get_count(void)
{
        int nr_regs;

        nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
        nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
                        MAX_CMP_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
                        MAX_RCV_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
                        MAX_SND_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
                        MAX_RCV_BUF_DESC_RINGS_PER_QS;

        return nr_regs;
}

static int
nicvf_qset_config_internal(struct nicvf *nic, bool enable)
{
        int ret;
        struct pf_qs_cfg pf_qs_cfg = {.value = 0};

        pf_qs_cfg.ena = enable ? 1 : 0;
        pf_qs_cfg.vnic = nic->vf_id;
        ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
        return ret ? NICVF_ERR_SET_QS : 0;
}

/* Requests PF to assign and enable Qset */
int
nicvf_qset_config(struct nicvf *nic)
{
        /* Enable Qset */
        return nicvf_qset_config_internal(nic, true);
}

int
nicvf_qset_reclaim(struct nicvf *nic)
{
        /* Disable Qset */
        return nicvf_qset_config_internal(nic, false);
}

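/*
 * Round val up to the nearest entry of list[]; returns 0 when val is larger
 * than every entry. The list is sorted in place first, so callers may pass
 * the supported queue sizes in any order.
 */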
static int
cmpfunc(const void *a, const void *b)
{
        uint32_t lhs = *(const uint32_t *)a;
        uint32_t rhs = *(const uint32_t *)b;

        /* Compare explicitly; subtracting the unsigned values could
         * overflow when converted to int.
         */
        return (lhs > rhs) - (lhs < rhs);
}

static uint32_t
nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
{
        uint32_t i;

        qsort(list, entries, sizeof(uint32_t), cmpfunc);
        for (i = 0; i < entries; i++)
                if (val <= list[i])
                        break;
        /* Not in the list */
        if (i >= entries)
                return 0;
        else
                return list[i];
}

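/*
 * Decode and log the per-queue error status of every CQ, SQ and RBDR in the
 * Qset, then halt: a queue-set error interrupt is treated as fatal, hence
 * the abort() at the end.
 */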
static void
nicvf_handle_qset_err_intr(struct nicvf *nic)
{
        uint16_t qidx;
        uint64_t status;

        nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
        nicvf_reg_dump(nic, NULL);

        for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(
                                nic, NIC_QSET_CQ_0_7_STATUS, qidx);
                if (!(status & NICVF_CQ_ERR_MASK))
                        continue;

                if (status & NICVF_CQ_WR_FULL)
                        nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
                if (status & NICVF_CQ_WR_DISABLE)
                        nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
                if (status & NICVF_CQ_WR_FAULT)
                        nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
        }

        for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(
                                nic, NIC_QSET_SQ_0_7_STATUS, qidx);
                if (!(status & NICVF_SQ_ERR_MASK))
                        continue;

                if (status & NICVF_SQ_ERR_STOPPED)
                        nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
                if (status & NICVF_SQ_ERR_SEND)
                        nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
                if (status & NICVF_SQ_ERR_DPE)
                        nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
        }

        for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_STATUS0, qidx);
                status &= NICVF_RBDR_FIFO_STATE_MASK;
                status >>= NICVF_RBDR_FIFO_STATE_SHIFT;

                if (status == RBDR_FIFO_STATE_FAIL)
                        nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
        }

        nicvf_disable_all_interrupts(nic);
        abort();
}

/*
 * Handle the "mbox" and "queue-set error" interrupts that the poll mode
 * driver is interested in.
 * This function is not re-entrant.
 * The caller should provide proper serialization.
 */
int
nicvf_reg_poll_interrupts(struct nicvf *nic)
{
        int msg = 0;
        uint64_t intr;

        intr = nicvf_reg_read(nic, NIC_VF_INT);
        if (intr & NICVF_INTR_MBOX_MASK) {
                nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
                msg = nicvf_handle_mbx_intr(nic);
        }
        if (intr & NICVF_INTR_QS_ERR_MASK) {
                nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
                nicvf_handle_qset_err_intr(nic);
        }
        return msg;
}

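/*
 * Poll a 'bits'-wide field at 'bit_pos' of a queue register until it reads
 * back 'val'. Gives up after NICVF_REG_POLL_ITER_NR iterations of
 * NICVF_REG_POLL_DELAY_US each, i.e. roughly 20ms in total.
 */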
static int
nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
                    uint32_t bit_pos, uint32_t bits, uint64_t val)
{
        uint64_t bit_mask;
        uint64_t reg_val;
        int timeout = NICVF_REG_POLL_ITER_NR;

        bit_mask = (1ULL << bits) - 1;
        bit_mask = (bit_mask << bit_pos);

        while (timeout) {
                reg_val = nicvf_queue_reg_read(nic, offset, qidx);
                if (((reg_val & bit_mask) >> bit_pos) == val)
                        return NICVF_OK;
                nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
                timeout--;
        }
        return NICVF_ERR_REG_POLL;
}

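/*
 * Quiesce an RBDR: save the software head/tail so queued buffers can be
 * freed later, disable the ring, wait for the prefetch FIFO to drain (the
 * two 32-bit halves of PRFCH_STATUS match once every prefetched descriptor
 * has been consumed), then run the reset/clear sequence. The polls on
 * bits 63:62 of STATUS0 watch the RBDR FIFO state field.
 */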
int
nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t status;
        int timeout = NICVF_REG_POLL_ITER_NR;
        struct nicvf_rbdr *rbdr = nic->rbdr;

        /* Save head and tail pointers for freeing up buffers */
        if (rbdr) {
                rbdr->head = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
                rbdr->tail = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
                rbdr->next_tail = rbdr->tail;
        }

        /* Reset RBDR */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
                                NICVF_RBDR_RESET);

        /* Disable RBDR */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
        if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
                                62, 2, 0x00))
                return NICVF_ERR_RBDR_DISABLE;

        while (1) {
                status = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
                if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
                        break;
                nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
                timeout--;
                if (!timeout)
                        return NICVF_ERR_RBDR_PREFETCH;
        }

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
                        NICVF_RBDR_RESET);
        if (nicvf_qset_poll_reg(nic, qidx,
                        NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
                return NICVF_ERR_RBDR_RESET1;

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
        if (nicvf_qset_poll_reg(nic, qidx,
                        NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
                return NICVF_ERR_RBDR_RESET2;

        return NICVF_OK;
}

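/*
 * Queue size register fields are encoded as log2(len) - len_shift, where
 * len_shift selects the minimum length for the queue type. The asserts
 * catch lengths outside the range the hardware can express.
 */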
static int
nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
{
        int val;

        val = nicvf_log2_u32(len) - len_shift;

        assert(val >= NICVF_QSIZE_MIN_VAL);
        assert(val <= NICVF_QSIZE_MAX_VAL);
        return val;
}

int
nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
{
        int ret;
        uint64_t head, tail;
        struct nicvf_rbdr *rbdr = nic->rbdr;
        struct rbdr_cfg rbdr_cfg = {.value = 0};

        ret = nicvf_qset_rbdr_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Set descriptor base address */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);

        /* Enable RBDR & set queue size */
        rbdr_cfg.ena = 1;
        rbdr_cfg.reset = 0;
        rbdr_cfg.ldwb = 0;
        rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
                                                RBDR_SIZE_SHIFT);
        rbdr_cfg.avg_con = 0;
        rbdr_cfg.lines = rbdr->buffsz / 128;

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);

        /* Verify proper RBDR reset */
        head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
        tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);

        if (head | tail)
                return NICVF_ERR_RBDR_RESET;

        return NICVF_OK;
}

uint32_t
nicvf_qsize_rbdr_roundup(uint32_t val)
{
        uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
                        RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
                        RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
                        RBDR_QUEUE_SZ_512K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

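/*
 * Fill an RBDR with up to max_buffs receive buffers obtained through
 * handler(), stopping early if the pool runs dry. The doorbell write
 * publishes the new descriptors to hardware, after which the software
 * tail is resynchronized from the TAIL register.
 */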
int
nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
                          uint16_t ridx, rbdr_pool_get_handler handler,
                          uint32_t max_buffs)
{
        struct rbdr_entry_t *desc, *desc0;
        struct nicvf_rbdr *rbdr = nic->rbdr;
        uint32_t count;
        nicvf_phys_addr_t phy;

        assert(rbdr != NULL);
        desc = rbdr->desc;
        count = 0;
        /* Don't fill beyond the maximum number of descriptors */
        while (count < rbdr->qlen_mask) {
                if (count >= max_buffs)
                        break;
                desc0 = desc + count;
                phy = handler(dev, nic);
                if (phy) {
                        desc0->full_addr = phy;
                        count++;
                } else {
                        break;
                }
        }
        nicvf_smp_wmb();
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
        rbdr->tail = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
        rbdr->next_tail = rbdr->tail;
        nicvf_smp_rmb();
        return 0;
}

int
nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
{
        return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
}

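/*
 * Disable and reset a send queue, verifying that an enabled queue reports
 * itself stopped before the reset and that head/tail read back as zero
 * afterwards.
 */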
int
nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t head, tail;
        struct sq_cfg sq_cfg;

        sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);

        /* Disable send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);

        /* Check if SQ is stopped */
        if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
                                NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
                return NICVF_ERR_SQ_DISABLE;

        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
        head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
        tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
        if (head | tail)
                return NICVF_ERR_SQ_RESET;

        return 0;
}

int
nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
{
        int ret;
        struct sq_cfg sq_cfg = {.value = 0};

        ret = nicvf_qset_sq_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Send a mailbox msg to PF to config SQ */
        if (nicvf_mbox_sq_config(nic, qidx))
                return NICVF_ERR_SQ_PF_CFG;

        /* Set queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);

        /* Enable send queue & set queue size */
        sq_cfg.ena = 1;
        sq_cfg.reset = 0;
        sq_cfg.ldwb = 0;
        sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
        sq_cfg.tstmp_bgx_intf = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);

        /* Ring doorbell so that H/W restarts processing SQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);

        return 0;
}

uint32_t
nicvf_qsize_sq_roundup(uint32_t val)
{
        uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
                        SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
                        SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
                        SND_QUEUE_SZ_64K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

int
nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        /* Disable receive queue */
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
        return nicvf_mbox_rq_sync(nic);
}

int
nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
        struct pf_rq_cfg pf_rq_cfg = {.value = 0};
        struct rq_cfg rq_cfg = {.value = 0};

        if (nicvf_qset_rq_reclaim(nic, qidx))
                return NICVF_ERR_RQ_CLAIM;

        pf_rq_cfg.strip_pre_l2 = 0;
        /* First cache line of RBDR data will be allocated into L2C */
        pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
        pf_rq_cfg.cq_qs = nic->vf_id;
        pf_rq_cfg.cq_idx = qidx;
        pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
        pf_rq_cfg.rbdr_cont_idx = 0;
        pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
        pf_rq_cfg.rbdr_strt_idx = 0;

        /* Send a mailbox msg to PF to config RQ */
        if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
                return NICVF_ERR_RQ_PF_CFG;

        /* Select Rx backpressure */
        if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
                return NICVF_ERR_RQ_BP_CFG;

        /* Send a mailbox msg to PF to config RQ drop */
        if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
                return NICVF_ERR_RQ_DROP_CFG;

        /* Enable Receive queue */
        rq_cfg.ena = 1;
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);

        return 0;
}

int
nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t tail, head;

        /* Disable completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
        if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
                return NICVF_ERR_CQ_DISABLE;

        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
        tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
        head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
        if (head | tail)
                return NICVF_ERR_CQ_RESET;

        /* Disable timer threshold (doesn't get reset upon CQ reset) */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        return 0;
}

int
nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
        int ret;
        struct cq_cfg cq_cfg = {.value = 0};

        ret = nicvf_qset_cq_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Set completion queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);

        cq_cfg.ena = 1;
        cq_cfg.reset = 0;
        /* Writes of CQE will be allocated into L2C */
        cq_cfg.caching = 1;
        cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
        cq_cfg.avg_con = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        return 0;
}

uint32_t
nicvf_qsize_cq_roundup(uint32_t val)
{
        uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
                        CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
                        CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
                        CMP_QUEUE_SZ_64K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

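/*
 * NIC_VNIC_RQ_GEN_CFG carries the VLAN strip controls shifted up by
 * 25 bits: enabling sets only the first (outer) VLAN strip bit, while
 * disabling clears both strip bits.
 */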
void
nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
{
        uint64_t val;

        val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
        if (enable)
                val |= (STRIP_FIRST_VLAN << 25);
        else
                val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);

        nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

void
nicvf_apad_config(struct nicvf *nic, bool enable)
{
        uint64_t val;

        /* Nothing to do; APAD is always enabled on devices that cannot
         * disable it.
         */
        if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
                return;

        val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
        if (enable)
                val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
        else
                val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);

        nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

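/*
 * The RSS hash key is programmed as RSS_HASH_KEY_SIZE consecutive 64-bit
 * words starting at NIC_VNIC_RSS_KEY_0_4, stored big-endian in hardware.
 */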
void
nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
{
        int idx;
        uint64_t addr, val;
        uint64_t *keyptr = (uint64_t *)key;

        addr = NIC_VNIC_RSS_KEY_0_4;
        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
                val = nicvf_cpu_to_be_64(*keyptr);
                nicvf_reg_write(nic, addr, val);
                addr += sizeof(uint64_t);
                keyptr++;
        }
}

void
nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
{
        int idx;
        uint64_t addr, val;
        uint64_t *keyptr = (uint64_t *)key;

        addr = NIC_VNIC_RSS_KEY_0_4;
        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
                val = nicvf_reg_read(nic, addr);
                *keyptr = nicvf_be_to_cpu_64(val);
                addr += sizeof(uint64_t);
                keyptr++;
        }
}

void
nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
{
        nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
}

uint64_t
nicvf_rss_get_cfg(struct nicvf *nic)
{
        return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
}

int
nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
        uint32_t idx;
        struct nicvf_rss_reta_info *rss = &nic->rss_info;

        /* result will be stored in nic->rss_info.rss_size */
        if (nicvf_mbox_get_rss_size(nic))
                return NICVF_ERR_RSS_GET_SZ;

        assert(rss->rss_size > 0);
        rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
        for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
                rss->ind_tbl[idx] = tbl[idx];

        if (nicvf_mbox_config_rss(nic))
                return NICVF_ERR_RSS_TBL_UPDATE;

        return NICVF_OK;
}

int
nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
        uint32_t idx;
        struct nicvf_rss_reta_info *rss = &nic->rss_info;

        /* result will be stored in nic->rss_info.rss_size */
        if (nicvf_mbox_get_rss_size(nic))
                return NICVF_ERR_RSS_GET_SZ;

        assert(rss->rss_size > 0);
        rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);

        for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
                tbl[idx] = rss->ind_tbl[idx];

        return NICVF_OK;
}

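/*
 * Bring up RSS with a fixed default key and a RETA that round-robins over
 * the first qcnt queues. Fails when a CPI algorithm other than CPI_ALG_NONE
 * is active or when cfg selects no hash fields.
 */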
int
nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
{
        uint32_t idx;
        uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
        uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
        };

        if (nic->cpi_alg != CPI_ALG_NONE)
                return -EINVAL;

        if (cfg == 0)
                return -EINVAL;

        /* Update default RSS key and cfg */
        nicvf_rss_set_key(nic, default_key);
        nicvf_rss_set_cfg(nic, cfg);

        /* Update default RSS RETA */
        for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
                default_reta[idx] = idx % qcnt;

        return nicvf_rss_reta_update(nic, default_reta,
                        NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_rss_term(struct nicvf *nic)
{
        uint32_t idx;
        uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];

        nicvf_rss_set_cfg(nic, 0);
        /* Redirect all output to the 0th queue */
        for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
                disable_rss[idx] = 0;

        return nicvf_rss_reta_update(nic, disable_rss,
                        NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_loopback_config(struct nicvf *nic, bool enable)
{
        if (enable && nic->loopback_supported == 0)
                return NICVF_ERR_LOOPBACK_CFG;

        return nicvf_mbox_loopback_config(nic, enable);
}

void
nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
{
        stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
        stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
        stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
        stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
        stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
        stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
        stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
        stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
        stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
        stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
        stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
        stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
        stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
        stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);

        stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
        stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
        stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
        stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
        stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
}

void
nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
                       uint16_t qidx)
{
        qstats->q_rx_bytes =
                nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
        qstats->q_rx_packets =
                nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
}

void
nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
                       uint16_t qidx)
{
        qstats->q_tx_bytes =
                nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
        qstats->q_tx_packets =
                nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
}