/*
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
43 #include "nicvf_plat.h"
/* Register offset/name pair used by the debug dump tables below. */
struct nicvf_reg_info {
	uint32_t offset;	/* BAR offset of the register */
	const char *name;	/* human-readable register name */
};
/* Iteration count and per-iteration delay for register polling loops
 * (see nicvf_qset_poll_reg() and the reclaim helpers below).
 */
#define NICVF_REG_POLL_ITER_NR   (10)
#define NICVF_REG_POLL_DELAY_US  (2000)
/* Build a {offset, "NAME"} table entry from a register macro. */
#define NICVF_REG_INFO(reg) {reg, #reg}
54 static const struct nicvf_reg_info nicvf_reg_tbl[] = {
55 NICVF_REG_INFO(NIC_VF_CFG),
56 NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
57 NICVF_REG_INFO(NIC_VF_INT),
58 NICVF_REG_INFO(NIC_VF_INT_W1S),
59 NICVF_REG_INFO(NIC_VF_ENA_W1C),
60 NICVF_REG_INFO(NIC_VF_ENA_W1S),
61 NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
62 NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
65 static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
66 {NIC_VNIC_RSS_KEY_0_4 + 0, "NIC_VNIC_RSS_KEY_0"},
67 {NIC_VNIC_RSS_KEY_0_4 + 8, "NIC_VNIC_RSS_KEY_1"},
68 {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
69 {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
70 {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
71 {NIC_VNIC_TX_STAT_0_4 + 0, "NIC_VNIC_STAT_TX_OCTS"},
72 {NIC_VNIC_TX_STAT_0_4 + 8, "NIC_VNIC_STAT_TX_UCAST"},
73 {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
74 {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
75 {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
76 {NIC_VNIC_RX_STAT_0_13 + 0, "NIC_VNIC_STAT_RX_OCTS"},
77 {NIC_VNIC_RX_STAT_0_13 + 8, "NIC_VNIC_STAT_RX_UCAST"},
78 {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
79 {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
80 {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
81 {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
82 {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
83 {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
84 {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
85 {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
86 {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
87 {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
88 {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
89 {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
92 static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
93 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
94 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
95 NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
96 NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
97 NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
98 NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
99 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
100 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
101 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
102 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
105 static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
106 NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
107 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
108 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
111 static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
112 NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
113 NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
114 NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
115 NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
116 NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
117 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
118 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
119 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
120 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
121 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
124 static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
125 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
126 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
127 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
128 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
129 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
130 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
131 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
132 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
133 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
137 nicvf_base_init(struct nicvf *nic)
140 if (nic->subsystem_device_id == 0)
141 return NICVF_ERR_BASE_INIT;
143 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
144 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
146 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
147 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
149 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
150 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
151 NICVF_CAP_DISABLE_APAD;
156 /* dump on stdout if data is NULL */
158 nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
163 dump_stdout = data ? 0 : 1;
165 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
167 nicvf_log("%24s = 0x%" PRIx64 "\n",
168 nicvf_reg_tbl[i].name,
169 nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
171 *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
173 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
175 nicvf_log("%24s = 0x%" PRIx64 "\n",
176 nicvf_multi_reg_tbl[i].name,
178 nicvf_multi_reg_tbl[i].offset));
180 *data++ = nicvf_reg_read(nic,
181 nicvf_multi_reg_tbl[i].offset);
183 for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
184 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
186 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
187 nicvf_qset_cq_reg_tbl[i].name, q,
188 nicvf_queue_reg_read(nic,
189 nicvf_qset_cq_reg_tbl[i].offset, q));
191 *data++ = nicvf_queue_reg_read(nic,
192 nicvf_qset_cq_reg_tbl[i].offset, q);
194 for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
195 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
197 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
198 nicvf_qset_rq_reg_tbl[i].name, q,
199 nicvf_queue_reg_read(nic,
200 nicvf_qset_rq_reg_tbl[i].offset, q));
202 *data++ = nicvf_queue_reg_read(nic,
203 nicvf_qset_rq_reg_tbl[i].offset, q);
205 for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
206 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
208 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
209 nicvf_qset_sq_reg_tbl[i].name, q,
210 nicvf_queue_reg_read(nic,
211 nicvf_qset_sq_reg_tbl[i].offset, q));
213 *data++ = nicvf_queue_reg_read(nic,
214 nicvf_qset_sq_reg_tbl[i].offset, q);
216 for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
217 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
219 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
220 nicvf_qset_rbdr_reg_tbl[i].name, q,
221 nicvf_queue_reg_read(nic,
222 nicvf_qset_rbdr_reg_tbl[i].offset, q));
224 *data++ = nicvf_queue_reg_read(nic,
225 nicvf_qset_rbdr_reg_tbl[i].offset, q);
230 nicvf_reg_get_count(void)
234 nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
235 nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
236 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
237 MAX_CMP_QUEUES_PER_QS;
238 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
239 MAX_RCV_QUEUES_PER_QS;
240 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
241 MAX_SND_QUEUES_PER_QS;
242 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
243 MAX_RCV_BUF_DESC_RINGS_PER_QS;
249 nicvf_qset_config_internal(struct nicvf *nic, bool enable)
252 struct pf_qs_cfg pf_qs_cfg = {.value = 0};
254 pf_qs_cfg.ena = enable ? 1 : 0;
255 pf_qs_cfg.vnic = nic->vf_id;
256 ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
257 return ret ? NICVF_ERR_SET_QS : 0;
/* Requests PF to assign and enable Qset */
int
nicvf_qset_config(struct nicvf *nic)
{
	/* Enable Qset */
	return nicvf_qset_config_internal(nic, true);
}
/* Requests PF to disable this VF's Qset. */
int
nicvf_qset_reclaim(struct nicvf *nic)
{
	/* Disable Qset */
	return nicvf_qset_config_internal(nic, false);
}
/*
 * qsort() comparator for uint32_t values.
 * Compare instead of subtracting: (a - b) on uint32_t wraps modulo 2^32
 * and, truncated to int, reports the wrong sign when the difference
 * exceeds INT_MAX (e.g. 0 vs UINT32_MAX).
 */
static int
cmpfunc(const void *a, const void *b)
{
	uint32_t lhs = *(const uint32_t *)a;
	uint32_t rhs = *(const uint32_t *)b;

	return (lhs > rhs) - (lhs < rhs);
}
282 nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
286 qsort(list, entries, sizeof(uint32_t), cmpfunc);
287 for (i = 0; i < entries; i++)
290 /* Not in the list */
298 nicvf_handle_qset_err_intr(struct nicvf *nic)
303 nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
304 nicvf_reg_dump(nic, NULL);
306 for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
307 status = nicvf_queue_reg_read(
308 nic, NIC_QSET_CQ_0_7_STATUS, qidx);
309 if (!(status & NICVF_CQ_ERR_MASK))
312 if (status & NICVF_CQ_WR_FULL)
313 nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
314 if (status & NICVF_CQ_WR_DISABLE)
315 nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
316 if (status & NICVF_CQ_WR_FAULT)
317 nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
318 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
321 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
322 status = nicvf_queue_reg_read(
323 nic, NIC_QSET_SQ_0_7_STATUS, qidx);
324 if (!(status & NICVF_SQ_ERR_MASK))
327 if (status & NICVF_SQ_ERR_STOPPED)
328 nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
329 if (status & NICVF_SQ_ERR_SEND)
330 nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
331 if (status & NICVF_SQ_ERR_DPE)
332 nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
333 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
336 for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
337 status = nicvf_queue_reg_read(nic,
338 NIC_QSET_RBDR_0_1_STATUS0, qidx);
339 status &= NICVF_RBDR_FIFO_STATE_MASK;
340 status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
342 if (status == RBDR_FIFO_STATE_FAIL)
343 nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
344 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
347 nicvf_disable_all_interrupts(nic);
352 * Handle poll mode driver interested "mbox" and "queue-set error" interrupts.
353 * This function is not re-entrant.
354 * The caller should provide proper serialization.
357 nicvf_reg_poll_interrupts(struct nicvf *nic)
362 intr = nicvf_reg_read(nic, NIC_VF_INT);
363 if (intr & NICVF_INTR_MBOX_MASK) {
364 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
365 msg = nicvf_handle_mbx_intr(nic);
367 if (intr & NICVF_INTR_QS_ERR_MASK) {
368 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
369 nicvf_handle_qset_err_intr(nic);
375 nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
376 uint32_t bit_pos, uint32_t bits, uint64_t val)
380 int timeout = NICVF_REG_POLL_ITER_NR;
382 bit_mask = (1ULL << bits) - 1;
383 bit_mask = (bit_mask << bit_pos);
386 reg_val = nicvf_queue_reg_read(nic, offset, qidx);
387 if (((reg_val & bit_mask) >> bit_pos) == val)
389 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
392 return NICVF_ERR_REG_POLL;
396 nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
399 int timeout = NICVF_REG_POLL_ITER_NR;
400 struct nicvf_rbdr *rbdr = nic->rbdr;
402 /* Save head and tail pointers for freeing up buffers */
404 rbdr->head = nicvf_queue_reg_read(nic,
405 NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
406 rbdr->tail = nicvf_queue_reg_read(nic,
407 NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
408 rbdr->next_tail = rbdr->tail;
412 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
416 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
417 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
419 return NICVF_ERR_RBDR_DISABLE;
422 status = nicvf_queue_reg_read(nic,
423 NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
424 if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
426 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
429 return NICVF_ERR_RBDR_PREFETCH;
432 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
434 if (nicvf_qset_poll_reg(nic, qidx,
435 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
436 return NICVF_ERR_RBDR_RESET1;
438 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
439 if (nicvf_qset_poll_reg(nic, qidx,
440 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
441 return NICVF_ERR_RBDR_RESET2;
447 nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
451 val = ((uint32_t)log2(len) - len_shift);
452 assert(val >= NICVF_QSIZE_MIN_VAL);
453 assert(val <= NICVF_QSIZE_MAX_VAL);
458 nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
462 struct nicvf_rbdr *rbdr = nic->rbdr;
463 struct rbdr_cfg rbdr_cfg = {.value = 0};
465 ret = nicvf_qset_rbdr_reclaim(nic, qidx);
469 /* Set descriptor base address */
470 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
472 /* Enable RBDR & set queue size */
476 rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
478 rbdr_cfg.avg_con = 0;
479 rbdr_cfg.lines = rbdr->buffsz / 128;
481 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
483 /* Verify proper RBDR reset */
484 head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
485 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
488 return NICVF_ERR_RBDR_RESET;
494 nicvf_qsize_rbdr_roundup(uint32_t val)
496 uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
497 RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
498 RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
500 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
504 nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
505 uint16_t ridx, rbdr_pool_get_handler handler,
508 struct rbdr_entry_t *desc, *desc0;
509 struct nicvf_rbdr *rbdr = nic->rbdr;
511 nicvf_phys_addr_t phy;
513 assert(rbdr != NULL);
516 /* Don't fill beyond max numbers of desc */
517 while (count < rbdr->qlen_mask) {
518 if (count >= max_buffs)
520 desc0 = desc + count;
521 phy = handler(dev, nic);
523 desc0->full_addr = phy;
530 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
531 rbdr->tail = nicvf_queue_reg_read(nic,
532 NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
533 rbdr->next_tail = rbdr->tail;
539 nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
541 return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
545 nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
548 struct sq_cfg sq_cfg;
550 sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
552 /* Disable send queue */
553 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
555 /* Check if SQ is stopped */
556 if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
557 NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
558 return NICVF_ERR_SQ_DISABLE;
560 /* Reset send queue */
561 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
562 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
563 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
565 return NICVF_ERR_SQ_RESET;
571 nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
574 struct sq_cfg sq_cfg = {.value = 0};
576 ret = nicvf_qset_sq_reclaim(nic, qidx);
580 /* Send a mailbox msg to PF to config SQ */
581 if (nicvf_mbox_sq_config(nic, qidx))
582 return NICVF_ERR_SQ_PF_CFG;
584 /* Set queue base address */
585 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
587 /* Enable send queue & set queue size */
591 sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
592 sq_cfg.tstmp_bgx_intf = 0;
593 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
595 /* Ring doorbell so that H/W restarts processing SQEs */
596 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
602 nicvf_qsize_sq_roundup(uint32_t val)
604 uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
605 SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
606 SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
608 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
612 nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
614 /* Disable receive queue */
615 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
616 return nicvf_mbox_rq_sync(nic);
620 nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
622 struct pf_rq_cfg pf_rq_cfg = {.value = 0};
623 struct rq_cfg rq_cfg = {.value = 0};
625 if (nicvf_qset_rq_reclaim(nic, qidx))
626 return NICVF_ERR_RQ_CLAIM;
628 pf_rq_cfg.strip_pre_l2 = 0;
629 /* First cache line of RBDR data will be allocated into L2C */
630 pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
631 pf_rq_cfg.cq_qs = nic->vf_id;
632 pf_rq_cfg.cq_idx = qidx;
633 pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
634 pf_rq_cfg.rbdr_cont_idx = 0;
635 pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
636 pf_rq_cfg.rbdr_strt_idx = 0;
638 /* Send a mailbox msg to PF to config RQ */
639 if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
640 return NICVF_ERR_RQ_PF_CFG;
642 /* Select Rx backpressure */
643 if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
644 return NICVF_ERR_RQ_BP_CFG;
646 /* Send a mailbox msg to PF to config RQ drop */
647 if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
648 return NICVF_ERR_RQ_DROP_CFG;
650 /* Enable Receive queue */
652 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
658 nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
662 /* Disable completion queue */
663 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
664 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
665 return NICVF_ERR_CQ_DISABLE;
667 /* Reset completion queue */
668 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
669 tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
670 head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
672 return NICVF_ERR_CQ_RESET;
674 /* Disable timer threshold (doesn't get reset upon CQ reset) */
675 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
680 nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
683 struct cq_cfg cq_cfg = {.value = 0};
685 ret = nicvf_qset_cq_reclaim(nic, qidx);
689 /* Set completion queue base address */
690 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
694 /* Writes of CQE will be allocated into L2C */
696 cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
698 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
700 /* Set threshold value for interrupt generation */
701 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
702 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
707 nicvf_qsize_cq_roundup(uint32_t val)
709 uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
710 CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
711 CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
713 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
718 nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
722 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
724 val |= (STRIP_FIRST_VLAN << 25);
726 val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
728 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
732 nicvf_apad_config(struct nicvf *nic, bool enable)
736 /* APAD always enabled in this device */
737 if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
740 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
742 val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
744 val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
746 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
750 nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
754 uint64_t *keyptr = (uint64_t *)key;
756 addr = NIC_VNIC_RSS_KEY_0_4;
757 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
758 val = nicvf_cpu_to_be_64(*keyptr);
759 nicvf_reg_write(nic, addr, val);
760 addr += sizeof(uint64_t);
766 nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
770 uint64_t *keyptr = (uint64_t *)key;
772 addr = NIC_VNIC_RSS_KEY_0_4;
773 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
774 val = nicvf_reg_read(nic, addr);
775 *keyptr = nicvf_be_to_cpu_64(val);
776 addr += sizeof(uint64_t);
782 nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
784 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
788 nicvf_rss_get_cfg(struct nicvf *nic)
790 return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
794 nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
797 struct nicvf_rss_reta_info *rss = &nic->rss_info;
799 /* result will be stored in nic->rss_info.rss_size */
800 if (nicvf_mbox_get_rss_size(nic))
801 return NICVF_ERR_RSS_GET_SZ;
803 assert(rss->rss_size > 0);
804 rss->hash_bits = (uint8_t)log2(rss->rss_size);
805 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
806 rss->ind_tbl[idx] = tbl[idx];
808 if (nicvf_mbox_config_rss(nic))
809 return NICVF_ERR_RSS_TBL_UPDATE;
815 nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
818 struct nicvf_rss_reta_info *rss = &nic->rss_info;
820 /* result will be stored in nic->rss_info.rss_size */
821 if (nicvf_mbox_get_rss_size(nic))
822 return NICVF_ERR_RSS_GET_SZ;
824 assert(rss->rss_size > 0);
825 rss->hash_bits = (uint8_t)log2(rss->rss_size);
826 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
827 tbl[idx] = rss->ind_tbl[idx];
833 nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
836 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
837 uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
838 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
839 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
840 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
841 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
842 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
845 if (nic->cpi_alg != CPI_ALG_NONE)
851 /* Update default RSS key and cfg */
852 nicvf_rss_set_key(nic, default_key);
853 nicvf_rss_set_cfg(nic, cfg);
855 /* Update default RSS RETA */
856 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
857 default_reta[idx] = idx % qcnt;
859 return nicvf_rss_reta_update(nic, default_reta,
860 NIC_MAX_RSS_IDR_TBL_SIZE);
864 nicvf_rss_term(struct nicvf *nic)
867 uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
869 nicvf_rss_set_cfg(nic, 0);
870 /* Redirect the output to 0th queue */
871 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
872 disable_rss[idx] = 0;
874 return nicvf_rss_reta_update(nic, disable_rss,
875 NIC_MAX_RSS_IDR_TBL_SIZE);
879 nicvf_loopback_config(struct nicvf *nic, bool enable)
881 if (enable && nic->loopback_supported == 0)
882 return NICVF_ERR_LOOPBACK_CFG;
884 return nicvf_mbox_loopback_config(nic, enable);
888 nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
890 stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
891 stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
892 stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
893 stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
894 stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
895 stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
896 stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
897 stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
898 stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
899 stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
900 stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
901 stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
902 stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
903 stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
905 stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
906 stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
907 stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
908 stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
909 stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
913 nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
917 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
918 qstats->q_rx_packets =
919 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
923 nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
927 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
928 qstats->q_tx_packets =
929 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);