4 * Copyright (C) Cavium networks Ltd. 2016.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium networks nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 #include "nicvf_plat.h"
45 struct nicvf_reg_info {
50 #define NICVF_REG_POLL_ITER_NR (10)
51 #define NICVF_REG_POLL_DELAY_US (2000)
52 #define NICVF_REG_INFO(reg) {reg, #reg}
54 static const struct nicvf_reg_info nicvf_reg_tbl[] = {
55 NICVF_REG_INFO(NIC_VF_CFG),
56 NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
57 NICVF_REG_INFO(NIC_VF_INT),
58 NICVF_REG_INFO(NIC_VF_INT_W1S),
59 NICVF_REG_INFO(NIC_VF_ENA_W1C),
60 NICVF_REG_INFO(NIC_VF_ENA_W1S),
61 NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
62 NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
65 static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
66 {NIC_VNIC_RSS_KEY_0_4 + 0, "NIC_VNIC_RSS_KEY_0"},
67 {NIC_VNIC_RSS_KEY_0_4 + 8, "NIC_VNIC_RSS_KEY_1"},
68 {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
69 {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
70 {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
71 {NIC_VNIC_TX_STAT_0_4 + 0, "NIC_VNIC_STAT_TX_OCTS"},
72 {NIC_VNIC_TX_STAT_0_4 + 8, "NIC_VNIC_STAT_TX_UCAST"},
73 {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
74 {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
75 {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
76 {NIC_VNIC_RX_STAT_0_13 + 0, "NIC_VNIC_STAT_RX_OCTS"},
77 {NIC_VNIC_RX_STAT_0_13 + 8, "NIC_VNIC_STAT_RX_UCAST"},
78 {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
79 {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
80 {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
81 {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
82 {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
83 {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
84 {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
85 {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
86 {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
87 {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
88 {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
89 {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
92 static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
93 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
94 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
95 NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
96 NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
97 NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
98 NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
99 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
100 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
101 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
102 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
105 static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
106 NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
107 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
108 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
111 static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
112 NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
113 NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
114 NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
115 NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
116 NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
117 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
118 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
119 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
120 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
121 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
124 static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
125 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
126 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
127 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
128 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
129 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
130 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
131 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
132 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
133 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
137 nicvf_base_init(struct nicvf *nic)
140 if (nic->subsystem_device_id == 0)
141 return NICVF_ERR_BASE_INIT;
143 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
144 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
146 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
147 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
152 /* dump on stdout if data is NULL */
154 nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
159 dump_stdout = data ? 0 : 1;
161 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
163 nicvf_log("%24s = 0x%" PRIx64 "\n",
164 nicvf_reg_tbl[i].name,
165 nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
167 *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
169 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
171 nicvf_log("%24s = 0x%" PRIx64 "\n",
172 nicvf_multi_reg_tbl[i].name,
174 nicvf_multi_reg_tbl[i].offset));
176 *data++ = nicvf_reg_read(nic,
177 nicvf_multi_reg_tbl[i].offset);
179 for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
180 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
182 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
183 nicvf_qset_cq_reg_tbl[i].name, q,
184 nicvf_queue_reg_read(nic,
185 nicvf_qset_cq_reg_tbl[i].offset, q));
187 *data++ = nicvf_queue_reg_read(nic,
188 nicvf_qset_cq_reg_tbl[i].offset, q);
190 for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
191 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
193 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
194 nicvf_qset_rq_reg_tbl[i].name, q,
195 nicvf_queue_reg_read(nic,
196 nicvf_qset_rq_reg_tbl[i].offset, q));
198 *data++ = nicvf_queue_reg_read(nic,
199 nicvf_qset_rq_reg_tbl[i].offset, q);
201 for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
202 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
204 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
205 nicvf_qset_sq_reg_tbl[i].name, q,
206 nicvf_queue_reg_read(nic,
207 nicvf_qset_sq_reg_tbl[i].offset, q));
209 *data++ = nicvf_queue_reg_read(nic,
210 nicvf_qset_sq_reg_tbl[i].offset, q);
212 for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
213 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
215 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
216 nicvf_qset_rbdr_reg_tbl[i].name, q,
217 nicvf_queue_reg_read(nic,
218 nicvf_qset_rbdr_reg_tbl[i].offset, q));
220 *data++ = nicvf_queue_reg_read(nic,
221 nicvf_qset_rbdr_reg_tbl[i].offset, q);
226 nicvf_reg_get_count(void)
230 nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
231 nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
232 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
233 MAX_CMP_QUEUES_PER_QS;
234 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
235 MAX_RCV_QUEUES_PER_QS;
236 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
237 MAX_SND_QUEUES_PER_QS;
238 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
239 MAX_RCV_BUF_DESC_RINGS_PER_QS;
245 nicvf_qset_config_internal(struct nicvf *nic, bool enable)
248 struct pf_qs_cfg pf_qs_cfg = {.value = 0};
250 pf_qs_cfg.ena = enable ? 1 : 0;
251 pf_qs_cfg.vnic = nic->vf_id;
252 ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
253 return ret ? NICVF_ERR_SET_QS : 0;
/* Requests PF to assign and enable Qset */
int
nicvf_qset_config(struct nicvf *nic)
{
	/* Enable Qset */
	return nicvf_qset_config_internal(nic, true);
}
/* Requests PF to disable this VF's Qset */
int
nicvf_qset_reclaim(struct nicvf *nic)
{
	/* Disable Qset */
	return nicvf_qset_config_internal(nic, false);
}
/*
 * qsort() comparator for uint32_t values.
 * Uses the (x > y) - (x < y) idiom rather than subtraction: subtracting
 * two uint32_t values and narrowing to int yields the wrong sign whenever
 * the true difference exceeds INT_MAX (e.g. 0xFFFFFFFF vs 1), which would
 * corrupt the sort order relied upon by nicvf_roundup_list().
 */
static int
cmpfunc(const void *a, const void *b)
{
	const uint32_t x = *(const uint32_t *)a;
	const uint32_t y = *(const uint32_t *)b;

	return (x > y) - (x < y);
}

/*
 * Round @val up to the nearest entry of @list.
 * @list (of @entries elements) is sorted in place; returns the smallest
 * entry >= @val, or 0 when @val is larger than every entry.
 */
static uint32_t
nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
{
	uint32_t i;

	qsort(list, entries, sizeof(uint32_t), cmpfunc);
	for (i = 0; i < entries; i++)
		if (val <= list[i])
			break;
	/* Not in the list */
	if (i >= entries)
		return 0;
	else
		return list[i];
}
294 nicvf_handle_qset_err_intr(struct nicvf *nic)
299 nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
300 nicvf_reg_dump(nic, NULL);
302 for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
303 status = nicvf_queue_reg_read(
304 nic, NIC_QSET_CQ_0_7_STATUS, qidx);
305 if (!(status & NICVF_CQ_ERR_MASK))
308 if (status & NICVF_CQ_WR_FULL)
309 nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
310 if (status & NICVF_CQ_WR_DISABLE)
311 nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
312 if (status & NICVF_CQ_WR_FAULT)
313 nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
314 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
317 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
318 status = nicvf_queue_reg_read(
319 nic, NIC_QSET_SQ_0_7_STATUS, qidx);
320 if (!(status & NICVF_SQ_ERR_MASK))
323 if (status & NICVF_SQ_ERR_STOPPED)
324 nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
325 if (status & NICVF_SQ_ERR_SEND)
326 nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
327 if (status & NICVF_SQ_ERR_DPE)
328 nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
329 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
332 for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
333 status = nicvf_queue_reg_read(nic,
334 NIC_QSET_RBDR_0_1_STATUS0, qidx);
335 status &= NICVF_RBDR_FIFO_STATE_MASK;
336 status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
338 if (status == RBDR_FIFO_STATE_FAIL)
339 nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
340 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
343 nicvf_disable_all_interrupts(nic);
348 * Handle poll mode driver interested "mbox" and "queue-set error" interrupts.
349 * This function is not re-entrant.
350 * The caller should provide proper serialization.
353 nicvf_reg_poll_interrupts(struct nicvf *nic)
358 intr = nicvf_reg_read(nic, NIC_VF_INT);
359 if (intr & NICVF_INTR_MBOX_MASK) {
360 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
361 msg = nicvf_handle_mbx_intr(nic);
363 if (intr & NICVF_INTR_QS_ERR_MASK) {
364 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
365 nicvf_handle_qset_err_intr(nic);
371 nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
372 uint32_t bit_pos, uint32_t bits, uint64_t val)
376 int timeout = NICVF_REG_POLL_ITER_NR;
378 bit_mask = (1ULL << bits) - 1;
379 bit_mask = (bit_mask << bit_pos);
382 reg_val = nicvf_queue_reg_read(nic, offset, qidx);
383 if (((reg_val & bit_mask) >> bit_pos) == val)
385 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
388 return NICVF_ERR_REG_POLL;
392 nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
395 int timeout = NICVF_REG_POLL_ITER_NR;
396 struct nicvf_rbdr *rbdr = nic->rbdr;
398 /* Save head and tail pointers for freeing up buffers */
400 rbdr->head = nicvf_queue_reg_read(nic,
401 NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
402 rbdr->tail = nicvf_queue_reg_read(nic,
403 NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
404 rbdr->next_tail = rbdr->tail;
408 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
412 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
413 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
415 return NICVF_ERR_RBDR_DISABLE;
418 status = nicvf_queue_reg_read(nic,
419 NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
420 if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
422 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
425 return NICVF_ERR_RBDR_PREFETCH;
428 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
430 if (nicvf_qset_poll_reg(nic, qidx,
431 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
432 return NICVF_ERR_RBDR_RESET1;
434 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
435 if (nicvf_qset_poll_reg(nic, qidx,
436 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
437 return NICVF_ERR_RBDR_RESET2;
443 nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
447 val = ((uint32_t)log2(len) - len_shift);
448 assert(val >= NICVF_QSIZE_MIN_VAL);
449 assert(val <= NICVF_QSIZE_MAX_VAL);
454 nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
458 struct nicvf_rbdr *rbdr = nic->rbdr;
459 struct rbdr_cfg rbdr_cfg = {.value = 0};
461 ret = nicvf_qset_rbdr_reclaim(nic, qidx);
465 /* Set descriptor base address */
466 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
468 /* Enable RBDR & set queue size */
472 rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
474 rbdr_cfg.avg_con = 0;
475 rbdr_cfg.lines = rbdr->buffsz / 128;
477 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
479 /* Verify proper RBDR reset */
480 head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
481 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
484 return NICVF_ERR_RBDR_RESET;
490 nicvf_qsize_rbdr_roundup(uint32_t val)
492 uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
493 RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
494 RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
496 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
500 nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
501 uint16_t ridx, rbdr_pool_get_handler handler,
504 struct rbdr_entry_t *desc, *desc0;
505 struct nicvf_rbdr *rbdr = nic->rbdr;
507 nicvf_phys_addr_t phy;
509 assert(rbdr != NULL);
512 /* Don't fill beyond max numbers of desc */
513 while (count < rbdr->qlen_mask) {
514 if (count >= max_buffs)
516 desc0 = desc + count;
517 phy = handler(dev, nic);
519 desc0->full_addr = phy;
526 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
527 rbdr->tail = nicvf_queue_reg_read(nic,
528 NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
529 rbdr->next_tail = rbdr->tail;
535 nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
537 return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
541 nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
544 struct sq_cfg sq_cfg;
546 sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
548 /* Disable send queue */
549 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
551 /* Check if SQ is stopped */
552 if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
553 NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
554 return NICVF_ERR_SQ_DISABLE;
556 /* Reset send queue */
557 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
558 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
559 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
561 return NICVF_ERR_SQ_RESET;
567 nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
570 struct sq_cfg sq_cfg = {.value = 0};
572 ret = nicvf_qset_sq_reclaim(nic, qidx);
576 /* Send a mailbox msg to PF to config SQ */
577 if (nicvf_mbox_sq_config(nic, qidx))
578 return NICVF_ERR_SQ_PF_CFG;
580 /* Set queue base address */
581 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
583 /* Enable send queue & set queue size */
587 sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
588 sq_cfg.tstmp_bgx_intf = 0;
589 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
591 /* Ring doorbell so that H/W restarts processing SQEs */
592 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
598 nicvf_qsize_sq_roundup(uint32_t val)
600 uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
601 SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
602 SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
604 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
608 nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
610 /* Disable receive queue */
611 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
612 return nicvf_mbox_rq_sync(nic);
616 nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
618 struct pf_rq_cfg pf_rq_cfg = {.value = 0};
619 struct rq_cfg rq_cfg = {.value = 0};
621 if (nicvf_qset_rq_reclaim(nic, qidx))
622 return NICVF_ERR_RQ_CLAIM;
624 pf_rq_cfg.strip_pre_l2 = 0;
625 /* First cache line of RBDR data will be allocated into L2C */
626 pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
627 pf_rq_cfg.cq_qs = nic->vf_id;
628 pf_rq_cfg.cq_idx = qidx;
629 pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
630 pf_rq_cfg.rbdr_cont_idx = 0;
631 pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
632 pf_rq_cfg.rbdr_strt_idx = 0;
634 /* Send a mailbox msg to PF to config RQ */
635 if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
636 return NICVF_ERR_RQ_PF_CFG;
638 /* Select Rx backpressure */
639 if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
640 return NICVF_ERR_RQ_BP_CFG;
642 /* Send a mailbox msg to PF to config RQ drop */
643 if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
644 return NICVF_ERR_RQ_DROP_CFG;
646 /* Enable Receive queue */
648 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
654 nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
658 /* Disable completion queue */
659 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
660 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
661 return NICVF_ERR_CQ_DISABLE;
663 /* Reset completion queue */
664 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
665 tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
666 head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
668 return NICVF_ERR_CQ_RESET;
670 /* Disable timer threshold (doesn't get reset upon CQ reset) */
671 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
676 nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
679 struct cq_cfg cq_cfg = {.value = 0};
681 ret = nicvf_qset_cq_reclaim(nic, qidx);
685 /* Set completion queue base address */
686 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
690 /* Writes of CQE will be allocated into L2C */
692 cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
694 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
696 /* Set threshold value for interrupt generation */
697 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
698 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
703 nicvf_qsize_cq_roundup(uint32_t val)
705 uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
706 CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
707 CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
709 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
714 nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
718 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
720 val |= (STRIP_FIRST_VLAN << 25);
722 val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
724 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
728 nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
732 uint64_t *keyptr = (uint64_t *)key;
734 addr = NIC_VNIC_RSS_KEY_0_4;
735 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
736 val = nicvf_cpu_to_be_64(*keyptr);
737 nicvf_reg_write(nic, addr, val);
738 addr += sizeof(uint64_t);
744 nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
748 uint64_t *keyptr = (uint64_t *)key;
750 addr = NIC_VNIC_RSS_KEY_0_4;
751 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
752 val = nicvf_reg_read(nic, addr);
753 *keyptr = nicvf_be_to_cpu_64(val);
754 addr += sizeof(uint64_t);
760 nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
762 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
766 nicvf_rss_get_cfg(struct nicvf *nic)
768 return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
772 nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
775 struct nicvf_rss_reta_info *rss = &nic->rss_info;
777 /* result will be stored in nic->rss_info.rss_size */
778 if (nicvf_mbox_get_rss_size(nic))
779 return NICVF_ERR_RSS_GET_SZ;
781 assert(rss->rss_size > 0);
782 rss->hash_bits = (uint8_t)log2(rss->rss_size);
783 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
784 rss->ind_tbl[idx] = tbl[idx];
786 if (nicvf_mbox_config_rss(nic))
787 return NICVF_ERR_RSS_TBL_UPDATE;
793 nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
796 struct nicvf_rss_reta_info *rss = &nic->rss_info;
798 /* result will be stored in nic->rss_info.rss_size */
799 if (nicvf_mbox_get_rss_size(nic))
800 return NICVF_ERR_RSS_GET_SZ;
802 assert(rss->rss_size > 0);
803 rss->hash_bits = (uint8_t)log2(rss->rss_size);
804 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
805 tbl[idx] = rss->ind_tbl[idx];
811 nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
814 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
815 uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
816 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
817 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
818 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
819 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
820 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
823 if (nic->cpi_alg != CPI_ALG_NONE)
829 /* Update default RSS key and cfg */
830 nicvf_rss_set_key(nic, default_key);
831 nicvf_rss_set_cfg(nic, cfg);
833 /* Update default RSS RETA */
834 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
835 default_reta[idx] = idx % qcnt;
837 return nicvf_rss_reta_update(nic, default_reta,
838 NIC_MAX_RSS_IDR_TBL_SIZE);
842 nicvf_rss_term(struct nicvf *nic)
845 uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
847 nicvf_rss_set_cfg(nic, 0);
848 /* Redirect the output to 0th queue */
849 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
850 disable_rss[idx] = 0;
852 return nicvf_rss_reta_update(nic, disable_rss,
853 NIC_MAX_RSS_IDR_TBL_SIZE);
857 nicvf_loopback_config(struct nicvf *nic, bool enable)
859 if (enable && nic->loopback_supported == 0)
860 return NICVF_ERR_LOOPBACK_CFG;
862 return nicvf_mbox_loopback_config(nic, enable);
866 nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
868 stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
869 stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
870 stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
871 stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
872 stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
873 stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
874 stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
875 stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
876 stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
877 stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
878 stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
879 stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
880 stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
881 stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
883 stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
884 stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
885 stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
886 stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
887 stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
891 nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
895 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
896 qstats->q_rx_packets =
897 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
901 nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
905 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
906 qstats->q_tx_packets =
907 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);