 *   Copyright (C) Cavium, Inc. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 #include "nicvf_plat.h"
/*
 * Pairing of a VF register offset with a printable name; consumed by
 * nicvf_reg_dump(). Member names are grounded in the .offset/.name
 * accesses made by the dump loops below.
 */
struct nicvf_reg_info {
	uint32_t offset;	/* byte offset into the VF register BAR */
	const char *name;	/* register name used when logging */
};

/* Register polling parameters used by nicvf_qset_poll_reg() and friends */
#define NICVF_REG_POLL_ITER_NR   (10)	/* max poll iterations */
#define NICVF_REG_POLL_DELAY_US  (2000)	/* delay between iterations (us) */
/* Build a table entry from a register macro, stringizing its name */
#define NICVF_REG_INFO(reg) {reg, #reg}
54 static const struct nicvf_reg_info nicvf_reg_tbl[] = {
55 NICVF_REG_INFO(NIC_VF_CFG),
56 NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
57 NICVF_REG_INFO(NIC_VF_INT),
58 NICVF_REG_INFO(NIC_VF_INT_W1S),
59 NICVF_REG_INFO(NIC_VF_ENA_W1C),
60 NICVF_REG_INFO(NIC_VF_ENA_W1S),
61 NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
62 NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
65 static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
66 {NIC_VNIC_RSS_KEY_0_4 + 0, "NIC_VNIC_RSS_KEY_0"},
67 {NIC_VNIC_RSS_KEY_0_4 + 8, "NIC_VNIC_RSS_KEY_1"},
68 {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
69 {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
70 {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
71 {NIC_VNIC_TX_STAT_0_4 + 0, "NIC_VNIC_STAT_TX_OCTS"},
72 {NIC_VNIC_TX_STAT_0_4 + 8, "NIC_VNIC_STAT_TX_UCAST"},
73 {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
74 {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
75 {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
76 {NIC_VNIC_RX_STAT_0_13 + 0, "NIC_VNIC_STAT_RX_OCTS"},
77 {NIC_VNIC_RX_STAT_0_13 + 8, "NIC_VNIC_STAT_RX_UCAST"},
78 {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
79 {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
80 {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
81 {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
82 {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
83 {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
84 {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
85 {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
86 {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
87 {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
88 {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
89 {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
92 static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
93 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
94 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
95 NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
96 NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
97 NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
98 NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
99 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
100 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
101 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
102 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
105 static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
106 NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
107 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
108 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
111 static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
112 NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
113 NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
114 NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
115 NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
116 NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
117 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
118 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
119 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
120 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
121 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
124 static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
125 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
126 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
127 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
128 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
129 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
130 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
131 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
132 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
133 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
137 nicvf_base_init(struct nicvf *nic)
140 if (nic->subsystem_device_id == 0)
141 return NICVF_ERR_BASE_INIT;
143 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
144 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
146 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
147 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
149 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
150 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
151 NICVF_CAP_DISABLE_APAD;
156 /* dump on stdout if data is NULL */
158 nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
163 dump_stdout = data ? 0 : 1;
165 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
167 nicvf_log("%24s = 0x%" PRIx64 "\n",
168 nicvf_reg_tbl[i].name,
169 nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
171 *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
173 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
175 nicvf_log("%24s = 0x%" PRIx64 "\n",
176 nicvf_multi_reg_tbl[i].name,
178 nicvf_multi_reg_tbl[i].offset));
180 *data++ = nicvf_reg_read(nic,
181 nicvf_multi_reg_tbl[i].offset);
183 for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
184 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
186 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
187 nicvf_qset_cq_reg_tbl[i].name, q,
188 nicvf_queue_reg_read(nic,
189 nicvf_qset_cq_reg_tbl[i].offset, q));
191 *data++ = nicvf_queue_reg_read(nic,
192 nicvf_qset_cq_reg_tbl[i].offset, q);
194 for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
195 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
197 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
198 nicvf_qset_rq_reg_tbl[i].name, q,
199 nicvf_queue_reg_read(nic,
200 nicvf_qset_rq_reg_tbl[i].offset, q));
202 *data++ = nicvf_queue_reg_read(nic,
203 nicvf_qset_rq_reg_tbl[i].offset, q);
205 for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
206 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
208 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
209 nicvf_qset_sq_reg_tbl[i].name, q,
210 nicvf_queue_reg_read(nic,
211 nicvf_qset_sq_reg_tbl[i].offset, q));
213 *data++ = nicvf_queue_reg_read(nic,
214 nicvf_qset_sq_reg_tbl[i].offset, q);
216 for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
217 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
219 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
220 nicvf_qset_rbdr_reg_tbl[i].name, q,
221 nicvf_queue_reg_read(nic,
222 nicvf_qset_rbdr_reg_tbl[i].offset, q));
224 *data++ = nicvf_queue_reg_read(nic,
225 nicvf_qset_rbdr_reg_tbl[i].offset, q);
230 nicvf_reg_get_count(void)
234 nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
235 nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
236 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
237 MAX_CMP_QUEUES_PER_QS;
238 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
239 MAX_RCV_QUEUES_PER_QS;
240 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
241 MAX_SND_QUEUES_PER_QS;
242 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
243 MAX_RCV_BUF_DESC_RINGS_PER_QS;
249 nicvf_qset_config_internal(struct nicvf *nic, bool enable)
252 struct pf_qs_cfg pf_qs_cfg = {.value = 0};
254 pf_qs_cfg.ena = enable ? 1 : 0;
255 pf_qs_cfg.vnic = nic->vf_id;
256 ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
257 return ret ? NICVF_ERR_SET_QS : 0;
260 /* Requests PF to assign and enable Qset */
262 nicvf_qset_config(struct nicvf *nic)
265 return nicvf_qset_config_internal(nic, true);
269 nicvf_qset_reclaim(struct nicvf *nic)
272 return nicvf_qset_config_internal(nic, false);
/*
 * qsort() comparator for uint32_t values, ascending.
 *
 * Fix: the original returned (a - b) cast through signed arithmetic,
 * which wraps and yields the wrong sign whenever the true difference
 * exceeds INT32_MAX (e.g. 0 vs 0x80000001). Use explicit comparisons
 * instead; (x > y) - (x < y) is the canonical overflow-safe form.
 */
static int32_t
cmpfunc(const void *a, const void *b)
{
	uint32_t x = *(const uint32_t *)a;
	uint32_t y = *(const uint32_t *)b;

	return (x > y) - (x < y);
}
/*
 * Return the smallest entry in list[] that is >= val, or 0 when no
 * entry qualifies ("not in the list").
 *
 * Fix: the original qsort()ed the caller's array in place (a surprising
 * side effect) using a subtraction-based comparator that misorders
 * values whose difference exceeds INT32_MAX. A single O(n) scan for the
 * minimum qualifying entry gives the same result without either issue.
 */
static uint32_t
nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
{
	uint32_t i;
	uint32_t best = 0;	/* 0 doubles as the "not found" sentinel */

	for (i = 0; i < entries; i++)
		if (list[i] >= val && (best == 0 || list[i] < best))
			best = list[i];

	/* Not in the list */
	return best;
}
298 nicvf_handle_qset_err_intr(struct nicvf *nic)
303 nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
304 nicvf_reg_dump(nic, NULL);
306 for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
307 status = nicvf_queue_reg_read(
308 nic, NIC_QSET_CQ_0_7_STATUS, qidx);
309 if (!(status & NICVF_CQ_ERR_MASK))
312 if (status & NICVF_CQ_WR_FULL)
313 nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
314 if (status & NICVF_CQ_WR_DISABLE)
315 nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
316 if (status & NICVF_CQ_WR_FAULT)
317 nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
318 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
321 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
322 status = nicvf_queue_reg_read(
323 nic, NIC_QSET_SQ_0_7_STATUS, qidx);
324 if (!(status & NICVF_SQ_ERR_MASK))
327 if (status & NICVF_SQ_ERR_STOPPED)
328 nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
329 if (status & NICVF_SQ_ERR_SEND)
330 nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
331 if (status & NICVF_SQ_ERR_DPE)
332 nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
333 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
336 for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
337 status = nicvf_queue_reg_read(nic,
338 NIC_QSET_RBDR_0_1_STATUS0, qidx);
339 status &= NICVF_RBDR_FIFO_STATE_MASK;
340 status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
342 if (status == RBDR_FIFO_STATE_FAIL)
343 nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
344 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
347 nicvf_disable_all_interrupts(nic);
352 * Handle poll mode driver interested "mbox" and "queue-set error" interrupts.
353 * This function is not re-entrant.
354 * The caller should provide proper serialization.
357 nicvf_reg_poll_interrupts(struct nicvf *nic)
362 intr = nicvf_reg_read(nic, NIC_VF_INT);
363 if (intr & NICVF_INTR_MBOX_MASK) {
364 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
365 msg = nicvf_handle_mbx_intr(nic);
367 if (intr & NICVF_INTR_QS_ERR_MASK) {
368 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
369 nicvf_handle_qset_err_intr(nic);
375 nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
376 uint32_t bit_pos, uint32_t bits, uint64_t val)
380 int timeout = NICVF_REG_POLL_ITER_NR;
382 bit_mask = (1ULL << bits) - 1;
383 bit_mask = (bit_mask << bit_pos);
386 reg_val = nicvf_queue_reg_read(nic, offset, qidx);
387 if (((reg_val & bit_mask) >> bit_pos) == val)
389 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
392 return NICVF_ERR_REG_POLL;
396 nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
399 int timeout = NICVF_REG_POLL_ITER_NR;
400 struct nicvf_rbdr *rbdr = nic->rbdr;
402 /* Save head and tail pointers for freeing up buffers */
404 rbdr->head = nicvf_queue_reg_read(nic,
405 NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
406 rbdr->tail = nicvf_queue_reg_read(nic,
407 NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
408 rbdr->next_tail = rbdr->tail;
412 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
416 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
417 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
419 return NICVF_ERR_RBDR_DISABLE;
422 status = nicvf_queue_reg_read(nic,
423 NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
424 if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
426 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
429 return NICVF_ERR_RBDR_PREFETCH;
432 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
434 if (nicvf_qset_poll_reg(nic, qidx,
435 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
436 return NICVF_ERR_RBDR_RESET1;
438 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
439 if (nicvf_qset_poll_reg(nic, qidx,
440 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
441 return NICVF_ERR_RBDR_RESET2;
447 nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
451 val = nicvf_log2_u32(len) - len_shift;
453 assert(val >= NICVF_QSIZE_MIN_VAL);
454 assert(val <= NICVF_QSIZE_MAX_VAL);
459 nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
463 struct nicvf_rbdr *rbdr = nic->rbdr;
464 struct rbdr_cfg rbdr_cfg = {.value = 0};
466 ret = nicvf_qset_rbdr_reclaim(nic, qidx);
470 /* Set descriptor base address */
471 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
473 /* Enable RBDR & set queue size */
477 rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
479 rbdr_cfg.avg_con = 0;
480 rbdr_cfg.lines = rbdr->buffsz / 128;
482 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
484 /* Verify proper RBDR reset */
485 head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
486 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
489 return NICVF_ERR_RBDR_RESET;
495 nicvf_qsize_rbdr_roundup(uint32_t val)
497 uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
498 RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
499 RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
501 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
505 nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
506 uint16_t ridx, rbdr_pool_get_handler handler,
509 struct rbdr_entry_t *desc, *desc0;
510 struct nicvf_rbdr *rbdr = nic->rbdr;
512 nicvf_phys_addr_t phy;
514 assert(rbdr != NULL);
517 /* Don't fill beyond max numbers of desc */
518 while (count < rbdr->qlen_mask) {
519 if (count >= max_buffs)
521 desc0 = desc + count;
522 phy = handler(dev, nic);
524 desc0->full_addr = phy;
531 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
532 rbdr->tail = nicvf_queue_reg_read(nic,
533 NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
534 rbdr->next_tail = rbdr->tail;
540 nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
542 return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
546 nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
549 struct sq_cfg sq_cfg;
551 sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
553 /* Disable send queue */
554 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
556 /* Check if SQ is stopped */
557 if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
558 NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
559 return NICVF_ERR_SQ_DISABLE;
561 /* Reset send queue */
562 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
563 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
564 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
566 return NICVF_ERR_SQ_RESET;
572 nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
575 struct sq_cfg sq_cfg = {.value = 0};
577 ret = nicvf_qset_sq_reclaim(nic, qidx);
581 /* Send a mailbox msg to PF to config SQ */
582 if (nicvf_mbox_sq_config(nic, qidx))
583 return NICVF_ERR_SQ_PF_CFG;
585 /* Set queue base address */
586 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
588 /* Enable send queue & set queue size */
593 sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
594 sq_cfg.tstmp_bgx_intf = 0;
595 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
597 /* Ring doorbell so that H/W restarts processing SQEs */
598 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
604 nicvf_qsize_sq_roundup(uint32_t val)
606 uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
607 SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
608 SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
610 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
614 nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
616 /* Disable receive queue */
617 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
618 return nicvf_mbox_rq_sync(nic);
622 nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
624 struct pf_rq_cfg pf_rq_cfg = {.value = 0};
625 struct rq_cfg rq_cfg = {.value = 0};
627 if (nicvf_qset_rq_reclaim(nic, qidx))
628 return NICVF_ERR_RQ_CLAIM;
630 pf_rq_cfg.strip_pre_l2 = 0;
631 /* First cache line of RBDR data will be allocated into L2C */
632 pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
633 pf_rq_cfg.cq_qs = nic->vf_id;
634 pf_rq_cfg.cq_idx = qidx;
635 pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
636 pf_rq_cfg.rbdr_cont_idx = 0;
637 pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
638 pf_rq_cfg.rbdr_strt_idx = 0;
640 /* Send a mailbox msg to PF to config RQ */
641 if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
642 return NICVF_ERR_RQ_PF_CFG;
644 /* Select Rx backpressure */
645 if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
646 return NICVF_ERR_RQ_BP_CFG;
648 /* Send a mailbox msg to PF to config RQ drop */
649 if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
650 return NICVF_ERR_RQ_DROP_CFG;
652 /* Enable Receive queue */
654 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
660 nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
664 /* Disable completion queue */
665 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
666 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
667 return NICVF_ERR_CQ_DISABLE;
669 /* Reset completion queue */
670 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
671 tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
672 head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
674 return NICVF_ERR_CQ_RESET;
676 /* Disable timer threshold (doesn't get reset upon CQ reset) */
677 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
682 nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
685 struct cq_cfg cq_cfg = {.value = 0};
687 ret = nicvf_qset_cq_reclaim(nic, qidx);
691 /* Set completion queue base address */
692 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
696 /* Writes of CQE will be allocated into L2C */
698 cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
700 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
702 /* Set threshold value for interrupt generation */
703 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
704 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
709 nicvf_qsize_cq_roundup(uint32_t val)
711 uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
712 CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
713 CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
715 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
720 nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
724 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
726 val |= (STRIP_FIRST_VLAN << 25);
728 val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
730 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
734 nicvf_apad_config(struct nicvf *nic, bool enable)
738 /* APAD always enabled in this device */
739 if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
742 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
744 val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
746 val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
748 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
752 nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
756 uint64_t *keyptr = (uint64_t *)key;
758 addr = NIC_VNIC_RSS_KEY_0_4;
759 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
760 val = nicvf_cpu_to_be_64(*keyptr);
761 nicvf_reg_write(nic, addr, val);
762 addr += sizeof(uint64_t);
768 nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
772 uint64_t *keyptr = (uint64_t *)key;
774 addr = NIC_VNIC_RSS_KEY_0_4;
775 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
776 val = nicvf_reg_read(nic, addr);
777 *keyptr = nicvf_be_to_cpu_64(val);
778 addr += sizeof(uint64_t);
784 nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
786 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
790 nicvf_rss_get_cfg(struct nicvf *nic)
792 return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
796 nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
799 struct nicvf_rss_reta_info *rss = &nic->rss_info;
801 /* result will be stored in nic->rss_info.rss_size */
802 if (nicvf_mbox_get_rss_size(nic))
803 return NICVF_ERR_RSS_GET_SZ;
805 assert(rss->rss_size > 0);
806 rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
807 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
808 rss->ind_tbl[idx] = tbl[idx];
810 if (nicvf_mbox_config_rss(nic))
811 return NICVF_ERR_RSS_TBL_UPDATE;
817 nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
820 struct nicvf_rss_reta_info *rss = &nic->rss_info;
822 /* result will be stored in nic->rss_info.rss_size */
823 if (nicvf_mbox_get_rss_size(nic))
824 return NICVF_ERR_RSS_GET_SZ;
826 assert(rss->rss_size > 0);
827 rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
829 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
830 tbl[idx] = rss->ind_tbl[idx];
836 nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
839 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
840 uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
841 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
842 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
843 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
844 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
845 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
848 if (nic->cpi_alg != CPI_ALG_NONE)
854 /* Update default RSS key and cfg */
855 nicvf_rss_set_key(nic, default_key);
856 nicvf_rss_set_cfg(nic, cfg);
858 /* Update default RSS RETA */
859 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
860 default_reta[idx] = idx % qcnt;
862 return nicvf_rss_reta_update(nic, default_reta,
863 NIC_MAX_RSS_IDR_TBL_SIZE);
867 nicvf_rss_term(struct nicvf *nic)
870 uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
872 nicvf_rss_set_cfg(nic, 0);
873 /* Redirect the output to 0th queue */
874 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
875 disable_rss[idx] = 0;
877 return nicvf_rss_reta_update(nic, disable_rss,
878 NIC_MAX_RSS_IDR_TBL_SIZE);
882 nicvf_loopback_config(struct nicvf *nic, bool enable)
884 if (enable && nic->loopback_supported == 0)
885 return NICVF_ERR_LOOPBACK_CFG;
887 return nicvf_mbox_loopback_config(nic, enable);
891 nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
893 stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
894 stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
895 stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
896 stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
897 stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
898 stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
899 stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
900 stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
901 stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
902 stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
903 stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
904 stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
905 stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
906 stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
908 stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
909 stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
910 stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
911 stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
912 stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
916 nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
920 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
921 qstats->q_rx_packets =
922 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
926 nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
930 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
931 qstats->q_tx_packets =
932 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);