1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
15 #include "nicvf_plat.h"
17 struct nicvf_reg_info {
22 #define NICVF_REG_POLL_ITER_NR (10)
23 #define NICVF_REG_POLL_DELAY_US (2000)
24 #define NICVF_REG_INFO(reg) {reg, #reg}
/* Per-VF (non-queue) registers dumped by nicvf_reg_dump(); one entry per
 * register, name captured via the NICVF_REG_INFO stringify macro.
 * NOTE(review): listing appears truncated — closing "};" not visible here.
 */
26 static const struct nicvf_reg_info nicvf_reg_tbl[] = {
27 NICVF_REG_INFO(NIC_VF_CFG),
28 NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
29 NICVF_REG_INFO(NIC_VF_INT),
30 NICVF_REG_INFO(NIC_VF_INT_W1S),
31 NICVF_REG_INFO(NIC_VF_ENA_W1C),
32 NICVF_REG_INFO(NIC_VF_ENA_W1S),
33 NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
34 NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
/* Multi-instance registers: RSS key words and TX/RX statistics counters.
 * Each entry is a base register plus an 8-byte offset (one 64-bit register
 * per instance), with an explicit human-readable name since the stringified
 * macro name would not distinguish instances.
 */
37 static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
38 {NIC_VNIC_RSS_KEY_0_4 + 0, "NIC_VNIC_RSS_KEY_0"},
39 {NIC_VNIC_RSS_KEY_0_4 + 8, "NIC_VNIC_RSS_KEY_1"},
40 {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
41 {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
42 {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
43 {NIC_VNIC_TX_STAT_0_4 + 0, "NIC_VNIC_STAT_TX_OCTS"},
44 {NIC_VNIC_TX_STAT_0_4 + 8, "NIC_VNIC_STAT_TX_UCAST"},
45 {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
46 {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
47 {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
48 {NIC_VNIC_RX_STAT_0_13 + 0, "NIC_VNIC_STAT_RX_OCTS"},
49 {NIC_VNIC_RX_STAT_0_13 + 8, "NIC_VNIC_STAT_RX_UCAST"},
50 {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
51 {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
52 {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
53 {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
54 {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
55 {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
56 {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
57 {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
58 {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
59 {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
60 {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
61 {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
/* Per-completion-queue registers; dumped once per CQ index
 * (0..MAX_CMP_QUEUES_PER_QS-1) by nicvf_reg_dump().
 */
64 static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
65 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
66 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
67 NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
68 NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
69 NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
70 NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
71 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
72 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
73 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
74 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
/* Per-receive-queue registers; dumped once per RQ index by nicvf_reg_dump(). */
77 static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
78 NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
79 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
80 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
/* Per-send-queue registers; dumped once per SQ index by nicvf_reg_dump(). */
83 static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
84 NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
85 NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
86 NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
87 NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
88 NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
89 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
90 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
91 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
92 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
93 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
/* Per-RBDR (receive buffer descriptor ring) registers; dumped once per
 * ring index by nicvf_reg_dump().
 */
96 static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
97 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
98 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
99 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
100 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
101 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
102 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
103 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
104 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
105 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
/* Initialize base-level capabilities for this VF from its PCI subsystem
 * device id: rejects an unset id, then sets hwcap flags per silicon variant
 * (CN88XX pass2, CN81XX, CN83XX).  CN83XX additionally supports disabling
 * the alignment pad (NICVF_CAP_DISABLE_APAD).
 * Returns NICVF_ERR_BASE_INIT on an unidentified device; presumably 0 on
 * success (return statement not visible in this listing — confirm).
 */
109 nicvf_base_init(struct nicvf *nic)
/* A zero subsystem device id means the device cannot be identified. */
112 if (nic->subsystem_device_id == 0)
113 return NICVF_ERR_BASE_INIT;
115 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
116 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
118 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
119 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
121 if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
122 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
123 NICVF_CAP_DISABLE_APAD;
/* Dump every register listed in the tables above.  If 'data' is NULL the
 * values are logged via nicvf_log(); otherwise each value is stored into
 * *data++ (caller must size the buffer via nicvf_reg_get_count()).
 * The per-entry log-vs-store selection uses dump_stdout; the branching
 * keywords are not visible in this listing.
 */
128 /* dump on stdout if data is NULL */
130 nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
135 dump_stdout = data ? 0 : 1;
/* Plain per-VF registers. */
137 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
139 nicvf_log("%24s = 0x%" PRIx64 "\n",
140 nicvf_reg_tbl[i].name,
141 nicvf_reg_read(nic, nicvf_reg_tbl[i].offset))
143 *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
/* Multi-instance registers (RSS key words, TX/RX stats). */
145 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
147 nicvf_log("%24s = 0x%" PRIx64 "\n",
148 nicvf_multi_reg_tbl[i].name,
150 nicvf_multi_reg_tbl[i].offset));
152 *data++ = nicvf_reg_read(nic,
153 nicvf_multi_reg_tbl[i].offset);
/* Completion-queue registers, per CQ index. */
155 for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
156 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
158 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
159 nicvf_qset_cq_reg_tbl[i].name, q,
160 nicvf_queue_reg_read(nic,
161 nicvf_qset_cq_reg_tbl[i].offset, q));
163 *data++ = nicvf_queue_reg_read(nic,
164 nicvf_qset_cq_reg_tbl[i].offset, q);
/* Receive-queue registers, per RQ index. */
166 for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
167 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
169 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
170 nicvf_qset_rq_reg_tbl[i].name, q,
171 nicvf_queue_reg_read(nic,
172 nicvf_qset_rq_reg_tbl[i].offset, q));
174 *data++ = nicvf_queue_reg_read(nic,
175 nicvf_qset_rq_reg_tbl[i].offset, q);
/* Send-queue registers, per SQ index. */
177 for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
178 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
180 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
181 nicvf_qset_sq_reg_tbl[i].name, q,
182 nicvf_queue_reg_read(nic,
183 nicvf_qset_sq_reg_tbl[i].offset, q));
185 *data++ = nicvf_queue_reg_read(nic,
186 nicvf_qset_sq_reg_tbl[i].offset, q);
/* RBDR registers, per ring index. */
188 for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
189 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
191 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
192 nicvf_qset_rbdr_reg_tbl[i].name, q,
193 nicvf_queue_reg_read(nic,
194 nicvf_qset_rbdr_reg_tbl[i].offset, q));
196 *data++ = nicvf_queue_reg_read(nic,
197 nicvf_qset_rbdr_reg_tbl[i].offset, q);
/* Total number of 64-bit registers nicvf_reg_dump() emits: the per-VF and
 * multi-instance tables plus each per-queue table multiplied by its queue
 * count.  Callers use this to size the buffer passed to nicvf_reg_dump().
 */
202 nicvf_reg_get_count(void)
206 nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
207 nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
208 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
209 MAX_CMP_QUEUES_PER_QS;
210 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
211 MAX_RCV_QUEUES_PER_QS;
212 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
213 MAX_SND_QUEUES_PER_QS;
214 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
215 MAX_RCV_BUF_DESC_RINGS_PER_QS;
/* Ask the PF (via mailbox) to enable or disable this VF's Qset.
 * Builds a pf_qs_cfg with only 'ena' and 'vnic' set and maps any mailbox
 * failure to NICVF_ERR_SET_QS.
 */
221 nicvf_qset_config_internal(struct nicvf *nic, bool enable)
224 struct pf_qs_cfg pf_qs_cfg = {.value = 0};
226 pf_qs_cfg.ena = enable ? 1 : 0;
227 pf_qs_cfg.vnic = nic->vf_id;
228 ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
229 return ret ? NICVF_ERR_SET_QS : 0;
232 /* Requests PF to assign and enable Qset */
234 nicvf_qset_config(struct nicvf *nic)
/* Thin wrapper: enable = true. */
237 return nicvf_qset_config_internal(nic, true);
/* Counterpart of nicvf_qset_config(): asks the PF to disable the Qset. */
241 nicvf_qset_reclaim(struct nicvf *nic)
244 return nicvf_qset_config_internal(nic, false);
/* qsort() comparator for ascending uint32_t.
 * NOTE(review): the subtraction is performed on uint32_t values and the
 * result converted to int — for operands differing by more than INT_MAX
 * the sign of the result is unreliable; safe here only if the queue-size
 * lists stay small-valued.  Consider (a > b) - (a < b).
 */
248 cmpfunc(const void *a, const void *b)
250 return (*(const uint32_t *)a - *(const uint32_t *)b);
/* Round 'val' up to the nearest entry of 'list': sorts the list ascending,
 * then scans for the first entry >= val (loop body not visible in this
 * listing).  "Not in the list" comment suggests a fallback when val exceeds
 * every entry — confirm against full source.
 */
254 nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
258 qsort(list, entries, sizeof(uint32_t), cmpfunc);
259 for (i = 0; i < entries; i++)
262 /* Not in the list */
/* Qset error-interrupt handler: dumps all registers to the log, then for
 * each CQ, SQ and RBDR reads its status register, logs every error bit that
 * is set, and writes 0 back to clear the status.  Finally masks all VF
 * interrupts via nicvf_disable_all_interrupts().
 */
270 nicvf_handle_qset_err_intr(struct nicvf *nic)
275 nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
276 nicvf_reg_dump(nic, NULL);
/* Completion-queue errors. */
278 for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
279 status = nicvf_queue_reg_read(
280 nic, NIC_QSET_CQ_0_7_STATUS, qidx);
/* Skip queues with no error bits set. */
281 if (!(status & NICVF_CQ_ERR_MASK))
284 if (status & NICVF_CQ_WR_FULL)
285 nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
286 if (status & NICVF_CQ_WR_DISABLE)
287 nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
288 if (status & NICVF_CQ_WR_FAULT)
289 nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
/* Clear the latched status. */
290 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
/* Send-queue errors. */
293 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
294 status = nicvf_queue_reg_read(
295 nic, NIC_QSET_SQ_0_7_STATUS, qidx);
296 if (!(status & NICVF_SQ_ERR_MASK))
299 if (status & NICVF_SQ_ERR_STOPPED)
300 nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
301 if (status & NICVF_SQ_ERR_SEND)
302 nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
303 if (status & NICVF_SQ_ERR_DPE)
304 nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
305 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
/* RBDR FIFO-state errors. */
308 for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
309 status = nicvf_queue_reg_read(nic,
310 NIC_QSET_RBDR_0_1_STATUS0, qidx);
/* Extract the FIFO-state field. */
311 status &= NICVF_RBDR_FIFO_STATE_MASK;
312 status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
314 if (status == RBDR_FIFO_STATE_FAIL)
315 nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
316 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
319 nicvf_disable_all_interrupts(nic);
324 * Handle poll mode driver interested "mbox" and "queue-set error" interrupts.
325 * This function is not re-entrant.
326 * The caller should provide proper serialization.
329 nicvf_reg_poll_interrupts(struct nicvf *nic)
/* Read pending interrupt bits; ack each handled class by writing its mask
 * back to NIC_VF_INT before dispatching to the specific handler. */
334 intr = nicvf_reg_read(nic, NIC_VF_INT);
335 if (intr & NICVF_INTR_MBOX_MASK) {
336 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
337 msg = nicvf_handle_mbx_intr(nic);
339 if (intr & NICVF_INTR_QS_ERR_MASK) {
340 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
341 nicvf_handle_qset_err_intr(nic);
/* Poll a bit-field of a per-queue register until it equals 'val'.
 * The field is 'bits' wide starting at 'bit_pos'.  Retries up to
 * NICVF_REG_POLL_ITER_NR times with NICVF_REG_POLL_DELAY_US between reads
 * (the retry-loop construct itself is not visible in this listing).
 * Returns NICVF_ERR_REG_POLL on timeout.
 */
347 nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
348 uint32_t bit_pos, uint32_t bits, uint64_t val)
352 int timeout = NICVF_REG_POLL_ITER_NR;
/* Build a mask of 'bits' ones shifted to bit_pos. */
354 bit_mask = (1ULL << bits) - 1;
355 bit_mask = (bit_mask << bit_pos);
358 reg_val = nicvf_queue_reg_read(nic, offset, qidx);
359 if (((reg_val & bit_mask) >> bit_pos) == val)
361 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
364 return NICVF_ERR_REG_POLL;
/* Quiesce and reset one RBDR ring:
 *  1. snapshot head/tail (descriptor indices; registers hold byte offsets,
 *     hence the >> 3) so the caller can free outstanding buffers,
 *  2. disable the ring and poll STATUS0 until it reports disabled,
 *  3. wait for the prefetch FIFO to drain (low and high 32-bit halves of
 *     PRFCH_STATUS equal), retrying with delays,
 *  4. put the ring through the reset (0x02) and idle (0x00) CFG states,
 *     polling STATUS0 bits [63:62] after each step.
 * Returns a distinct NICVF_ERR_* for each stage that times out.
 */
368 nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
371 int timeout = NICVF_REG_POLL_ITER_NR;
372 struct nicvf_rbdr *rbdr = nic->rbdr;
374 /* Save head and tail pointers for freeing up buffers */
376 rbdr->head = nicvf_queue_reg_read(nic,
377 NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
378 rbdr->tail = nicvf_queue_reg_read(nic,
379 NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
380 rbdr->next_tail = rbdr->tail;
/* Disable the ring, then confirm via STATUS0 poll. */
384 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
388 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
389 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
391 return NICVF_ERR_RBDR_DISABLE;
/* Wait for prefetch FIFO to drain: both 32-bit halves must match. */
394 status = nicvf_queue_reg_read(nic,
395 NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
396 if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
398 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
401 return NICVF_ERR_RBDR_PREFETCH;
/* Reset sequence: CFG -> reset state, wait for state 0x02 ... */
404 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
406 if (nicvf_qset_poll_reg(nic, qidx,
407 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
408 return NICVF_ERR_RBDR_RESET1;
/* ... then CFG -> 0, wait for state 0x00 (idle). */
410 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
411 if (nicvf_qset_poll_reg(nic, qidx,
412 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
413 return NICVF_ERR_RBDR_RESET2;
/* Convert a queue length (power of two) to the hardware QSIZE encoding:
 * log2(len) - len_shift.  Asserts the result is within the legal
 * NICVF_QSIZE_MIN_VAL..NICVF_QSIZE_MAX_VAL register range.
 */
419 nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
423 val = nicvf_log2_u32(len) - len_shift;
425 assert(val >= NICVF_QSIZE_MIN_VAL);
426 assert(val <= NICVF_QSIZE_MAX_VAL);
/* Configure and enable one RBDR ring: reclaim/reset it first, program the
 * descriptor base address, then write CFG with the encoded queue size and
 * buffer size in 128-byte lines.  Finally verifies head and tail registers
 * (check expression not visible here) and returns NICVF_ERR_RBDR_RESET if
 * the ring did not come up clean.
 */
431 nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
435 struct nicvf_rbdr *rbdr = nic->rbdr;
436 struct rbdr_cfg rbdr_cfg = {.value = 0};
438 ret = nicvf_qset_rbdr_reclaim(nic, qidx);
442 /* Set descriptor base address */
443 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
445 /* Enable RBDR & set queue size */
449 rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
451 rbdr_cfg.avg_con = 0;
/* Buffer size is programmed in units of 128-byte cache lines. */
452 rbdr_cfg.lines = rbdr->buffsz / 128;
454 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
456 /* Verify proper RBDR reset */
457 head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
458 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
461 return NICVF_ERR_RBDR_RESET;
/* Round a requested RBDR length up to the nearest supported ring size. */
467 nicvf_qsize_rbdr_roundup(uint32_t val)
469 uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
470 RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
471 RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
473 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
/* Pre-fill ring 'ridx' with receive buffers obtained from 'handler'
 * (a pool-get callback taking dev/nic and returning an IOVA).  Fills up to
 * max_buffs entries but never the full ring (count < qlen_mask), writes
 * each address into the descriptor, then rings the doorbell with the total
 * count and re-reads the hardware tail (>> 3 converts byte offset to
 * descriptor index).  'desc' initialization is not visible in this listing.
 */
477 nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
478 uint16_t ridx, rbdr_pool_get_handler handler,
481 struct rbdr_entry_t *desc, *desc0;
482 struct nicvf_rbdr *rbdr = nic->rbdr;
484 nicvf_iova_addr_t phy;
486 assert(rbdr != NULL);
489 /* Don't fill beyond max numbers of desc */
490 while (count < rbdr->qlen_mask) {
491 if (count >= max_buffs)
493 desc0 = desc + count;
494 phy = handler(dev, nic);
496 desc0->full_addr = phy;
/* Publish the new buffers to hardware and resync cached tail. */
503 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
504 rbdr->tail = nicvf_queue_reg_read(nic,
505 NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
506 rbdr->next_tail = rbdr->tail;
/* Return the raw RBDR STATUS0 value for ring 'qidx' (non-zero while the
 * ring has activity/state; callers treat it as a truthy "active" flag). */
512 nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
514 return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
/* Disable and reset one send queue.  Reads the current CFG first so the
 * "stopped" poll is only performed when the SQ was actually enabled, then
 * resets it and verifies head/tail (registers hold byte offsets; >> 4
 * converts to SQ-entry index; the verification expression is not visible
 * in this listing).  Returns NICVF_ERR_SQ_DISABLE or NICVF_ERR_SQ_RESET
 * on failure.
 */
518 nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
521 struct sq_cfg sq_cfg;
523 sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
525 /* Disable send queue */
526 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
528 /* Check if SQ is stopped */
529 if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
530 NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
531 return NICVF_ERR_SQ_DISABLE;
533 /* Reset send queue */
534 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
535 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
536 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
538 return NICVF_ERR_SQ_RESET;
/* Configure and enable one send queue for 'txq': reclaim/reset it, ask the
 * PF (mailbox) to configure it, program the descriptor base address, then
 * write CFG with the encoded queue size.  The final doorbell write of 0
 * kicks the hardware to restart SQE processing.
 * Returns NICVF_ERR_SQ_PF_CFG if the mailbox config fails; earlier reclaim
 * errors propagate via 'ret' (return path not visible in this listing).
 */
544 nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
547 struct sq_cfg sq_cfg = {.value = 0};
549 ret = nicvf_qset_sq_reclaim(nic, qidx);
553 /* Send a mailbox msg to PF to config SQ */
554 if (nicvf_mbox_sq_config(nic, qidx))
555 return NICVF_ERR_SQ_PF_CFG;
557 /* Set queue base address */
558 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
560 /* Enable send queue & set queue size */
565 sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
566 sq_cfg.tstmp_bgx_intf = 0;
567 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
569 /* Ring doorbell so that H/W restarts processing SQEs */
570 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
/* Round a requested SQ length up to the nearest supported queue size. */
576 nicvf_qsize_sq_roundup(uint32_t val)
578 uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
579 SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
580 SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
582 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
/* Disable one receive queue and synchronize with the PF via mailbox;
 * returns the mailbox sync result. */
586 nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
588 /* Disable receive queue */
589 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
590 return nicvf_mbox_rq_sync(nic);
/* Configure and enable one receive queue for 'rxq':
 *  1. reclaim (disable + PF sync) the RQ,
 *  2. build the PF-side RQ config — CQ and RBDR bindings all point at this
 *     VF's own Qset (cont/start RBDR index 0), first RBDR cache line
 *     allocated into L2C,
 *  3. mailbox calls to configure the RQ, its backpressure, and its drop
 *     behavior (both driven by rxq->rx_drop_en),
 *  4. enable the RQ by writing rq_cfg (the 'ena' assignment is not visible
 *     in this listing).
 * Returns a distinct NICVF_ERR_* for each failing stage.
 */
594 nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
596 struct pf_rq_cfg pf_rq_cfg = {.value = 0};
597 struct rq_cfg rq_cfg = {.value = 0};
599 if (nicvf_qset_rq_reclaim(nic, qidx))
600 return NICVF_ERR_RQ_CLAIM;
602 pf_rq_cfg.strip_pre_l2 = 0;
603 /* First cache line of RBDR data will be allocated into L2C */
604 pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
605 pf_rq_cfg.cq_qs = nic->vf_id;
606 pf_rq_cfg.cq_idx = qidx;
607 pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
608 pf_rq_cfg.rbdr_cont_idx = 0;
609 pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
610 pf_rq_cfg.rbdr_strt_idx = 0;
612 /* Send a mailbox msg to PF to config RQ */
613 if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
614 return NICVF_ERR_RQ_PF_CFG;
616 /* Select Rx backpressure */
617 if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
618 return NICVF_ERR_RQ_BP_CFG;
620 /* Send a mailbox msg to PF to config RQ drop */
621 if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
622 return NICVF_ERR_RQ_DROP_CFG;
624 /* Enable Receive queue */
626 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
/* Disable and reset one completion queue: write 0 to CFG and poll CFG bit
 * 42 until it clears, reset the CQ, verify head/tail (registers hold byte
 * offsets; >> 9 converts to CQE index; the verification expression is not
 * visible in this listing), then clear CFG2 since the timer threshold is
 * not cleared by a CQ reset.  Returns NICVF_ERR_CQ_DISABLE or
 * NICVF_ERR_CQ_RESET on failure.
 */
632 nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
636 /* Disable completion queue */
637 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
638 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
639 return NICVF_ERR_CQ_DISABLE;
641 /* Reset completion queue */
642 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
643 tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
644 head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
646 return NICVF_ERR_CQ_RESET;
648 /* Disable timer threshold (doesn't get reset upon CQ reset) */
649 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
/* Configure and enable one completion queue for 'rxq': reclaim/reset it,
 * program the base address, write CFG with the encoded queue size (enable
 * and caching field assignments are not fully visible in this listing),
 * then zero both the interrupt threshold and the CFG2 timer threshold.
 */
654 nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
657 struct cq_cfg cq_cfg = {.value = 0};
659 ret = nicvf_qset_cq_reclaim(nic, qidx);
663 /* Set completion queue base address */
664 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
668 /* Writes of CQE will be allocated into L2C */
670 cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
672 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
674 /* Set threshold value for interrupt generation */
675 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
676 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
/* Round a requested CQ length up to the nearest supported queue size. */
681 nicvf_qsize_cq_roundup(uint32_t val)
683 uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
684 CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
685 CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
687 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
/* Enable/disable hardware VLAN stripping via NIC_VNIC_RQ_GEN_CFG.
 * Enable sets STRIP_FIRST_VLAN at bit 25; disable clears both first- and
 * second-VLAN strip bits.  The cached nic->vlan_strip mirrors the setting.
 */
692 nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
696 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
698 val |= (STRIP_FIRST_VLAN << 25);
700 val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
702 nic->vlan_strip = enable;
703 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
/* Program the "first skip" field (low 4 bits) of NIC_VNIC_RQ_GEN_CFG with
 * 'num_dwords' — read-modify-write; the mask-clear of the old field is not
 * visible in this listing (confirm the field is cleared before OR-ing).
 */
707 nicvf_first_skip_config(struct nicvf *nic, uint8_t num_dwords)
711 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
713 val |= (num_dwords & 0xf);
715 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
/* Enable/disable the 2-byte alignment pad (APAD) on received packets.
 * No-op on hardware without NICVF_CAP_DISABLE_APAD (APAD always on there).
 * Note the register bit is a *disable* flag: enable clears
 * NICVF_QS_RQ_DIS_APAD_SHIFT, disable sets it.
 */
719 nicvf_apad_config(struct nicvf *nic, bool enable)
723 /* APAD always enabled in this device */
724 if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
727 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
729 val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
731 val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
733 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
/* Program the RSS hash key: writes RSS_HASH_KEY_SIZE consecutive 64-bit
 * key registers starting at NIC_VNIC_RSS_KEY_0_4, byte-swapping each word
 * to big-endian.  'key' must hold RSS_HASH_KEY_SIZE * 8 bytes; the keyptr
 * advance is not visible in this listing.
 */
737 nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
741 uint64_t *keyptr = (uint64_t *)key;
743 addr = NIC_VNIC_RSS_KEY_0_4;
744 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
745 val = nicvf_cpu_to_be_64(*keyptr);
746 nicvf_reg_write(nic, addr, val);
747 addr += sizeof(uint64_t);
/* Read back the RSS hash key: inverse of nicvf_rss_set_key() — reads the
 * key registers and converts each 64-bit word from big-endian to host
 * order into 'key'.
 */
753 nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
757 uint64_t *keyptr = (uint64_t *)key;
759 addr = NIC_VNIC_RSS_KEY_0_4;
760 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
761 val = nicvf_reg_read(nic, addr);
762 *keyptr = nicvf_be_to_cpu_64(val);
763 addr += sizeof(uint64_t);
/* Write the RSS configuration register (hash-type enable bits etc.). */
769 nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
771 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
/* Read the current RSS configuration register. */
775 nicvf_rss_get_cfg(struct nicvf *nic)
777 return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
/* Update the RSS redirection table from 'tbl' (up to max_count entries,
 * clamped to the PF-reported rss_size): fetch rss_size from the PF, derive
 * hash_bits, copy the caller's indirection entries into the cached
 * rss_info, then push the table to the PF via mailbox.
 */
781 nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
784 struct nicvf_rss_reta_info *rss = &nic->rss_info;
786 /* result will be stored in nic->rss_info.rss_size */
787 if (nicvf_mbox_get_rss_size(nic))
788 return NICVF_ERR_RSS_GET_SZ;
790 assert(rss->rss_size > 0);
791 rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
792 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
793 rss->ind_tbl[idx] = tbl[idx];
795 if (nicvf_mbox_config_rss(nic))
796 return NICVF_ERR_RSS_TBL_UPDATE;
/* Read back the cached RSS redirection table into 'tbl' (up to max_count
 * entries, clamped to the PF-reported rss_size).  Also refreshes rss_size
 * and hash_bits from the PF, mirroring nicvf_rss_reta_update().
 */
802 nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
805 struct nicvf_rss_reta_info *rss = &nic->rss_info;
807 /* result will be stored in nic->rss_info.rss_size */
808 if (nicvf_mbox_get_rss_size(nic))
809 return NICVF_ERR_RSS_GET_SZ;
811 assert(rss->rss_size > 0);
812 rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
814 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
815 tbl[idx] = rss->ind_tbl[idx];
/* Default RSS bring-up: program a fixed default key and the caller's 'cfg',
 * then install a round-robin redirection table spreading
 * NIC_MAX_RSS_IDR_TBL_SIZE entries across 'qcnt' queues (idx % qcnt).
 * Bails out early when CPI algorithm is not CPI_ALG_NONE (early-return
 * body not visible in this listing).
 */
821 nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
824 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
825 uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
826 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
827 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
828 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
829 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
830 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
833 if (nic->cpi_alg != CPI_ALG_NONE)
839 /* Update default RSS key and cfg */
840 nicvf_rss_set_key(nic, default_key);
841 nicvf_rss_set_cfg(nic, cfg);
843 /* Update default RSS RETA */
844 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
845 default_reta[idx] = idx % qcnt;
847 return nicvf_rss_reta_update(nic, default_reta,
848 NIC_MAX_RSS_IDR_TBL_SIZE);
/* Tear down RSS: clear the RSS config register and point every redirection
 * table entry at queue 0 so all traffic lands on the first queue.
 */
852 nicvf_rss_term(struct nicvf *nic)
855 uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
857 nicvf_rss_set_cfg(nic, 0);
858 /* Redirect the output to 0th queue */
859 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
860 disable_rss[idx] = 0;
862 return nicvf_rss_reta_update(nic, disable_rss,
863 NIC_MAX_RSS_IDR_TBL_SIZE);
/* Enable/disable loopback via the PF mailbox.  Rejects an enable request
 * when the hardware/PF did not advertise loopback support.
 */
867 nicvf_loopback_config(struct nicvf *nic, bool enable)
869 if (enable && nic->loopback_supported == 0)
870 return NICVF_ERR_LOOPBACK_CFG;
872 return nicvf_mbox_loopback_config(nic, enable);
/* Populate 'stats' from the hardware VNIC statistics counters, one
 * NICVF_GET_RX_STATS/NICVF_GET_TX_STATS read per field (RX: octets,
 * unicast/broadcast/multicast frames, FCS and L2 errors, RED and overrun
 * drops with byte counts, bcast/mcast/L3 drop counts; TX: octets,
 * ucast/bcast/mcast frames, drops).
 */
876 nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
878 stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
879 stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
880 stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
881 stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
882 stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
883 stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
884 stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
885 stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
886 stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
887 stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
888 stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
889 stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
890 stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
891 stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
893 stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
894 stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
895 stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
896 stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
897 stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
/* Per-RQ statistics: STATUS1 holds the packet count (assigned to
 * q_rx_packets); STATUS0 presumably feeds q_rx_bytes — the target of the
 * first read is not visible in this listing.
 */
901 nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
905 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
906 qstats->q_rx_packets =
907 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
/* Per-SQ statistics: STATUS1 holds the packet count (assigned to
 * q_tx_packets); STATUS0 presumably feeds q_tx_bytes — the target of the
 * first read is not visible in this listing.
 */
911 nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
915 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
916 qstats->q_tx_packets =
917 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);