4 * Copyright (C) Cavium networks Ltd. 2016.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium networks nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 #include "nicvf_plat.h"
/* Register-offset/name pair consumed by nicvf_reg_dump(), which reads the
 * .offset and .name members of the tables below.
 * NOTE(review): the member declarations themselves fall outside this excerpt.
 */
45 struct nicvf_reg_info {
/* Register-poll tuning: retry count and per-iteration delay (microseconds),
 * used by nicvf_qset_poll_reg() and the RBDR prefetch wait.
 */
50 #define NICVF_REG_POLL_ITER_NR (10)
51 #define NICVF_REG_POLL_DELAY_US (2000)
/* Expands to a nicvf_reg_info initializer: { offset, "stringified name" }. */
52 #define NICVF_REG_INFO(reg) {reg, #reg}
/* Per-VF global registers (config, mailbox, interrupt, RSS). */
54 static const struct nicvf_reg_info nicvf_reg_tbl[] = {
55 NICVF_REG_INFO(NIC_VF_CFG),
56 NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
57 NICVF_REG_INFO(NIC_VF_INT),
58 NICVF_REG_INFO(NIC_VF_INT_W1S),
59 NICVF_REG_INFO(NIC_VF_ENA_W1C),
60 NICVF_REG_INFO(NIC_VF_ENA_W1S),
61 NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
62 NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
/* Multi-word register files: each 64-bit word sits 8 bytes past the
 * previous one, so names are spelled out per word (RSS key words,
 * TX stat counters, RX stat counters).
 */
65 static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
66 {NIC_VNIC_RSS_KEY_0_4 + 0, "NIC_VNIC_RSS_KEY_0"},
67 {NIC_VNIC_RSS_KEY_0_4 + 8, "NIC_VNIC_RSS_KEY_1"},
68 {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
69 {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
70 {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
71 {NIC_VNIC_TX_STAT_0_4 + 0, "NIC_VNIC_STAT_TX_OCTS"},
72 {NIC_VNIC_TX_STAT_0_4 + 8, "NIC_VNIC_STAT_TX_UCAST"},
73 {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
74 {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
75 {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
76 {NIC_VNIC_RX_STAT_0_13 + 0, "NIC_VNIC_STAT_RX_OCTS"},
77 {NIC_VNIC_RX_STAT_0_13 + 8, "NIC_VNIC_STAT_RX_UCAST"},
78 {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
79 {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
80 {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
81 {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
82 {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
83 {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
84 {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
85 {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
86 {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
87 {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
88 {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
89 {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
/* Per-completion-queue registers, dumped once per CQ index. */
92 static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
93 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
94 NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
95 NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
96 NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
97 NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
98 NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
99 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
100 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
101 NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
102 NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
/* Per-receive-queue registers, dumped once per RQ index. */
105 static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
106 NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
107 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
108 NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
/* Per-send-queue registers, dumped once per SQ index. */
111 static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
112 NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
113 NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
114 NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
115 NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
116 NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
117 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
118 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
119 NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
120 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
121 NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
/* Per-RBDR (receive buffer descriptor ring) registers. */
124 static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
125 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
126 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
127 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
128 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
129 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
130 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
131 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
132 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
133 NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
/* One-time VF base initialization: reject a device that reports subsystem
 * id 0, then latch hardware capability flags in nic->hwcap.
 */
137 nicvf_base_init(struct nicvf *nic)
140 if (nic->subsystem_device_id == 0)
141 return NICVF_ERR_BASE_INIT;
/* PASS2 silicon gains the tunnel-parsing capability. */
143 if (nicvf_hw_version(nic) == NICVF_PASS2)
144 nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;
/* Dump every VF register: globals, multi-word files (RSS key, stat
 * counters), then the per-queue CQ/RQ/SQ/RBDR sets for each queue index.
 * When 'data' is NULL the values are logged; otherwise the raw 64-bit
 * values are appended to 'data'. The caller sizes 'data' with
 * nicvf_reg_get_count().
 * NOTE(review): the if/else lines selecting log-vs-store are outside this
 * excerpt; 'dump_stdout' presumably gates each pair below.
 */
149 /* dump on stdout if data is NULL */
151 nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
156 dump_stdout = data ? 0 : 1;
/* Global per-VF registers. */
158 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
160 nicvf_log("%24s = 0x%" PRIx64 "\n",
161 nicvf_reg_tbl[i].name,
162 nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
164 *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
/* Multi-word register files (RSS key words, TX/RX stat counters). */
166 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
168 nicvf_log("%24s = 0x%" PRIx64 "\n",
169 nicvf_multi_reg_tbl[i].name,
171 nicvf_multi_reg_tbl[i].offset));
173 *data++ = nicvf_reg_read(nic,
174 nicvf_multi_reg_tbl[i].offset);
/* Completion-queue registers, per CQ index. */
176 for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
177 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
179 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
180 nicvf_qset_cq_reg_tbl[i].name, q,
181 nicvf_queue_reg_read(nic,
182 nicvf_qset_cq_reg_tbl[i].offset, q));
184 *data++ = nicvf_queue_reg_read(nic,
185 nicvf_qset_cq_reg_tbl[i].offset, q);
/* Receive-queue registers, per RQ index. */
187 for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
188 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
190 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
191 nicvf_qset_rq_reg_tbl[i].name, q,
192 nicvf_queue_reg_read(nic,
193 nicvf_qset_rq_reg_tbl[i].offset, q));
195 *data++ = nicvf_queue_reg_read(nic,
196 nicvf_qset_rq_reg_tbl[i].offset, q);
/* Send-queue registers, per SQ index. */
198 for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
199 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
201 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
202 nicvf_qset_sq_reg_tbl[i].name, q,
203 nicvf_queue_reg_read(nic,
204 nicvf_qset_sq_reg_tbl[i].offset, q));
206 *data++ = nicvf_queue_reg_read(nic,
207 nicvf_qset_sq_reg_tbl[i].offset, q);
/* RBDR registers, per descriptor ring. */
209 for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
210 for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
212 nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
213 nicvf_qset_rbdr_reg_tbl[i].name, q,
214 nicvf_queue_reg_read(nic,
215 nicvf_qset_rbdr_reg_tbl[i].offset, q));
217 *data++ = nicvf_queue_reg_read(nic,
218 nicvf_qset_rbdr_reg_tbl[i].offset, q);
/* Number of 64-bit words nicvf_reg_dump() writes into its 'data' buffer:
 * every table entry, with the per-queue tables multiplied by their queue
 * counts. Must stay in sync with nicvf_reg_dump().
 */
223 nicvf_reg_get_count(void)
227 nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
228 nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
229 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
230 MAX_CMP_QUEUES_PER_QS;
231 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
232 MAX_RCV_QUEUES_PER_QS;
233 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
234 MAX_SND_QUEUES_PER_QS;
235 nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
236 MAX_RCV_BUF_DESC_RINGS_PER_QS;
/* Ask the PF, via mailbox, to enable or disable this VF's Qset.
 * Returns 0 on success, NICVF_ERR_SET_QS if the mailbox request failed.
 */
242 nicvf_qset_config_internal(struct nicvf *nic, bool enable)
245 struct pf_qs_cfg pf_qs_cfg = {.value = 0};
247 pf_qs_cfg.ena = enable ? 1 : 0;
248 pf_qs_cfg.vnic = nic->vf_id;
249 ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
250 return ret ? NICVF_ERR_SET_QS : 0;
253 /* Requests PF to assign and enable Qset */
255 nicvf_qset_config(struct nicvf *nic)
258 return nicvf_qset_config_internal(nic, true);
/* Counterpart of nicvf_qset_config(): asks the PF to release the Qset. */
262 nicvf_qset_reclaim(struct nicvf *nic)
265 return nicvf_qset_config_internal(nic, false);
/*
 * qsort() comparator for uint32_t elements, ascending order.
 *
 * Uses the (x > y) - (x < y) idiom instead of subtraction: the difference
 * of two uint32_t values is unsigned, so converting it to int gives the
 * wrong sign for distant operands (e.g. cmp(0, UINT32_MAX) would report
 * "greater"). The idiom returns exactly -1, 0 or 1 for all inputs.
 */
static int
cmpfunc(const void *a, const void *b)
{
	const uint32_t x = *(const uint32_t *)a;
	const uint32_t y = *(const uint32_t *)b;

	return (x > y) - (x < y);
}
/* Round 'val' up to the nearest entry of 'list'. Sorts 'list' in place
 * (ascending) before scanning.
 * NOTE(review): the loop body and return statements fall outside this
 * excerpt -- presumably returns the first entry >= val, with a fallback
 * when 'val' exceeds every entry; confirm against the full file.
 */
275 nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
279 qsort(list, entries, sizeof(uint32_t), cmpfunc);
280 for (i = 0; i < entries; i++)
283 /* Not in the list */
/* Qset error interrupt handler: dump all registers for diagnosis, then walk
 * every CQ, SQ and RBDR, log which error condition fired, and clear the
 * corresponding status register. Ends by masking all VF interrupts.
 */
291 nicvf_handle_qset_err_intr(struct nicvf *nic)
296 nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
297 nicvf_reg_dump(nic, NULL);
/* Completion-queue errors: decode STATUS bits, then write-to-clear. */
299 for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
300 status = nicvf_queue_reg_read(
301 nic, NIC_QSET_CQ_0_7_STATUS, qidx);
302 if (!(status & NICVF_CQ_ERR_MASK))
305 if (status & NICVF_CQ_WR_FULL)
306 nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
307 if (status & NICVF_CQ_WR_DISABLE)
308 nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
309 if (status & NICVF_CQ_WR_FAULT)
310 nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
311 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
/* Send-queue errors: same decode-log-clear pattern. */
314 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
315 status = nicvf_queue_reg_read(
316 nic, NIC_QSET_SQ_0_7_STATUS, qidx);
317 if (!(status & NICVF_SQ_ERR_MASK))
320 if (status & NICVF_SQ_ERR_STOPPED)
321 nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
322 if (status & NICVF_SQ_ERR_SEND)
323 nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
324 if (status & NICVF_SQ_ERR_DPE)
325 nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
326 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
/* RBDR errors: extract the FIFO state field and flag a failed ring. */
329 for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
330 status = nicvf_queue_reg_read(nic,
331 NIC_QSET_RBDR_0_1_STATUS0, qidx);
332 status &= NICVF_RBDR_FIFO_STATE_MASK;
333 status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
335 if (status == RBDR_FIFO_STATE_FAIL)
336 nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
337 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
/* Leave all interrupts masked after a fatal Qset error. */
340 nicvf_disable_all_interrupts(nic);
345 * Handle poll mode driver interested "mbox" and "queue-set error" interrupts.
346 * This function is not re-entrant.
347 * The caller should provide proper serialization.
350 nicvf_reg_poll_interrupts(struct nicvf *nic)
355 intr = nicvf_reg_read(nic, NIC_VF_INT);
356 if (intr & NICVF_INTR_MBOX_MASK) {
/* Acknowledge (write-1-to-clear) before dispatching the mailbox message. */
357 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
358 msg = nicvf_handle_mbx_intr(nic);
360 if (intr & NICVF_INTR_QS_ERR_MASK) {
/* Acknowledge, then run the Qset error decode/cleanup path. */
361 nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
362 nicvf_handle_qset_err_intr(nic);
/* Poll a per-queue register until the 'bits'-wide field starting at
 * 'bit_pos' equals 'val'. Retries up to NICVF_REG_POLL_ITER_NR times,
 * sleeping NICVF_REG_POLL_DELAY_US between reads; returns
 * NICVF_ERR_REG_POLL if the field never reaches the expected value.
 */
368 nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
369 uint32_t bit_pos, uint32_t bits, uint64_t val)
373 int timeout = NICVF_REG_POLL_ITER_NR;
/* Build the field mask, then position it at bit_pos. */
375 bit_mask = (1ULL << bits) - 1;
376 bit_mask = (bit_mask << bit_pos);
379 reg_val = nicvf_queue_reg_read(nic, offset, qidx);
380 if (((reg_val & bit_mask) >> bit_pos) == val)
382 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
385 return NICVF_ERR_REG_POLL;
/* Quiesce and reset one RBDR: save head/tail so the driver can free the
 * buffers still posted, disable the ring, wait for the prefetch FIFO to
 * drain, then run the two-step reset sequence (reset -> idle) while
 * polling the FIFO-state field (bits 63:62 of STATUS0).
 */
389 nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
392 int timeout = NICVF_REG_POLL_ITER_NR;
393 struct nicvf_rbdr *rbdr = nic->rbdr;
395 /* Save head and tail pointers for freeing up buffers */
/* HEAD/TAIL hold descriptor addresses; >>3 converts to a descriptor index. */
397 rbdr->head = nicvf_queue_reg_read(nic,
398 NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
399 rbdr->tail = nicvf_queue_reg_read(nic,
400 NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
401 rbdr->next_tail = rbdr->tail;
405 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
/* Disable the ring and wait until hardware reports it disabled. */
409 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
410 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
412 return NICVF_ERR_RBDR_DISABLE;
/* Wait for the prefetch FIFO to drain: PRFCH_STATUS packs two 32-bit
 * counters; equal halves mean no prefetches are outstanding.
 */
415 status = nicvf_queue_reg_read(nic,
416 NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
417 if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
419 nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
422 return NICVF_ERR_RBDR_PREFETCH;
/* Issue reset, expect FIFO state 0x02, then clear CFG and expect 0x00. */
425 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
427 if (nicvf_qset_poll_reg(nic, qidx,
428 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
429 return NICVF_ERR_RBDR_RESET1;
431 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
432 if (nicvf_qset_poll_reg(nic, qidx,
433 NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
434 return NICVF_ERR_RBDR_RESET2;
/* Convert a power-of-two queue length to the hardware QSIZE encoding:
 * log2(len) - len_shift, asserted to lie within the supported range.
 * 'len' is assumed to be a power of two -- callers pass qlen_mask + 1.
 */
440 nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
444 val = ((uint32_t)log2(len) - len_shift);
445 assert(val >= NICVF_QSIZE_MIN_VAL);
446 assert(val <= NICVF_QSIZE_MAX_VAL);
/* Configure and enable one RBDR: reclaim any previous state, program the
 * descriptor base address and CFG word (size encoding, buffer size in
 * 128-byte lines), then verify the ring came up with head == tail == 0.
 */
451 nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
455 struct nicvf_rbdr *rbdr = nic->rbdr;
456 struct rbdr_cfg rbdr_cfg = {.value = 0};
458 ret = nicvf_qset_rbdr_reclaim(nic, qidx);
462 /* Set descriptor base address */
463 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
465 /* Enable RBDR & set queue size */
469 rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
471 rbdr_cfg.avg_con = 0;
/* Hardware expresses the buffer size in units of 128-byte cache lines. */
472 rbdr_cfg.lines = rbdr->buffsz / 128;
474 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
476 /* Verify proper RBDR reset */
477 head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
478 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
481 return NICVF_ERR_RBDR_RESET;
/* Round a requested RBDR length up to the nearest supported ring size. */
487 nicvf_qsize_rbdr_roundup(uint32_t val)
489 uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
490 RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
491 RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
493 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
/* Prefill an RBDR with receive buffers: pull up to 'max_buffs' buffer
 * physical addresses from 'handler' (never filling the ring completely),
 * write them into consecutive descriptors, ring the doorbell with the
 * count, and resynchronize the cached tail from hardware.
 */
497 nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
498 rbdr_pool_get_handler handler,
499 void *opaque, uint32_t max_buffs)
501 struct rbdr_entry_t *desc, *desc0;
502 struct nicvf_rbdr *rbdr = nic->rbdr;
504 nicvf_phys_addr_t phy;
506 assert(rbdr != NULL);
509 /* Don't fill beyond max numbers of desc */
510 while (count < rbdr->qlen_mask) {
511 if (count >= max_buffs)
513 desc0 = desc + count;
/* Handler supplies the next buffer's physical address from the pool. */
514 phy = handler(opaque);
516 desc0->full_addr = phy;
/* Publish the posted buffers to hardware in one doorbell write. */
523 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
524 rbdr->tail = nicvf_queue_reg_read(nic,
525 NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
526 rbdr->next_tail = rbdr->tail;
/* Returns the RBDR STATUS0 register; callers treat nonzero as "active". */
532 nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
534 return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
/* Quiesce and reset one send queue: disable it, wait for the STOPPED
 * status bit if it was enabled, then reset and verify head/tail cleared.
 */
538 nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
541 struct sq_cfg sq_cfg;
/* Snapshot CFG first so we only poll for STOPPED if the SQ was enabled. */
543 sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
545 /* Disable send queue */
546 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
548 /* Check if SQ is stopped */
549 if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
550 NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
551 return NICVF_ERR_SQ_DISABLE;
553 /* Reset send queue */
554 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
/* HEAD/TAIL are byte addresses of 16-byte SQEs; >>4 gives the index. */
555 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
556 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
558 return NICVF_ERR_SQ_RESET;
/* Configure and enable one send queue: reclaim previous state, have the PF
 * configure the SQ via mailbox, program base address and CFG (size
 * encoding), and ring the doorbell so hardware resumes SQE processing.
 */
564 nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
567 struct sq_cfg sq_cfg = {.value = 0};
569 ret = nicvf_qset_sq_reclaim(nic, qidx);
573 /* Send a mailbox msg to PF to config SQ */
574 if (nicvf_mbox_sq_config(nic, qidx))
575 return NICVF_ERR_SQ_PF_CFG;
577 /* Set queue base address */
578 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
580 /* Enable send queue & set queue size */
584 sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
585 sq_cfg.tstmp_bgx_intf = 0;
586 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
588 /* Ring doorbell so that H/W restarts processing SQEs */
589 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
/* Round a requested SQ length up to the nearest supported queue size. */
595 nicvf_qsize_sq_roundup(uint32_t val)
597 uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
598 SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
599 SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
601 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
/* Disable one receive queue, then sync with the PF via mailbox so the
 * disable has fully taken effect before the caller proceeds.
 */
605 nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
607 /* Disable receive queue */
608 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
609 return nicvf_mbox_rq_sync(nic);
/* Configure and enable one receive queue: reclaim previous state, then ask
 * the PF (mailbox) to wire the RQ to this VF's CQ and RBDR, configure
 * backpressure and drop levels, and finally enable the RQ locally.
 */
613 nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
615 struct pf_rq_cfg pf_rq_cfg = {.value = 0};
616 struct rq_cfg rq_cfg = {.value = 0};
618 if (nicvf_qset_rq_reclaim(nic, qidx))
619 return NICVF_ERR_RQ_CLAIM;
621 pf_rq_cfg.strip_pre_l2 = 0;
622 /* First cache line of RBDR data will be allocated into L2C */
623 pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
/* RQ qidx completes into CQ qidx of this VF; buffers come from RBDR 0. */
624 pf_rq_cfg.cq_qs = nic->vf_id;
625 pf_rq_cfg.cq_idx = qidx;
626 pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
627 pf_rq_cfg.rbdr_cont_idx = 0;
628 pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
629 pf_rq_cfg.rbdr_strt_idx = 0;
631 /* Send a mailbox msg to PF to config RQ */
632 if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
633 return NICVF_ERR_RQ_PF_CFG;
635 /* Select Rx backpressure */
636 if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
637 return NICVF_ERR_RQ_BP_CFG;
639 /* Send a mailbox msg to PF to config RQ drop */
640 if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
641 return NICVF_ERR_RQ_DROP_CFG;
643 /* Enable Receive queue */
645 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
/* Quiesce and reset one completion queue: disable it, poll CFG bit 42
 * until the disable latches, reset and verify head/tail cleared, then
 * clear CFG2 since the timer threshold survives a CQ reset.
 */
651 nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
655 /* Disable completion queue */
656 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0)
657 if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
658 return NICVF_ERR_CQ_DISABLE;
660 /* Reset completion queue */
661 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
/* HEAD/TAIL are byte addresses of 512-byte CQEs; >>9 gives the index. */
662 tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
663 head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
665 return NICVF_ERR_CQ_RESET;
667 /* Disable timer threshold (doesn't get reset upon CQ reset) */
668 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
/* Configure and enable one completion queue: reclaim previous state,
 * program the CQ ring base and CFG (size encoding), and zero the interrupt
 * threshold registers so no coalescing delay is applied.
 */
673 nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
676 struct cq_cfg cq_cfg = {.value = 0};
678 ret = nicvf_qset_cq_reclaim(nic, qidx);
682 /* Set completion queue base address */
683 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
687 /* Writes of CQE will be allocated into L2C */
689 cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
691 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
693 /* Set threshold value for interrupt generation */
694 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
695 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
/* Round a requested CQ length up to the nearest supported queue size. */
700 nicvf_qsize_cq_roundup(uint32_t val)
702 uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
703 CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
704 CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
706 return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
/* Toggle hardware VLAN stripping via the strip field at bits 25+ of
 * NIC_VNIC_RQ_GEN_CFG: set first-VLAN stripping, or clear stripping of
 * both first and second tags.
 * NOTE(review): the enable/disable branch lines fall outside this excerpt.
 */
711 nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
715 val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
717 val |= (STRIP_FIRST_VLAN << 25);
719 val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
721 nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
/* Program the RSS hash key: write RSS_HASH_KEY_SIZE consecutive 64-bit
 * words from 'key' into the NIC_VNIC_RSS_KEY_0_4 register file,
 * byte-swapped to big-endian as the hardware expects.
 * NOTE(review): the keyptr advance is outside this excerpt -- presumably
 * keyptr++ per iteration; confirm against the full file.
 */
725 nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
729 uint64_t *keyptr = (uint64_t *)key;
731 addr = NIC_VNIC_RSS_KEY_0_4;
732 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
733 val = nicvf_cpu_to_be_64(*keyptr);
734 nicvf_reg_write(nic, addr, val);
735 addr += sizeof(uint64_t);
/* Inverse of nicvf_rss_set_key(): read the key words back into 'key',
 * converting from the hardware's big-endian layout to CPU order.
 */
741 nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
745 uint64_t *keyptr = (uint64_t *)key;
747 addr = NIC_VNIC_RSS_KEY_0_4;
748 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
749 val = nicvf_reg_read(nic, addr);
750 *keyptr = nicvf_be_to_cpu_64(val);
751 addr += sizeof(uint64_t);
/* Write the RSS configuration word (hash-type enable bits). */
757 nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
759 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
/* Read back the current RSS configuration word. */
763 nicvf_rss_get_cfg(struct nicvf *nic)
765 return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
/* Update the RSS redirection table: query the table size from the PF
 * (stored in nic->rss_info.rss_size), copy up to max_count entries from
 * 'tbl' into the local shadow, then push the table to the PF via mailbox.
 */
769 nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
772 struct nicvf_rss_reta_info *rss = &nic->rss_info;
774 /* result will be stored in nic->rss_info.rss_size */
775 if (nicvf_mbox_get_rss_size(nic))
776 return NICVF_ERR_RSS_GET_SZ;
778 assert(rss->rss_size > 0);
/* hash_bits = number of index bits needed to address rss_size entries. */
779 rss->hash_bits = (uint8_t)log2(rss->rss_size);
780 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
781 rss->ind_tbl[idx] = tbl[idx];
783 if (nicvf_mbox_config_rss(nic))
784 return NICVF_ERR_RSS_TBL_UPDATE;
/* Query the RSS redirection table: refresh rss_size from the PF, then copy
 * up to max_count entries from the local shadow table into 'tbl'.
 */
790 nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
793 struct nicvf_rss_reta_info *rss = &nic->rss_info;
795 /* result will be stored in nic->rss_info.rss_size */
796 if (nicvf_mbox_get_rss_size(nic))
797 return NICVF_ERR_RSS_GET_SZ;
799 assert(rss->rss_size > 0);
800 rss->hash_bits = (uint8_t)log2(rss->rss_size);
801 for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
802 tbl[idx] = rss->ind_tbl[idx];
/* Bring up RSS with defaults: program a fixed default hash key and the
 * given cfg word, then build a round-robin redirection table spreading
 * the indirection entries across 'qcnt' queues.
 * NOTE(review): the early-return taken when cpi_alg != CPI_ALG_NONE is
 * outside this excerpt.
 */
808 nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
811 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
812 uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
813 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
814 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
815 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
816 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
817 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
/* RSS defaults only apply when no CPI algorithm overrides steering. */
820 if (nic->cpi_alg != CPI_ALG_NONE)
826 /* Update default RSS key and cfg */
827 nicvf_rss_set_key(nic, default_key);
828 nicvf_rss_set_cfg(nic, cfg);
830 /* Update default RSS RETA */
831 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
832 default_reta[idx] = idx % qcnt;
834 return nicvf_rss_reta_update(nic, default_reta,
835 NIC_MAX_RSS_IDR_TBL_SIZE);
/* Tear down RSS: clear the cfg word and point every redirection-table
 * entry at queue 0 so all traffic lands on the first queue.
 */
839 nicvf_rss_term(struct nicvf *nic)
842 uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
844 nicvf_rss_set_cfg(nic, 0);
845 /* Redirect the output to 0th queue */
846 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
847 disable_rss[idx] = 0;
849 return nicvf_rss_reta_update(nic, disable_rss,
850 NIC_MAX_RSS_IDR_TBL_SIZE);
/* Enable/disable loopback via the PF mailbox; enabling fails early with
 * NICVF_ERR_LOOPBACK_CFG when the VF does not support loopback.
 */
854 nicvf_loopback_config(struct nicvf *nic, bool enable)
856 if (enable && nic->loopback_supported == 0)
857 return NICVF_ERR_LOOPBACK_CFG;
859 return nicvf_mbox_loopback_config(nic, enable);
/* Fill 'stats' with the VF-level RX and TX counters read from the hardware
 * stat register files (see nicvf_multi_reg_tbl for the register layout).
 */
863 nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
865 stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
866 stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
867 stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
868 stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
869 stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
870 stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
871 stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
872 stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
873 stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
874 stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
875 stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
876 stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
877 stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
878 stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
880 stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
881 stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
882 stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
883 stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
884 stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
/* Fill per-RQ stats from the queue status registers: STATUS1 supplies
 * q_rx_packets; the STATUS0 read's destination (presumably q_rx_bytes)
 * falls outside this excerpt -- confirm against the full file.
 */
888 nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
892 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
893 qstats->q_rx_packets =
894 nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
898 nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
902 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
903 qstats->q_tx_packets =
904 nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);