/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
5 #include "hinic_compat.h"
7 #include "hinic_pmd_hwdev.h"
8 #include "hinic_pmd_hwif.h"
9 #include "hinic_pmd_mgmt.h"
10 #include "hinic_pmd_eqs.h"
12 #define AEQ_CTRL_0_INTR_IDX_SHIFT 0
13 #define AEQ_CTRL_0_DMA_ATTR_SHIFT 12
14 #define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
15 #define AEQ_CTRL_0_INTR_MODE_SHIFT 31
17 #define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
18 #define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
19 #define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
20 #define AEQ_CTRL_0_INTR_MODE_MASK 0x1U
22 #define AEQ_CTRL_0_SET(val, member) \
23 (((val) & AEQ_CTRL_0_##member##_MASK) << \
24 AEQ_CTRL_0_##member##_SHIFT)
26 #define AEQ_CTRL_0_CLEAR(val, member) \
27 ((val) & (~(AEQ_CTRL_0_##member##_MASK \
28 << AEQ_CTRL_0_##member##_SHIFT)))
30 #define AEQ_CTRL_1_LEN_SHIFT 0
31 #define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
32 #define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
34 #define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU
35 #define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U
36 #define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
38 #define AEQ_CTRL_1_SET(val, member) \
39 (((val) & AEQ_CTRL_1_##member##_MASK) << \
40 AEQ_CTRL_1_##member##_SHIFT)
42 #define AEQ_CTRL_1_CLEAR(val, member) \
43 ((val) & (~(AEQ_CTRL_1_##member##_MASK \
44 << AEQ_CTRL_1_##member##_SHIFT)))
46 #define CEQ_CTRL_0_INTR_IDX_SHIFT 0
47 #define CEQ_CTRL_0_DMA_ATTR_SHIFT 12
48 #define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20
49 #define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
50 #define CEQ_CTRL_0_INTR_MODE_SHIFT 31
52 #define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
53 #define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
54 #define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU
55 #define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
56 #define CEQ_CTRL_0_INTR_MODE_MASK 0x1U
58 #define CEQ_CTRL_0_SET(val, member) \
59 (((val) & CEQ_CTRL_0_##member##_MASK) << \
60 CEQ_CTRL_0_##member##_SHIFT)
62 #define CEQ_CTRL_1_LEN_SHIFT 0
63 #define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
65 #define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU
66 #define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
68 #define CEQ_CTRL_1_SET(val, member) \
69 (((val) & CEQ_CTRL_1_##member##_MASK) << \
70 CEQ_CTRL_1_##member##_SHIFT)
72 #define EQ_CONS_IDX_CONS_IDX_SHIFT 0
73 #define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24
74 #define EQ_CONS_IDX_INT_ARMED_SHIFT 31
76 #define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU
77 #define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU
78 #define EQ_CONS_IDX_INT_ARMED_MASK 0x1U
80 #define EQ_CONS_IDX_SET(val, member) \
81 (((val) & EQ_CONS_IDX_##member##_MASK) << \
82 EQ_CONS_IDX_##member##_SHIFT)
84 #define EQ_CONS_IDX_CLEAR(val, member) \
85 ((val) & (~(EQ_CONS_IDX_##member##_MASK \
86 << EQ_CONS_IDX_##member##_SHIFT)))
88 #define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT)
90 #define EQ_CONS_IDX(eq) ((eq)->cons_idx | \
91 ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))
93 #define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
94 HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) :\
95 HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
97 #define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
98 HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) :\
99 HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
/* Number of pages needed to hold eq_len elements of elem_size bytes,
 * rounded up to a whole number of pages of the given size.
 */
#define GET_EQ_NUM_PAGES(eq, size)	\
	((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) / (size)))

/* Number of whole elements that fit in one page of pg_size bytes */
#define GET_EQ_NUM_ELEMS(eq, pg_size)	((pg_size) / (u32)(eq)->elem_size)
107 #define PAGE_IN_4K(page_size) ((page_size) >> 12)
108 #define EQ_SET_HW_PAGE_SIZE_VAL(eq) ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))
110 #define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
111 #define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))
113 #define AEQ_DMA_ATTR_DEFAULT 0
114 #define CEQ_DMA_ATTR_DEFAULT 0
116 #define CEQ_LMT_KICK_DEFAULT 0
118 #define EQ_WRAPPED_SHIFT 20
120 #define EQ_VALID_SHIFT 31
122 #define aeq_to_aeqs(eq) \
123 container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
125 static u8 eq_cons_idx_checksum_set(u32 val)
130 for (idx = 0; idx < 32; idx += 4)
131 checksum ^= ((val >> idx) & 0xF);
133 return (checksum & 0xF);
137 * set_eq_cons_idx - write the cons idx to the hw
138 * @eq: The event queue to update the cons idx for
139 * @arm_state: indicate whether report interrupts when generate eq element
141 static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
143 u32 eq_cons_idx, eq_wrap_ci, val;
144 u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
146 eq_wrap_ci = EQ_CONS_IDX(eq);
148 /* Read Modify Write */
149 val = hinic_hwif_read_reg(eq->hwdev->hwif, addr);
151 val = EQ_CONS_IDX_CLEAR(val, CONS_IDX) &
152 EQ_CONS_IDX_CLEAR(val, INT_ARMED) &
153 EQ_CONS_IDX_CLEAR(val, XOR_CHKSUM);
155 /* Just aeq0 use int_arm mode for pmd drv to recv
156 * asyn event&mbox recv data
159 eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
160 EQ_CONS_IDX_SET(arm_state, INT_ARMED);
162 eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
163 EQ_CONS_IDX_SET(HINIC_EQ_NOT_ARMED, INT_ARMED);
167 val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
169 hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
173 * eq_update_ci - update the cons idx of event queue
174 * @eq: the event queue to update the cons idx for
176 void eq_update_ci(struct hinic_eq *eq)
178 set_eq_cons_idx(eq, HINIC_EQ_ARMED);
181 struct hinic_ceq_ctrl_reg {
182 struct hinic_mgmt_msg_head mgmt_msg_head;
190 static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id,
191 u32 ctrl0, u32 ctrl1)
193 struct hinic_ceq_ctrl_reg ceq_ctrl;
194 u16 in_size = sizeof(ceq_ctrl);
196 memset(&ceq_ctrl, 0, in_size);
197 ceq_ctrl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
198 ceq_ctrl.func_id = hinic_global_func_id(hwdev);
199 ceq_ctrl.q_id = q_id;
200 ceq_ctrl.ctrl0 = ctrl0;
201 ceq_ctrl.ctrl1 = ctrl1;
203 return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
204 HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP,
205 &ceq_ctrl, in_size, NULL, NULL, 0);
209 * set_eq_ctrls - setting eq's ctrls registers
210 * @eq: the event queue for setting
212 static int set_eq_ctrls(struct hinic_eq *eq)
214 enum hinic_eq_type type = eq->type;
215 struct hinic_hwif *hwif = eq->hwdev->hwif;
216 struct irq_info *eq_irq = &eq->eq_irq;
217 u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
218 u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);
221 if (type == HINIC_AEQ) {
223 addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
225 val = hinic_hwif_read_reg(hwif, addr);
227 val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
228 AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
229 AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
230 AEQ_CTRL_0_CLEAR(val, INTR_MODE);
232 ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
233 AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
234 AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
235 AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
239 hinic_hwif_write_reg(hwif, addr, val);
242 addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
244 page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
245 elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
247 ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
248 AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
249 AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
251 hinic_hwif_write_reg(hwif, addr, ctrl1);
253 ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
254 CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
255 CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |
256 CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
257 CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
259 page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
261 ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) |
262 CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
264 /* set ceq ctrl reg through mgmt cpu */
265 ret = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1);
272 * ceq_elements_init - Initialize all the elements in the ceq
273 * @eq: the event queue
274 * @init_val: value to init with it the elements
276 static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
281 for (i = 0; i < eq->eq_len; i++) {
282 ceqe = GET_CEQ_ELEM(eq, i);
283 *(ceqe) = cpu_to_be32(init_val);
286 rte_wmb(); /* Write the init values */
290 * aeq_elements_init - initialize all the elements in the aeq
291 * @eq: the event queue
292 * @init_val: value to init with it the elements
294 static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
296 struct hinic_aeq_elem *aeqe;
299 for (i = 0; i < eq->eq_len; i++) {
300 aeqe = GET_AEQ_ELEM(eq, i);
301 aeqe->desc = cpu_to_be32(init_val);
304 rte_wmb(); /* Write the init values */
308 * alloc_eq_pages - allocate the pages for the queue
309 * @eq: the event queue
311 static int alloc_eq_pages(struct hinic_eq *eq)
313 struct hinic_hwif *hwif = eq->hwdev->hwif;
315 u64 dma_addr_size, virt_addr_size;
319 dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
320 virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);
322 eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
324 PMD_DRV_LOG(ERR, "Allocate dma addr array failed");
328 eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
329 if (!eq->virt_addr) {
330 PMD_DRV_LOG(ERR, "Allocate virt addr array failed");
332 goto virt_addr_alloc_err;
335 for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
336 eq->virt_addr[pg_num] =
337 (u8 *)dma_zalloc_coherent_aligned(eq->hwdev,
338 eq->page_size, &eq->dma_addr[pg_num],
340 if (!eq->virt_addr[pg_num]) {
345 hinic_hwif_write_reg(hwif,
346 HINIC_EQ_HI_PHYS_ADDR_REG(eq->type,
348 upper_32_bits(eq->dma_addr[pg_num]));
350 hinic_hwif_write_reg(hwif,
351 HINIC_EQ_LO_PHYS_ADDR_REG(eq->type,
353 lower_32_bits(eq->dma_addr[pg_num]));
356 init_val = EQ_WRAPPED(eq);
358 if (eq->type == HINIC_AEQ)
359 aeq_elements_init(eq, init_val);
361 ceq_elements_init(eq, init_val);
366 for (i = 0; i < pg_num; i++)
367 dma_free_coherent(eq->hwdev, eq->page_size,
368 eq->virt_addr[i], eq->dma_addr[i]);
376 * free_eq_pages - free the pages of the queue
377 * @eq: the event queue
379 static void free_eq_pages(struct hinic_eq *eq)
381 struct hinic_hwdev *hwdev = eq->hwdev;
384 for (pg_num = 0; pg_num < eq->num_pages; pg_num++)
385 dma_free_coherent(hwdev, eq->page_size,
386 eq->virt_addr[pg_num],
387 eq->dma_addr[pg_num]);
389 kfree(eq->virt_addr);
393 #define MSIX_ENTRY_IDX_0 (0)
396 * init_eq - initialize eq
397 * @eq: the event queue
398 * @hwdev: the pointer to the private hardware device object
399 * @q_id: Queue id number
400 * @q_len: the number of EQ elements
401 * @type: the type of the event queue, ceq or aeq
402 * @page_size: the page size of the event queue
403 * @entry: msix entry associated with the event queue
404 * Return: 0 - Success, Negative - failure
406 static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
407 u16 q_len, enum hinic_eq_type type, u32 page_size,
408 __rte_unused struct irq_info *entry)
415 eq->page_size = page_size;
418 /* clear eq_len to force eqe drop in hardware */
419 if (eq->type == HINIC_AEQ) {
420 hinic_hwif_write_reg(eq->hwdev->hwif,
421 HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
423 err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
425 PMD_DRV_LOG(ERR, "Set ceq control registers ctrl0[0] ctrl1[0] failed");
433 eq->elem_size = (type == HINIC_AEQ) ?
434 HINIC_AEQE_SIZE : HINIC_CEQE_SIZE;
435 eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
436 eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size);
438 if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
439 PMD_DRV_LOG(ERR, "Number element in eq page is not power of 2");
443 if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
444 PMD_DRV_LOG(ERR, "Too many pages for eq, num_pages: %d",
449 err = alloc_eq_pages(eq);
451 PMD_DRV_LOG(ERR, "Allocate pages for eq failed");
455 /* pmd use MSIX_ENTRY_IDX_0*/
456 eq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0;
458 err = set_eq_ctrls(eq);
460 PMD_DRV_LOG(ERR, "Init eq control registers failed");
461 goto init_eq_ctrls_err;
464 hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
465 set_eq_cons_idx(eq, HINIC_EQ_ARMED);
468 hinic_set_msix_state(hwdev, 0, HINIC_MSIX_ENABLE);
470 eq->poll_retry_nr = HINIC_RETRY_NUM;
481 * remove_eq - remove eq
482 * @eq: the event queue
484 static void remove_eq(struct hinic_eq *eq)
486 struct irq_info *entry = &eq->eq_irq;
488 if (eq->type == HINIC_AEQ) {
490 hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
493 /* clear eq_len to avoid hw access host memory */
494 hinic_hwif_write_reg(eq->hwdev->hwif,
495 HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
497 (void)set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
500 /* update cons_idx to avoid invalid interrupt */
501 eq->cons_idx = (u16)hinic_hwif_read_reg(eq->hwdev->hwif,
502 EQ_PROD_IDX_REG_ADDR(eq));
503 set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
509 * hinic_aeqs_init - init all the aeqs
510 * @hwdev: the pointer to the private hardware device object
511 * @num_aeqs: number of aeq
512 * @msix_entries: msix entries associated with the event queues
513 * Return: 0 - Success, Negative - failure
516 hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
517 struct irq_info *msix_entries)
519 struct hinic_aeqs *aeqs;
523 aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
529 aeqs->num_aeqs = num_aeqs;
531 for (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) {
532 err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
533 HINIC_DEFAULT_AEQ_LEN, HINIC_AEQ,
534 HINIC_EQ_PAGE_SIZE, &msix_entries[q_id]);
536 PMD_DRV_LOG(ERR, "Init aeq %d failed", q_id);
544 for (i = 0; i < q_id; i++)
545 remove_eq(&aeqs->aeq[i]);
553 * hinic_aeqs_free - free all the aeqs
554 * @hwdev: the pointer to the private hardware device object
556 static void hinic_aeqs_free(struct hinic_hwdev *hwdev)
558 struct hinic_aeqs *aeqs = hwdev->aeqs;
561 /* hinic pmd use aeq[1~3], aeq[0] used in kernel only */
562 for (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs ; q_id++)
563 remove_eq(&aeqs->aeq[q_id]);
568 void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
574 for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
575 eq = &hwdev->aeqs->aeq[q_id];
576 addr = EQ_CONS_IDX_REG_ADDR(eq);
577 ci = hinic_hwif_read_reg(hwdev->hwif, addr);
578 addr = EQ_PROD_IDX_REG_ADDR(eq);
579 pi = hinic_hwif_read_reg(hwdev->hwif, addr);
580 PMD_DRV_LOG(ERR, "aeq id: %d, ci: 0x%x, pi: 0x%x",
585 int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev)
589 struct irq_info aeq_irqs[HINIC_MAX_AEQS];
591 num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
592 if (num_aeqs < HINIC_MAX_AEQS) {
593 PMD_DRV_LOG(ERR, "Warning: PMD need %d AEQs, Chip have %d",
594 HINIC_MAX_AEQS, num_aeqs);
598 memset(aeq_irqs, 0, sizeof(aeq_irqs));
599 rc = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
601 PMD_DRV_LOG(ERR, "Initialize aeqs failed, rc: %d", rc);
/**
 * hinic_comm_aeqs_free - release all aeqs created by hinic_comm_aeqs_init
 * @hwdev: the pointer to the private hardware device object
 */
void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev)
{
	hinic_aeqs_free(hwdev);
}