1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Huawei Technologies Co., Ltd
5 #include "hinic_compat.h"
6 #include "hinic_pmd_hwdev.h"
7 #include "hinic_pmd_hwif.h"
8 #include "hinic_pmd_wq.h"
9 #include "hinic_pmd_mgmt.h"
10 #include "hinic_pmd_mbox.h"
11 #include "hinic_pmd_cmdq.h"
13 #define CMDQ_CMD_TIMEOUT 5000 /* millisecond */
15 #define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
16 #define LOWER_8_BITS(data) ((data) & 0xFF)
18 #define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
19 #define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23
20 #define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
21 #define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27
23 #define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU
24 #define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U
25 #define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U
26 #define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU
28 #define CMDQ_DB_INFO_SET(val, member) \
29 (((val) & CMDQ_DB_INFO_##member##_MASK) << \
30 CMDQ_DB_INFO_##member##_SHIFT)
32 #define CMDQ_CTRL_PI_SHIFT 0
33 #define CMDQ_CTRL_CMD_SHIFT 16
34 #define CMDQ_CTRL_MOD_SHIFT 24
35 #define CMDQ_CTRL_ACK_TYPE_SHIFT 29
36 #define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
38 #define CMDQ_CTRL_PI_MASK 0xFFFFU
39 #define CMDQ_CTRL_CMD_MASK 0xFFU
40 #define CMDQ_CTRL_MOD_MASK 0x1FU
41 #define CMDQ_CTRL_ACK_TYPE_MASK 0x3U
42 #define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U
44 #define CMDQ_CTRL_SET(val, member) \
45 (((val) & CMDQ_CTRL_##member##_MASK) << CMDQ_CTRL_##member##_SHIFT)
47 #define CMDQ_CTRL_GET(val, member) \
48 (((val) >> CMDQ_CTRL_##member##_SHIFT) & CMDQ_CTRL_##member##_MASK)
50 #define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
51 #define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
52 #define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
53 #define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
54 #define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
55 #define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
56 #define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31
58 #define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU
59 #define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U
60 #define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U
61 #define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U
62 #define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U
63 #define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U
64 #define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U
66 #define CMDQ_WQE_HEADER_SET(val, member) \
67 (((val) & CMDQ_WQE_HEADER_##member##_MASK) << \
68 CMDQ_WQE_HEADER_##member##_SHIFT)
70 #define CMDQ_WQE_HEADER_GET(val, member) \
71 (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \
72 CMDQ_WQE_HEADER_##member##_MASK)
74 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
75 #define CMDQ_CTXT_EQ_ID_SHIFT 56
76 #define CMDQ_CTXT_CEQ_ARM_SHIFT 61
77 #define CMDQ_CTXT_CEQ_EN_SHIFT 62
78 #define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63
80 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
81 #define CMDQ_CTXT_EQ_ID_MASK 0x1F
82 #define CMDQ_CTXT_CEQ_ARM_MASK 0x1
83 #define CMDQ_CTXT_CEQ_EN_MASK 0x1
84 #define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1
86 #define CMDQ_CTXT_PAGE_INFO_SET(val, member) \
87 (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
89 #define CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
90 ((val) & (~((u64)CMDQ_CTXT_##member##_MASK << \
91 CMDQ_CTXT_##member##_SHIFT)))
93 #define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
94 #define CMDQ_CTXT_CI_SHIFT 52
96 #define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
97 #define CMDQ_CTXT_CI_MASK 0xFFF
99 #define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
100 (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
102 #define SAVED_DATA_ARM_SHIFT 31
104 #define SAVED_DATA_ARM_MASK 0x1U
106 #define SAVED_DATA_SET(val, member) \
107 (((val) & SAVED_DATA_##member##_MASK) << SAVED_DATA_##member##_SHIFT)
109 #define SAVED_DATA_CLEAR(val, member) \
110 ((val) & (~(SAVED_DATA_##member##_MASK << SAVED_DATA_##member##_SHIFT)))
112 #define WQE_ERRCODE_VAL_SHIFT 20
114 #define WQE_ERRCODE_VAL_MASK 0xF
116 #define WQE_ERRCODE_GET(val, member) \
117 (((val) >> WQE_ERRCODE_##member##_SHIFT) & WQE_ERRCODE_##member##_MASK)
119 #define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
121 #define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
123 #define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
125 #define CMDQ_DB_ADDR(db_base, pi) \
126 (((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi))
128 #define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
130 #define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
132 #define WQE_LCMD_SIZE 64
133 #define WQE_SCMD_SIZE 64
135 #define COMPLETE_LEN 3
137 #define CMDQ_WQEBB_SIZE 64
138 #define CMDQ_WQEBB_SHIFT 6
140 #define CMDQ_WQE_SIZE 64
142 #define HINIC_CMDQ_WQ_BUF_SIZE 4096
144 #define WQE_NUM_WQEBBS(wqe_size, wq) \
145 ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
147 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
148 struct hinic_cmdqs, cmdq[0])
150 #define WAIT_CMDQ_ENABLE_TIMEOUT 300
153 static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
154 struct hinic_cmdq_ctxt *cmdq_ctxt);
155 static void hinic_cmdqs_free(struct hinic_hwdev *hwdev);
/* Report whether the given cmdq has no outstanding work: the WQ's free-slot
 * counter (delta) equals the full queue depth only when every posted WQE
 * has been consumed.
 * NOTE(review): opening/closing braces are elided in this view of the file.
 */
157 bool hinic_cmdq_idle(struct hinic_cmdq *cmdq)
159 	struct hinic_wq *wq = cmdq->wq;
161 	return ((wq->delta) == wq->q_depth ? true : false);
/* Allocate a command buffer for cmdq use: the tracking struct comes from the
 * heap (kzalloc) and the DMA-able payload from the per-device cmd_buf pool.
 * Returns the new buffer, or NULL on failure (error paths elided below).
 * NOTE(review): the NULL checks, the success return and the
 * alloc_pci_buf_err cleanup label are elided in this view — confirm against
 * the full source.
 */
164 struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev)
166 	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
167 	struct hinic_cmd_buf *cmd_buf;
169 	cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_KERNEL);
171 		PMD_DRV_LOG(ERR, "Allocate cmd buffer failed");
/* Payload must be DMA-reachable by the NIC, hence the pci pool rather than
 * a plain heap allocation.
 */
175 	cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, &cmd_buf->dma_addr);
177 		PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed");
178 		goto alloc_pci_buf_err;
/* Release a command buffer previously obtained from hinic_alloc_cmd_buf():
 * returns the DMA payload to the device's cmd_buf pool (the kfree of the
 * tracking struct is elided in this view).
 */
188 void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf)
190 	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
192 	pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
/* Map a cmdq WQE type to its on-queue size in bytes (both WQE_LCMD_SIZE and
 * WQE_SCMD_SIZE are 64 per the macros above).
 * NOTE(review): the switch/case scaffolding and the return statement are
 * elided in this view — only the two assignments are visible.
 */
196 static u32 cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
202 		wqe_size = WQE_LCMD_SIZE;
205 		wqe_size = WQE_SCMD_SIZE;
/* Translate the BUFDESC_LEN field of a WQE header back into a WQE size,
 * used by clear_wqe_complete_bit() to tell long-command from short-command
 * WQEs.
 * NOTE(review): the switch statement, break/default handling and the return
 * are elided in this view.
 */
212 static int cmdq_get_wqe_size(enum bufdesc_len len)
217 	case BUFDESC_LCMD_LEN:
218 		wqe_size = WQE_LCMD_SIZE;
220 	case BUFDESC_SCMD_LEN:
221 		wqe_size = WQE_SCMD_SIZE;
/* Point a WQE's completion section at the caller-supplied response buffer:
 * the SGE is filled with buf_out's DMA address and the fixed cmdq buffer
 * size so the hardware can write the SGE-format response there.
 */
228 static void cmdq_set_completion(struct hinic_cmdq_completion *complete,
229 				struct hinic_cmd_buf *buf_out)
231 	struct hinic_sge_resp *sge_resp = &complete->sge_resp;
233 	hinic_set_sge(&sge_resp->sge, buf_out->dma_addr,
234 		      HINIC_CMDQ_BUF_SIZE);
/* Fill a long-command WQE's buffer descriptor with the request buffer's DMA
 * address and actual payload length so hardware can fetch the command data.
 */
237 static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe,
238 				  struct hinic_cmd_buf *buf_in)
240 	hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
/* Compose the 32-bit doorbell info word: upper 8 bits of the producer index
 * plus queue-type, cmdq-type and source-type fields (the low 8 PI bits are
 * encoded in the doorbell address instead — see CMDQ_DB_PI_OFF/CMDQ_DB_ADDR).
 */
243 static void cmdq_fill_db(struct hinic_cmdq_db *db,
244 			enum hinic_cmdq_type cmdq_type, u16 prod_idx)
246 	db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
247 			CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
248 			CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
249 			CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
/* Ring the cmdq doorbell for the given producer index: build the info word,
 * byte-swap it to the device's big-endian layout, fence all prior WQE writes,
 * then write to the PI-dependent doorbell address.
 */
252 static void cmdq_set_db(struct hinic_cmdq *cmdq,
253 			enum hinic_cmdq_type cmdq_type, u16 prod_idx)
255 	struct hinic_cmdq_db db;
257 	cmdq_fill_db(&db, cmdq_type, prod_idx);
259 	/* The data that is written to HW should be in Big Endian Format */
260 	db.db_info = cpu_to_be32(db.db_info);
262 	rte_wmb();	/* write all before the doorbell */
264 	writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
/* Copy a prepared WQE into the live work queue. The first 8 bytes hold the
 * header containing the hardware busy/owner bit, so they are written last,
 * after a write barrier, to keep the device from seeing a half-written WQE.
 */
267 static void cmdq_wqe_fill(void *dst, void *src)
269 	memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
270 	       (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
271 	       CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
273 	rte_wmb();/* The first 8 bytes should be written last */
275 	*(u64 *)dst = *(u64 *)src;
/* Fill the control and header sections of a cmdq WQE (shadow copy, host byte
 * order; the caller byte-swaps and copies it to the queue afterwards).
 *
 * wrapped is written into the header's HW_BUSY_BIT (owner bit) so hardware
 * can tell new WQEs from stale ones across queue wrap-arounds. saved_data's
 * ARM flag is set only for the COMM-module SET_ARM command and cleared
 * otherwise.
 * NOTE(review): the else branches' "} else {" lines and closing braces are
 * elided in this view of the file.
 */
278 static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
279 				  enum hinic_ack_type ack_type,
280 				  enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
281 				  enum completion_format complete_format,
282 				  enum data_format local_data_format,
283 				  enum bufdesc_len buf_len)
285 	struct hinic_ctrl *ctrl;
286 	enum ctrl_sect_len ctrl_len;
287 	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
288 	struct hinic_cmdq_wqe_scmd *wqe_scmd;
289 	u32 saved_data = WQE_HEADER(wqe)->saved_data;
/* SGE-format data means a long-command WQE; inline data means short-command */
291 	if (local_data_format == DATA_SGE) {
292 		wqe_lcmd = &wqe->wqe_lcmd;
294 		wqe_lcmd->status.status_info = 0;
295 		ctrl = &wqe_lcmd->ctrl;
296 		ctrl_len = CTRL_SECT_LEN;
298 		wqe_scmd = &wqe->inline_wqe.wqe_scmd;
300 		wqe_scmd->status.status_info = 0;
301 		ctrl = &wqe_scmd->ctrl;
302 		ctrl_len = CTRL_DIRECT_SECT_LEN;
305 	ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) |
306 			CMDQ_CTRL_SET(cmd, CMD) |
307 			CMDQ_CTRL_SET(mod, MOD) |
308 			CMDQ_CTRL_SET(ack_type, ACK_TYPE);
310 	WQE_HEADER(wqe)->header_info =
311 		CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
312 		CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
313 		CMDQ_WQE_HEADER_SET(local_data_format, DATA_FMT) |
314 		CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
315 		CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
316 		CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
317 		CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
/* Track the ARM request in saved_data: set for SET_ARM on COMM, else clear */
319 	if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) {
320 		saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
321 		WQE_HEADER(wqe)->saved_data = saved_data |
322 						SAVED_DATA_SET(1, ARM);
324 		saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
325 		WQE_HEADER(wqe)->saved_data = saved_data;
/* Build a complete long-command WQE in the caller's shadow copy: select the
 * completion format from the command type (SGE response buffer, direct
 * 64-bit response, or async), then fill the control/header sections and the
 * request buffer descriptor.
 * NOTE(review): the switch statement line, break statements and the async
 * case label are elided in this view of the file.
 */
329 static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
330 			      enum cmdq_cmd_type cmd_type,
331 			      struct hinic_cmd_buf *buf_in,
332 			      struct hinic_cmd_buf *buf_out, int wrapped,
333 			      enum hinic_ack_type ack_type,
334 			      enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
336 	struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
337 	enum completion_format complete_format = COMPLETE_DIRECT;
340 	case SYNC_CMD_SGE_RESP:
342 			complete_format = COMPLETE_SGE;
343 			cmdq_set_completion(&wqe_lcmd->completion, buf_out);
346 	case SYNC_CMD_DIRECT_RESP:
347 		complete_format = COMPLETE_DIRECT;
348 		wqe_lcmd->completion.direct_resp = 0;
351 		complete_format = COMPLETE_DIRECT;
352 		wqe_lcmd->completion.direct_resp = 0;
/* Keep the request buffer pointer in the WQE so the async completion path
 * can recover and free it later.
 */
354 		wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
358 	cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
359 			      prod_idx, complete_format, DATA_SGE,
362 	cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
/* Validate a caller-supplied command buffer: reject payloads larger than the
 * cmdq maximum data size. Returns 0 on success, negative on failure (the
 * return statements are elided in this view).
 */
365 static int cmdq_params_valid(struct hinic_cmd_buf *buf_in)
367 	if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
368 		PMD_DRV_LOG(ERR, "Invalid CMDQ buffer size");
/* Poll the cmdqs status flag until HINIC_CMDQ_ENABLE is set or the
 * WAIT_CMDQ_ENABLE_TIMEOUT (300 ms) deadline passes.
 * NOTE(review): the do-statement line, the success return and the timeout
 * return are elided in this view — confirm return convention against the
 * full source.
 */
375 static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
379 	end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
381 		if (cmdqs->status & HINIC_CMDQ_ENABLE)
384 	} while (time_before(jiffies, end));
/* Record the hardware-reported error code for the WQE at prod_idx so the
 * submitting thread can pick it up after completion.
 */
389 static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
392 		cmdq->errcode[prod_idx] = errcode;
/* Retire a completed WQE: locate its control section (long- vs short-command
 * layout is derived from the header's BUFDESC_LEN field), clear the hardware
 * busy bit, fence the write, and return the WQEBBs to the work queue.
 * NOTE(review): the statement that actually clears ctrl->ctrl_info after the
 * "clear HW busy bit" comment is elided in this view of the file.
 */
395 static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
396 				   struct hinic_cmdq_wqe *wqe)
398 	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
399 	struct hinic_cmdq_inline_wqe *inline_wqe;
400 	struct hinic_cmdq_wqe_scmd *wqe_scmd;
401 	struct hinic_ctrl *ctrl;
402 	u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
403 	int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
404 	int wqe_size = cmdq_get_wqe_size(buf_len);
407 	if (wqe_size == WQE_LCMD_SIZE) {
408 		wqe_lcmd = &wqe->wqe_lcmd;
409 		ctrl = &wqe_lcmd->ctrl;
411 		inline_wqe = &wqe->inline_wqe;
412 		wqe_scmd = &inline_wqe->wqe_scmd;
413 		ctrl = &wqe_scmd->ctrl;
416 	/* clear HW busy bit */
419 	rte_wmb();	/* verify wqe is clear */
421 	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
422 	hinic_put_wqe(cmdq->wq, num_wqebbs);
/* Push every cmdq's context to the management CPU via the MGMT sync channel.
 * On a PF/VF "firmware hot active" busy error the SET_FAIL flag is raised so
 * callers can retry later; any other failure is logged. On full success the
 * SET_FAIL flag is cleared and the cmdqs are marked ENABLEd.
 * NOTE(review): the per-iteration error return and the final return are
 * elided in this view of the file.
 */
425 static int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
427 	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
428 	struct hinic_cmdq_ctxt *cmdq_ctxt;
429 	struct hinic_cmdq_ctxt cmdq_ctxt_out;
430 	enum hinic_cmdq_type cmdq_type;
431 	u16 out_size = sizeof(cmdq_ctxt_out);
435 	cmdq_type = HINIC_CMDQ_SYNC;
436 	memset(&cmdq_ctxt_out, 0, out_size);
437 	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
438 		cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
439 		cmdq_ctxt->resp_aeq_num = HINIC_AEQ1;
440 		in_size = sizeof(*cmdq_ctxt);
441 		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
442 					     HINIC_MGMT_CMD_CMDQ_CTXT_SET,
443 					     cmdq_ctxt, in_size, &cmdq_ctxt_out,
445 		if (err || !out_size || cmdq_ctxt_out.status) {
/* Firmware busy activating: flag it so upper layers can retry, don't
 * treat as a hard error message-wise.
 */
446 			if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW ||
447 				err == HINIC_DEV_BUSY_ACTIVE_FW) {
448 				cmdqs->status |= HINIC_CMDQ_SET_FAIL;
449 				PMD_DRV_LOG(ERR, "PF or VF fw is hot active");
451 			PMD_DRV_LOG(ERR, "Set cmdq ctxt failed, err: %d, status: 0x%x, out_size: 0x%x",
452 				err, cmdq_ctxt_out.status, out_size);
457 	cmdqs->status &= ~HINIC_CMDQ_SET_FAIL;
458 	cmdqs->status |= HINIC_CMDQ_ENABLE;
/* Public teardown entry point; simply delegates to the file-local
 * hinic_cmdqs_free().
 */
463 void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
465 	hinic_cmdqs_free(hwdev);
/* Re-initialize all cmdqs after a device reset: restore each queue's wrapped
 * (owner-bit) state to its initial value of 1, clear the WQE pages, then
 * re-program the contexts into the management CPU.
 */
468 int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
470 	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
471 	enum hinic_cmdq_type cmdq_type;
473 	cmdq_type = HINIC_CMDQ_SYNC;
474 	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
475 		cmdqs->cmdq[cmdq_type].wrapped = 1;
476 		hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
479 	return hinic_set_cmdq_ctxts(hwdev);
/* Initialize a single cmdq: bind it to its WQ and type, allocate the
 * per-depth errcode and cmd_infos tracking arrays, and map a doorbell page.
 * Unwinds all acquired resources on failure via the goto-cleanup labels
 * below.
 * NOTE(review): several lines (wrapped=1 init, error-check branches, goto
 * labels, return statements) are elided in this view of the file.
 */
482 static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
483 		     struct hinic_wq *wq, enum hinic_cmdq_type q_type)
485 	void __iomem *db_base;
488 	size_t cmd_infos_size;
491 	cmdq->cmdq_type = q_type;
494 	spin_lock_init(&cmdq->cmdq_lock);
/* One errcode slot per queue entry, indexed by producer index */
496 	errcode_size = wq->q_depth * sizeof(*cmdq->errcode);
497 	cmdq->errcode = kzalloc(errcode_size, GFP_KERNEL);
498 	if (!cmdq->errcode) {
499 		PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
500 		spin_lock_deinit(&cmdq->cmdq_lock);
/* One cmd_info slot per queue entry, tracking each WQE's lifecycle state */
504 	cmd_infos_size = wq->q_depth * sizeof(*cmdq->cmd_infos);
505 	cmdq->cmd_infos = kzalloc(cmd_infos_size, GFP_KERNEL);
506 	if (!cmdq->cmd_infos) {
507 		PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
512 	err = hinic_alloc_db_addr(hwdev, &db_base);
516 	cmdq->db_base = (u8 *)db_base;
/* error unwind: free in reverse order of acquisition */
520 	kfree(cmdq->cmd_infos);
523 	kfree(cmdq->errcode);
524 	spin_lock_deinit(&cmdq->cmdq_lock);
/* Release everything init_cmdq() acquired for one queue: the doorbell
 * mapping, both tracking arrays, and the spinlock.
 */
529 static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
531 	hinic_free_db_addr(hwdev, cmdq->db_base);
532 	kfree(cmdq->cmd_infos);
533 	kfree(cmdq->errcode);
534 	spin_lock_deinit(&cmdq->cmdq_lock);
/* Create the whole cmdq subsystem for a device: the cmdqs container, the
 * saved-WQ array, the DMA pool for command buffers, the WQs themselves, each
 * per-type cmdq (with its context prepared), and finally program the
 * contexts into the management CPU. Failure unwinds through the labels at
 * the bottom in reverse acquisition order.
 * NOTE(review): error-check branches, goto labels and return statements are
 * elided in this view of the file.
 */
537 static int hinic_cmdqs_init(struct hinic_hwdev *hwdev)
539 	struct hinic_cmdqs *cmdqs;
540 	struct hinic_cmdq_ctxt *cmdq_ctxt;
541 	enum hinic_cmdq_type type, cmdq_type;
542 	size_t saved_wqs_size;
545 	cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
549 	hwdev->cmdqs = cmdqs;
550 	cmdqs->hwdev = hwdev;
552 	saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
553 	cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
554 	if (!cmdqs->saved_wqs) {
555 		PMD_DRV_LOG(ERR, "Allocate saved wqs failed");
/* Pool of DMA-able buffers shared by all cmdqs on this device */
560 	cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev,
562 					      HINIC_CMDQ_BUF_SIZE, 0ULL);
563 	if (!cmdqs->cmd_buf_pool) {
564 		PMD_DRV_LOG(ERR, "Create cmdq buffer pool failed");
566 		goto pool_create_err;
569 	err = hinic_cmdq_alloc(cmdqs->saved_wqs, hwdev,
570 			       HINIC_MAX_CMDQ_TYPES, HINIC_CMDQ_WQ_BUF_SIZE,
571 			       CMDQ_WQEBB_SHIFT, HINIC_CMDQ_DEPTH);
573 		PMD_DRV_LOG(ERR, "Allocate cmdq failed");
577 	cmdq_type = HINIC_CMDQ_SYNC;
578 	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
579 		err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
580 				&cmdqs->saved_wqs[cmdq_type], cmdq_type);
582 			PMD_DRV_LOG(ERR, "Initialize cmdq failed");
586 		cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
587 		cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], cmdq_ctxt);
590 	err = hinic_set_cmdq_ctxts(hwdev);
/* error unwind: free only the cmdqs that were successfully initialized */
597 	type = HINIC_CMDQ_SYNC;
598 	for ( ; type < cmdq_type; type++)
599 		free_cmdq(hwdev, &cmdqs->cmdq[type]);
601 	hinic_cmdq_free(hwdev, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES);
604 	dma_pool_destroy(cmdqs->cmd_buf_pool);
607 	kfree(cmdqs->saved_wqs);
/* Tear down the cmdq subsystem: clear the ENABLE flag first so no new
 * commands are accepted, then free each per-type cmdq, the WQs, the DMA
 * buffer pool, and the saved-WQ array (the final kfree of cmdqs itself is
 * elided in this view).
 */
615 static void hinic_cmdqs_free(struct hinic_hwdev *hwdev)
617 	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
618 	enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
620 	cmdqs->status &= ~HINIC_CMDQ_ENABLE;
622 	for ( ; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
623 		free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);
625 	hinic_cmdq_free(hwdev, cmdqs->saved_wqs,
626 			HINIC_MAX_CMDQ_TYPES);
628 	dma_pool_destroy(cmdqs->cmd_buf_pool);
630 	kfree(cmdqs->saved_wqs);
/* Tell the management CPU the cmdq depth via the VAT (root context) command.
 * The depth is sent as log2 since hardware expects a power-of-two exponent.
 * The same root_ctxt struct is reused as the response buffer.
 * NOTE(review): the success/error return statements are elided in this view
 * of the file.
 */
635 static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
637 	struct hinic_root_ctxt root_ctxt;
638 	u16 out_size = sizeof(root_ctxt);
641 	memset(&root_ctxt, 0, sizeof(root_ctxt));
642 	root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
643 	root_ctxt.func_idx = hinic_global_func_id(hwdev);
644 	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
645 	root_ctxt.set_cmdq_depth = 1;
646 	root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
647 	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
648 				     HINIC_MGMT_CMD_VAT_SET,
649 				     &root_ctxt, sizeof(root_ctxt),
650 				     &root_ctxt, &out_size, 0);
651 	if (err || !out_size || root_ctxt.mgmt_msg_head.status) {
652 		PMD_DRV_LOG(ERR, "Set cmdq depth failed, err: %d, status: 0x%x, out_size: 0x%x",
653 			err, root_ctxt.mgmt_msg_head.status, out_size);
/* Public init entry point: build the cmdq subsystem, then program the queue
 * depth into the management CPU; on depth-set failure the freshly created
 * cmdqs are torn down again before returning the error.
 */
660 int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
664 	err = hinic_cmdqs_init(hwdev);
666 		PMD_DRV_LOG(ERR, "Init cmd queues failed");
670 	err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
672 		PMD_DRV_LOG(ERR, "Set cmdq depth failed");
673 		goto set_cmdq_depth_err;
/* undo hinic_cmdqs_init() if the depth could not be programmed */
679 	hinic_cmdqs_free(hwdev);
/* Populate the context structure for one cmdq that will later be sent to the
 * management CPU: the current WQE page PFN word (busy bit set, CEQ enabled
 * but not armed, CEQ id), the WQ block PFN word carrying the start consumer
 * index, and the function/PPF/queue identifiers.
 * NOTE(review): both PFN fields are derived from the same first-page address
 * here — confirm against the full source whether wq_block_pfn is meant to
 * use a distinct block address.
 */
684 static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
685 				 struct hinic_cmdq_ctxt *cmdq_ctxt)
687 	struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)cmdq_to_cmdqs(cmdq);
688 	struct hinic_hwdev *hwdev = cmdqs->hwdev;
689 	struct hinic_wq *wq = cmdq->wq;
690 	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
691 	u64 wq_first_page_paddr, pfn;
693 	u16 start_ci = (u16)(wq->cons_idx);
695 	/* The data in the HW is in Big Endian Format */
696 	wq_first_page_paddr = wq->queue_buf_paddr;
698 	pfn = CMDQ_PFN(wq_first_page_paddr, HINIC_PAGE_SIZE);
699 	ctxt_info->curr_wqe_page_pfn =
700 		CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
701 		CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN)	|
702 		CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM)	|
703 		CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
704 		CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
706 	ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
707 				  CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
708 	cmdq_ctxt->func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
709 	cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
710 	cmdq_ctxt->cmdq_id = cmdq->cmdq_type;
/* Busy-poll the oldest outstanding cmdq WQE until its hardware-completed bit
 * is observed or 'timeout' ms elapse. On completion the big-endian status
 * word is decoded into an errcode, the errcode is recorded for the submitter,
 * the WQE is retired, and the cmd_info slot is invalidated.
 * NOTE(review): the do-statement line, the loop-exit break, timeout return
 * and final return are elided in this view of the file.
 */
713 static int hinic_cmdq_poll_msg(struct hinic_cmdq *cmdq, u32 timeout)
715 	struct hinic_cmdq_wqe *wqe;
716 	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
717 	struct hinic_ctrl *ctrl;
718 	struct hinic_cmdq_cmd_info *cmd_info;
719 	u32 status_info, ctrl_info;
726 	wqe = hinic_read_wqe(cmdq->wq, 1, &ci);
728 		PMD_DRV_LOG(ERR, "No outstanding cmdq msg");
732 	cmd_info = &cmdq->cmd_infos[ci];
733 	/* this cmd has not been filled and send to hw, or get TMO msg ack*/
734 	if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
735 		PMD_DRV_LOG(ERR, "Cmdq msg has not been filled and send to hw, or get TMO msg ack. cmdq ci: %u",
740 	/* only arm bit is using scmd wqe, the wqe is lcmd */
741 	wqe_lcmd = &wqe->wqe_lcmd;
742 	ctrl = &wqe_lcmd->ctrl;
743 	end = jiffies + msecs_to_jiffies(timeout);
/* spin until the device flips the HW busy bit in the (big-endian) ctrl word */
745 		ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
746 		if (WQE_COMPLETED(ctrl_info)) {
752 	} while (time_before(jiffies, end));
755 		status_info = be32_to_cpu(wqe_lcmd->status.status_info);
756 		errcode = WQE_ERRCODE_GET(status_info, VAL);
757 		cmdq_update_errcode(cmdq, ci, errcode);
758 		clear_wqe_complete_bit(cmdq, wqe);
761 		PMD_DRV_LOG(ERR, "Poll cmdq msg time out, ci: %u", ci);
765 	/* set this cmd invalid */
766 	cmd_info->cmd_type = HINIC_CMD_TYPE_NONE;
/* Synchronously submit a direct-response command: under the cmdq lock,
 * reserve WQEBBs, build the WQE in a stack shadow copy (tracking owner-bit
 * wrap-around), byte-swap it, copy it into the queue with the header written
 * last, ring the doorbell, then poll for completion and return the device's
 * 64-bit direct response through *out_param.
 * NOTE(review): the get-wqe failure branch, the timeout/error exit paths and
 * the final return are elided in this view of the file.
 */
771 static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
772 				     enum hinic_ack_type ack_type,
773 				     enum hinic_mod_type mod, u8 cmd,
774 				     struct hinic_cmd_buf *buf_in,
775 				     u64 *out_param, u32 timeout)
777 	struct hinic_wq *wq = cmdq->wq;
778 	struct hinic_cmdq_wqe *curr_wqe, wqe;
779 	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
780 	u16 curr_prod_idx, next_prod_idx, num_wqebbs;
785 	wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
786 	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
788 	/* Keep wrapped and doorbell index correct. */
789 	spin_lock(&cmdq->cmdq_lock);
791 	curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
797 	memset(&wqe, 0, sizeof(wqe));
798 	wrapped = cmdq->wrapped;
/* Toggle the owner bit when the producer index wraps past queue depth */
800 	next_prod_idx = curr_prod_idx + num_wqebbs;
801 	if (next_prod_idx >= wq->q_depth) {
802 		cmdq->wrapped = !cmdq->wrapped;
803 		next_prod_idx -= wq->q_depth;
806 	cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
807 			  wrapped, ack_type, mod, cmd, curr_prod_idx);
809 	/* The data that is written to HW should be in Big Endian Format */
810 	hinic_cpu_to_be32(&wqe, wqe_size);
812 	/* CMDQ WQE is not shadow, therefore wqe will be written to wq */
813 	cmdq_wqe_fill(curr_wqe, &wqe);
815 	cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_NORMAL;
817 	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
819 	timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
820 	err = hinic_cmdq_poll_msg(cmdq, timeo);
822 		PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x",
828 	rte_smp_rmb();	/* read error code after completion */
831 		wqe_lcmd = &curr_wqe->wqe_lcmd;
832 		*out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp);
/* errcode values of 0/1 are treated as success; anything larger propagates */
835 	if (cmdq->errcode[curr_prod_idx] > 1) {
836 		err = cmdq->errcode[curr_prod_idx];
841 	spin_unlock(&cmdq->cmdq_lock);
846 int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
847 enum hinic_mod_type mod, u8 cmd,
848 struct hinic_cmd_buf *buf_in,
849 u64 *out_param, u32 timeout)
851 struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
852 int err = cmdq_params_valid(buf_in);
855 PMD_DRV_LOG(ERR, "Invalid CMDQ parameters");
859 err = wait_cmdqs_enable(cmdqs);
861 PMD_DRV_LOG(ERR, "Cmdq is disable");
865 return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
866 ack_type, mod, cmd, buf_in,