1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Huawei Technologies Co., Ltd
5 #include "hinic_compat.h"
6 #include "hinic_pmd_hwdev.h"
7 #include "hinic_pmd_hwif.h"
8 #include "hinic_pmd_wq.h"
9 #include "hinic_pmd_mgmt.h"
10 #include "hinic_pmd_cmdq.h"
/* Default synchronous cmdq command timeout. */
12 #define CMDQ_CMD_TIMEOUT 5000 /* millisecond */
/* Byte-extraction helpers for the 16-bit producer index. */
14 #define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
15 #define LOWER_8_BITS(data) ((data) & 0xFF)
/* Doorbell info word layout: high byte of PI, queue type, cmdq type, source. */
17 #define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
18 #define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23
19 #define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
20 #define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27
22 #define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU
23 #define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U
24 #define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U
25 #define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU
27 #define CMDQ_DB_INFO_SET(val, member) \
28 (((val) & CMDQ_DB_INFO_##member##_MASK) << \
29 CMDQ_DB_INFO_##member##_SHIFT)
/* WQE ctrl word layout: PI, command, module, ack type, HW busy (owner) bit. */
31 #define CMDQ_CTRL_PI_SHIFT 0
32 #define CMDQ_CTRL_CMD_SHIFT 16
33 #define CMDQ_CTRL_MOD_SHIFT 24
34 #define CMDQ_CTRL_ACK_TYPE_SHIFT 29
35 #define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
37 #define CMDQ_CTRL_PI_MASK 0xFFFFU
38 #define CMDQ_CTRL_CMD_MASK 0xFFU
39 #define CMDQ_CTRL_MOD_MASK 0x1FU
40 #define CMDQ_CTRL_ACK_TYPE_MASK 0x3U
41 #define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U
43 #define CMDQ_CTRL_SET(val, member) \
44 (((val) & CMDQ_CTRL_##member##_MASK) << CMDQ_CTRL_##member##_SHIFT)
46 #define CMDQ_CTRL_GET(val, member) \
47 (((val) >> CMDQ_CTRL_##member##_SHIFT) & CMDQ_CTRL_##member##_MASK)
/* WQE header word layout: buffer-descriptor length, completion/data formats,
 * section lengths and the HW busy (wrap) bit. */
49 #define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
50 #define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
51 #define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
52 #define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
53 #define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
54 #define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
55 #define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31
57 #define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU
58 #define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U
59 #define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U
60 #define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U
61 #define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U
62 #define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U
63 #define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U
65 #define CMDQ_WQE_HEADER_SET(val, member) \
66 (((val) & CMDQ_WQE_HEADER_##member##_MASK) << \
67 CMDQ_WQE_HEADER_##member##_SHIFT)
69 #define CMDQ_WQE_HEADER_GET(val, member) \
70 (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \
71 CMDQ_WQE_HEADER_##member##_MASK)
/* cmdq context "current WQE page" 64-bit field layout (u64 casts in the
 * SET macro below keep shifts >= 32 well-defined). */
73 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
74 #define CMDQ_CTXT_EQ_ID_SHIFT 56
75 #define CMDQ_CTXT_CEQ_ARM_SHIFT 61
76 #define CMDQ_CTXT_CEQ_EN_SHIFT 62
77 #define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63
79 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
80 #define CMDQ_CTXT_EQ_ID_MASK 0x1F
81 #define CMDQ_CTXT_CEQ_ARM_MASK 0x1
82 #define CMDQ_CTXT_CEQ_EN_MASK 0x1
83 #define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1
85 #define CMDQ_CTXT_PAGE_INFO_SET(val, member) \
86 (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
88 #define CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
89 ((val) & (~((u64)CMDQ_CTXT_##member##_MASK << \
90 CMDQ_CTXT_##member##_SHIFT)))
/* cmdq context "WQ block" 64-bit field layout: block PFN + consumer index. */
92 #define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
93 #define CMDQ_CTXT_CI_SHIFT 52
95 #define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
96 #define CMDQ_CTXT_CI_MASK 0xFFF
98 #define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
99 (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
/* ARM flag kept in the WQE header's saved_data word. */
101 #define SAVED_DATA_ARM_SHIFT 31
103 #define SAVED_DATA_ARM_MASK 0x1U
105 #define SAVED_DATA_SET(val, member) \
106 (((val) & SAVED_DATA_##member##_MASK) << SAVED_DATA_##member##_SHIFT)
108 #define SAVED_DATA_CLEAR(val, member) \
109 ((val) & (~(SAVED_DATA_##member##_MASK << SAVED_DATA_##member##_SHIFT)))
/* Error code extracted from the WQE status word written back by the HW. */
111 #define WQE_ERRCODE_VAL_SHIFT 20
113 #define WQE_ERRCODE_VAL_MASK 0xF
115 #define WQE_ERRCODE_GET(val, member) \
116 (((val) >> WQE_ERRCODE_##member##_SHIFT) & WQE_ERRCODE_##member##_MASK)
/* Completion test: HW flips the busy/owner bit when it finishes the WQE. */
118 #define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
120 #define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
/* Doorbell register offset is derived from the low PI byte (<< 3 = *8). */
122 #define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
124 #define CMDQ_DB_ADDR(db_base, pi) \
125 (((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi))
/* Physical address -> page frame number for the given page size. */
127 #define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
/* First 8 bytes of a WQE hold the owner bit; they must be written last. */
129 #define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
131 #define WQE_LCMD_SIZE 64
132 #define WQE_SCMD_SIZE 64
134 #define COMPLETE_LEN 3
136 #define CMDQ_WQEBB_SIZE 64
137 #define CMDQ_WQEBB_SHIFT 6
139 #define CMDQ_WQE_SIZE 64
141 #define HINIC_CMDQ_WQ_BUF_SIZE 4096
/* Number of WQ basic blocks a WQE of the given size occupies. */
143 #define WQE_NUM_WQEBBS(wqe_size, wq) \
144 ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
/* Recover the owning hinic_cmdqs from an embedded cmdq array element. */
146 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
147 struct hinic_cmdqs, cmdq[0])
/* Milliseconds to wait for the cmdqs ENABLE flag (see wait_cmdqs_enable). */
149 #define WAIT_CMDQ_ENABLE_TIMEOUT 300
/* Forward declarations for helpers defined later in this file. */
152 static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
153 struct hinic_cmdq_ctxt *cmdq_ctxt);
154 static void hinic_cmdqs_free(struct hinic_hwdev *hwdev);
/*
 * hinic_cmdq_idle - report whether a cmdq has no outstanding WQEs.
 * Idle iff the WQ's free-slot count (delta) equals the full queue depth.
 * NOTE(review): braces/blank lines are missing from this extracted view.
 */
156 bool hinic_cmdq_idle(struct hinic_cmdq *cmdq)
158 struct hinic_wq *wq = cmdq->wq;
160 return ((wq->delta) == wq->q_depth ? true : false);
/*
 * hinic_alloc_cmd_buf - allocate a command buffer backed by the cmdq DMA pool.
 * @hwdev: opaque hinic_hwdev pointer (public API, hence void *).
 * Returns the new hinic_cmd_buf, or NULL on failure (the NULL checks,
 * cleanup labels and return statements are on lines missing from this view).
 */
163 struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev)
165 struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
166 struct hinic_cmd_buf *cmd_buf;
168 cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_KERNEL);
/* Log emitted when kzalloc fails (guarding 'if' not visible here). */
170 PMD_DRV_LOG(ERR, "Allocate cmd buffer failed");
/* DMA-capable payload comes from the per-device pool; dma_addr is filled in. */
174 cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, &cmd_buf->dma_addr);
176 PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed");
177 goto alloc_pci_buf_err;
/*
 * hinic_free_cmd_buf - return a command buffer's DMA payload to the pool.
 * Counterpart of hinic_alloc_cmd_buf; the kfree of cmd_buf itself is on a
 * line missing from this view.
 */
187 void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf)
189 struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
191 pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
/*
 * cmdq_wqe_size - map a WQE type to its size in bytes.
 * Both LCMD and SCMD WQEs are 64 bytes today; the switch/case scaffolding
 * and the return statement are on lines missing from this view.
 */
195 static u32 cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
201 wqe_size = WQE_LCMD_SIZE;
204 wqe_size = WQE_SCMD_SIZE;
/*
 * cmdq_get_wqe_size - derive the WQE size from the buffer-descriptor length
 * field read out of a WQE header (see CMDQ_WQE_HEADER_GET(..., BUFDESC_LEN)).
 * Switch scaffolding, default case and return are missing from this view.
 */
211 static int cmdq_get_wqe_size(enum bufdesc_len len)
216 case BUFDESC_LCMD_LEN:
217 wqe_size = WQE_LCMD_SIZE;
219 case BUFDESC_SCMD_LEN:
220 wqe_size = WQE_SCMD_SIZE;
/*
 * cmdq_set_completion - point a WQE's completion section at the caller's
 * response buffer as an SGE, so the HW can write the SGE-format response
 * into buf_out.
 */
227 static void cmdq_set_completion(struct hinic_cmdq_completion *complete,
228 struct hinic_cmd_buf *buf_out)
230 struct hinic_sge_resp *sge_resp = &complete->sge_resp;
232 hinic_set_sge(&sge_resp->sge, buf_out->dma_addr,
233 HINIC_CMDQ_BUF_SIZE);
/*
 * cmdq_set_lcmd_bufdesc - fill the LCMD WQE buffer descriptor with the
 * input command buffer's DMA address and length.
 */
236 static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe,
237 struct hinic_cmd_buf *buf_in)
239 hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
/*
 * cmdq_fill_db - compose the doorbell info word: high byte of the producer
 * index plus queue/cmdq/source type fields (see CMDQ_DB_INFO_* layout).
 * The low PI byte is encoded in the doorbell address, not here.
 */
242 static void cmdq_fill_db(struct hinic_cmdq_db *db,
243 enum hinic_cmdq_type cmdq_type, u16 prod_idx)
245 db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
246 CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
247 CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
248 CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
/*
 * cmdq_set_db - ring the cmdq doorbell for prod_idx. The info word is
 * byte-swapped to big endian (HW format) and written with a full write
 * barrier first so all prior WQE stores are visible to the device.
 */
251 static void cmdq_set_db(struct hinic_cmdq *cmdq,
252 enum hinic_cmdq_type cmdq_type, u16 prod_idx)
254 struct hinic_cmdq_db db;
256 cmdq_fill_db(&db, cmdq_type, prod_idx);
258 /* The data that is written to HW should be in Big Endian Format */
259 db.db_info = cpu_to_be32(db.db_info);
261 rte_wmb(); /* write all before the doorbell */
263 writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
/*
 * cmdq_wqe_fill - copy a staged WQE into the live work queue. The tail
 * (everything after the first 8 bytes) is copied first; a write barrier
 * then orders the final 8-byte store, which carries the owner/busy bit,
 * so the HW never sees a valid header over a half-written body.
 */
266 static void cmdq_wqe_fill(void *dst, void *src)
268 memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
269 (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
270 CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
272 rte_wmb();/* The first 8 bytes should be written last */
274 *(u64 *)dst = *(u64 *)src;
/*
 * cmdq_prepare_wqe_ctrl - fill the ctrl word, header word and saved_data
 * of a staged WQE.
 * @wrapped: current queue wrap state, stored in the header HW_BUSY_BIT so
 *           the HW can distinguish new WQEs from stale ones after wrap.
 * The else-branch braces between the SGE and inline paths are on lines
 * missing from this view.
 */
277 static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
278 enum hinic_ack_type ack_type,
279 enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
280 enum completion_format complete_format,
281 enum data_format local_data_format,
282 enum bufdesc_len buf_len)
284 struct hinic_ctrl *ctrl;
285 enum ctrl_sect_len ctrl_len;
286 struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
287 struct hinic_cmdq_wqe_scmd *wqe_scmd;
288 u32 saved_data = WQE_HEADER(wqe)->saved_data;
/* SGE data format -> long-command WQE layout; otherwise inline short cmd. */
290 if (local_data_format == DATA_SGE) {
291 wqe_lcmd = &wqe->wqe_lcmd;
293 wqe_lcmd->status.status_info = 0;
294 ctrl = &wqe_lcmd->ctrl;
295 ctrl_len = CTRL_SECT_LEN;
297 wqe_scmd = &wqe->inline_wqe.wqe_scmd;
299 wqe_scmd->status.status_info = 0;
300 ctrl = &wqe_scmd->ctrl;
301 ctrl_len = CTRL_DIRECT_SECT_LEN;
304 ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) |
305 CMDQ_CTRL_SET(cmd, CMD) |
306 CMDQ_CTRL_SET(mod, MOD) |
307 CMDQ_CTRL_SET(ack_type, ACK_TYPE);
309 WQE_HEADER(wqe)->header_info =
310 CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
311 CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
312 CMDQ_WQE_HEADER_SET(local_data_format, DATA_FMT) |
313 CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
314 CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
315 CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
316 CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
/* ARM commands set the ARM flag in saved_data; everything else clears it. */
318 if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) {
/* NOTE(review): '&= SAVED_DATA_CLEAR(saved_data, ...)' ANDs the value with
 * its own masked copy — equivalent to plain '= SAVED_DATA_CLEAR(...)';
 * harmless but redundant (same pattern below). */
319 saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
320 WQE_HEADER(wqe)->saved_data = saved_data |
321 SAVED_DATA_SET(1, ARM);
323 saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
324 WQE_HEADER(wqe)->saved_data = saved_data;
/*
 * cmdq_set_lcmd_wqe - stage a long-command WQE: choose the completion
 * format from the command type, then fill ctrl/header and the input
 * buffer descriptor. Switch scaffolding and 'break's are on lines
 * missing from this view.
 */
328 static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
329 enum cmdq_cmd_type cmd_type,
330 struct hinic_cmd_buf *buf_in,
331 struct hinic_cmd_buf *buf_out, int wrapped,
332 enum hinic_ack_type ack_type,
333 enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
335 struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
336 enum completion_format complete_format = COMPLETE_DIRECT;
/* SGE response: HW writes the reply into buf_out via the completion SGE. */
339 case SYNC_CMD_SGE_RESP:
341 complete_format = COMPLETE_SGE;
342 cmdq_set_completion(&wqe_lcmd->completion, buf_out);
/* Direct response: 8-byte reply lands inline in completion.direct_resp. */
345 case SYNC_CMD_DIRECT_RESP:
346 complete_format = COMPLETE_DIRECT;
347 wqe_lcmd->completion.direct_resp = 0;
350 complete_format = COMPLETE_DIRECT;
351 wqe_lcmd->completion.direct_resp = 0;
/* Async path keeps the buffer pointer so it can be freed on completion. */
353 wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
357 cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
358 prod_idx, complete_format, DATA_SGE,
361 cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
/*
 * cmdq_params_valid - reject command buffers larger than the cmdq maximum.
 * Returns 0 on success, nonzero error otherwise (return statements are on
 * lines missing from this view).
 */
364 static int cmdq_params_valid(struct hinic_cmd_buf *buf_in)
366 if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
367 PMD_DRV_LOG(ERR, "Invalid CMDQ buffer size");
/*
 * wait_cmdqs_enable - busy-poll until the cmdqs ENABLE status flag is set
 * or WAIT_CMDQ_ENABLE_TIMEOUT ms elapse. Success/timeout returns are on
 * lines missing from this view.
 */
374 static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
378 end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
380 if (cmdqs->status & HINIC_CMDQ_ENABLE)
383 } while (time_before(jiffies, end));
/*
 * cmdq_update_errcode - record the HW error code for the WQE at prod_idx
 * so the synchronous sender can read it after completion.
 */
388 static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
391 cmdq->errcode[prod_idx] = errcode;
/*
 * clear_wqe_complete_bit - retire a completed WQE: locate its ctrl word
 * (LCMD vs inline SCMD layout, by the size derived from the header), clear
 * the HW busy bit, then release the WQEBBs back to the work queue. The
 * ctrl_info store that actually clears the bit is on a line missing from
 * this view (below the "clear HW busy bit" comment).
 */
394 static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
395 struct hinic_cmdq_wqe *wqe)
397 struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
398 struct hinic_cmdq_inline_wqe *inline_wqe;
399 struct hinic_cmdq_wqe_scmd *wqe_scmd;
400 struct hinic_ctrl *ctrl;
/* Header is stored big endian (HW format); swap before decoding. */
401 u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
402 int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
403 int wqe_size = cmdq_get_wqe_size(buf_len);
406 if (wqe_size == WQE_LCMD_SIZE) {
407 wqe_lcmd = &wqe->wqe_lcmd;
408 ctrl = &wqe_lcmd->ctrl;
410 inline_wqe = &wqe->inline_wqe;
411 wqe_scmd = &inline_wqe->wqe_scmd;
412 ctrl = &wqe_scmd->ctrl;
415 /* clear HW busy bit */
418 rte_wmb(); /* verify wqe is clear */
420 num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
421 hinic_put_wqe(cmdq->wq, num_wqebbs);
/*
 * hinic_set_cmdq_ctxts - push every cmdq's context to the management CPU
 * (HINIC_MGMT_CMD_CMDQ_CTXT_SET) and, on success of all of them, mark the
 * cmdqs as enabled. Error return paths are on lines missing from this view.
 */
424 static int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
426 struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
427 struct hinic_cmdq_ctxt *cmdq_ctxt;
428 enum hinic_cmdq_type cmdq_type;
432 cmdq_type = HINIC_CMDQ_SYNC;
433 for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
434 cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
/* Responses for this management message arrive on AEQ1. */
435 cmdq_ctxt->resp_aeq_num = HINIC_AEQ1;
436 in_size = sizeof(*cmdq_ctxt);
437 err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
438 HINIC_MGMT_CMD_CMDQ_CTXT_SET,
439 cmdq_ctxt, in_size, NULL,
442 PMD_DRV_LOG(ERR, "Set cmdq ctxt failed");
/* All contexts accepted: commands may now be submitted. */
447 cmdqs->status |= HINIC_CMDQ_ENABLE;
/* hinic_comm_cmdqs_free - public teardown wrapper around hinic_cmdqs_free. */
452 void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
454 hinic_cmdqs_free(hwdev);
/*
 * hinic_reinit_cmdq_ctxts - reset every cmdq to its initial state (wrap
 * flag back to 1, WQE pages cleared) and re-push the contexts to the
 * management CPU. Used after a device reset/reinit.
 */
457 int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
459 struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
460 enum hinic_cmdq_type cmdq_type;
462 cmdq_type = HINIC_CMDQ_SYNC;
463 for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
464 cmdqs->cmdq[cmdq_type].wrapped = 1;
465 hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
468 return hinic_set_cmdq_ctxts(hwdev);
/*
 * init_cmdq - initialize one cmdq: lock, per-WQE errcode and cmd_info
 * arrays, and a doorbell address. Uses goto-style cleanup; the labels,
 * some error returns and the wrapped-flag initialization are on lines
 * missing from this view.
 */
471 static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
472 struct hinic_wq *wq, enum hinic_cmdq_type q_type)
474 void __iomem *db_base;
477 size_t cmd_infos_size;
480 cmdq->cmdq_type = q_type;
483 spin_lock_init(&cmdq->cmdq_lock);
485 errcode_size = wq->q_depth * sizeof(*cmdq->errcode);
486 cmdq->errcode = kzalloc(errcode_size, GFP_KERNEL);
487 if (!cmdq->errcode) {
488 PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
489 spin_lock_deinit(&cmdq->cmdq_lock);
493 cmd_infos_size = wq->q_depth * sizeof(*cmdq->cmd_infos);
494 cmdq->cmd_infos = kzalloc(cmd_infos_size, GFP_KERNEL);
495 if (!cmdq->cmd_infos) {
/* NOTE(review): copy-paste log message — this failure is the cmd_infos
 * allocation, not errcode; string left unchanged here. */
496 PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
501 err = hinic_alloc_db_addr(hwdev, &db_base);
505 cmdq->db_base = (u8 *)db_base;
/* Error-path cleanup: release in reverse order of acquisition. */
509 kfree(cmdq->cmd_infos);
512 kfree(cmdq->errcode);
513 spin_lock_deinit(&cmdq->cmdq_lock);
/*
 * free_cmdq - release everything init_cmdq acquired, in reverse order:
 * doorbell address, cmd_info array, errcode array, then the lock.
 */
518 static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
520 hinic_free_db_addr(hwdev, cmdq->db_base);
521 kfree(cmdq->cmd_infos);
522 kfree(cmdq->errcode);
523 spin_lock_deinit(&cmdq->cmdq_lock);
/*
 * hinic_cmdqs_init - allocate and initialize the whole cmdq subsystem:
 * the hinic_cmdqs container, saved WQ array, DMA buffer pool, each cmdq's
 * WQ and per-queue state, then push the contexts to the management CPU.
 * Uses a goto cleanup ladder; the labels and several error assignments
 * are on lines missing from this view.
 */
526 static int hinic_cmdqs_init(struct hinic_hwdev *hwdev)
528 struct hinic_cmdqs *cmdqs;
529 struct hinic_cmdq_ctxt *cmdq_ctxt;
530 enum hinic_cmdq_type type, cmdq_type;
531 size_t saved_wqs_size;
534 cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
538 hwdev->cmdqs = cmdqs;
539 cmdqs->hwdev = hwdev;
541 saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
542 cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
543 if (!cmdqs->saved_wqs) {
544 PMD_DRV_LOG(ERR, "Allocate saved wqs failed");
/* Shared DMA pool for all command buffers (see hinic_alloc_cmd_buf). */
549 cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev,
551 HINIC_CMDQ_BUF_SIZE, 0ULL);
552 if (!cmdqs->cmd_buf_pool) {
553 PMD_DRV_LOG(ERR, "Create cmdq buffer pool failed");
555 goto pool_create_err;
558 err = hinic_cmdq_alloc(cmdqs->saved_wqs, hwdev,
559 HINIC_MAX_CMDQ_TYPES, HINIC_CMDQ_WQ_BUF_SIZE,
560 CMDQ_WQEBB_SHIFT, HINIC_CMDQ_DEPTH);
562 PMD_DRV_LOG(ERR, "Allocate cmdq failed");
566 cmdq_type = HINIC_CMDQ_SYNC;
567 for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
568 err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
569 &cmdqs->saved_wqs[cmdq_type], cmdq_type);
571 PMD_DRV_LOG(ERR, "Initialize cmdq failed");
575 cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
576 cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], cmdq_ctxt);
579 err = hinic_set_cmdq_ctxts(hwdev);
/* Cleanup ladder: free only the cmdqs initialized before the failure
 * (type < cmdq_type), then the WQs, pool and saved-WQ array. */
586 type = HINIC_CMDQ_SYNC;
587 for ( ; type < cmdq_type; type++)
588 free_cmdq(hwdev, &cmdqs->cmdq[type]);
590 hinic_cmdq_free(hwdev, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES);
593 dma_pool_destroy(cmdqs->cmd_buf_pool);
596 kfree(cmdqs->saved_wqs);
/*
 * hinic_cmdqs_free - tear down the cmdq subsystem: clear the ENABLE flag
 * first so no new commands are accepted, then free each cmdq, the WQs,
 * the DMA pool and the saved-WQ array (reverse of hinic_cmdqs_init).
 * The final kfree of cmdqs itself is on a line missing from this view.
 */
604 static void hinic_cmdqs_free(struct hinic_hwdev *hwdev)
606 struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
607 enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
609 cmdqs->status &= ~HINIC_CMDQ_ENABLE;
611 for ( ; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
612 free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);
614 hinic_cmdq_free(hwdev, cmdqs->saved_wqs,
615 HINIC_MAX_CMDQ_TYPES);
617 dma_pool_destroy(cmdqs->cmd_buf_pool);
619 kfree(cmdqs->saved_wqs);
/*
 * hinic_set_cmdq_depth - tell the management CPU the cmdq depth via the
 * root context (VAT_SET). Depth is sent as log2, so cmdq_depth should be
 * a power of two (HINIC_CMDQ_DEPTH at the caller — not verified here).
 */
624 static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
626 struct hinic_root_ctxt root_ctxt;
628 memset(&root_ctxt, 0, sizeof(root_ctxt));
629 root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
630 root_ctxt.func_idx = hinic_global_func_id(hwdev);
631 root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
632 root_ctxt.set_cmdq_depth = 1;
633 root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
634 return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
635 HINIC_MGMT_CMD_VAT_SET,
636 &root_ctxt, sizeof(root_ctxt),
/*
 * hinic_comm_cmdqs_init - public entry point: initialize the cmdq
 * subsystem, then program the queue depth; on depth failure, unwind via
 * hinic_cmdqs_free. Success/error returns are on lines missing from this
 * view.
 */
640 int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
644 err = hinic_cmdqs_init(hwdev);
646 PMD_DRV_LOG(ERR, "Init cmd queues failed");
650 err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
652 PMD_DRV_LOG(ERR, "Set cmdq depth failed");
653 goto set_cmdq_depth_err;
659 hinic_cmdqs_free(hwdev);
/*
 * cmdq_init_queue_ctxt - build the context the management CPU needs for
 * one cmdq: current WQE page PFN with CEQ/busy flags, the WQ block PFN
 * plus starting consumer index, and the function/PPF identity.
 */
664 static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
665 struct hinic_cmdq_ctxt *cmdq_ctxt)
667 struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)cmdq_to_cmdqs(cmdq);
668 struct hinic_hwdev *hwdev = cmdqs->hwdev;
669 struct hinic_wq *wq = cmdq->wq;
670 struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
671 u64 wq_first_page_paddr, pfn;
673 u16 start_ci = (u16)(wq->cons_idx);
675 /* The data in the HW is in Big Endian Format */
676 wq_first_page_paddr = wq->queue_buf_paddr;
678 pfn = CMDQ_PFN(wq_first_page_paddr, HINIC_PAGE_SIZE);
679 ctxt_info->curr_wqe_page_pfn =
680 CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
681 CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
682 CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM) |
683 CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
684 CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) ;
/* NOTE(review): the same pfn is used for both the current WQE page and
 * the WQ block below — valid when the WQ fits one contiguous buffer. */
686 ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
687 CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
688 cmdq_ctxt->func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
689 cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
690 cmdq_ctxt->cmdq_id = cmdq->cmdq_type;
/*
 * hinic_cmdq_poll_msg - busy-poll the oldest outstanding WQE (at the
 * consumer index) until the HW flips its completion bit or @timeout ms
 * elapse; on completion, record the errcode and retire the WQE. Some
 * returns, the 'break', and declarations of ci/end/errcode are on lines
 * missing from this view.
 */
693 static int hinic_cmdq_poll_msg(struct hinic_cmdq *cmdq, u32 timeout)
695 struct hinic_cmdq_wqe *wqe;
696 struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
697 struct hinic_ctrl *ctrl;
698 struct hinic_cmdq_cmd_info *cmd_info;
699 u32 status_info, ctrl_info;
706 wqe = hinic_read_wqe(cmdq->wq, 1, &ci);
708 PMD_DRV_LOG(ERR, "No outstanding cmdq msg");
712 cmd_info = &cmdq->cmd_infos[ci];
713 /* this cmd has not been filled and send to hw, or get TMO msg ack*/
714 if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
715 PMD_DRV_LOG(ERR, "Cmdq msg has not been filled and send to hw, or get TMO msg ack. cmdq ci: %u",
720 /* only arm bit is using scmd wqe, the wqe is lcmd */
721 wqe_lcmd = &wqe->wqe_lcmd;
722 ctrl = &wqe_lcmd->ctrl;
723 end = jiffies + msecs_to_jiffies(timeout);
/* Re-read ctrl each iteration; HW writes it big endian. */
725 ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
726 if (WQE_COMPLETED(ctrl_info)) {
732 } while (time_before(jiffies, end));
735 status_info = be32_to_cpu(wqe_lcmd->status.status_info);
736 errcode = WQE_ERRCODE_GET(status_info, VAL);
737 cmdq_update_errcode(cmdq, ci, errcode);
738 clear_wqe_complete_bit(cmdq, wqe);
741 PMD_DRV_LOG(ERR, "Poll cmdq msg time out, ci: %u", ci);
745 /* set this cmd invalid */
746 cmd_info->cmd_type = HINIC_CMD_TYPE_NONE;
/*
 * cmdq_sync_cmd_direct_resp - send a command synchronously and read the
 * 8-byte direct response. Stages a WQE locally, handles queue wrap,
 * copies it into the WQ (owner bit last), rings the doorbell, then polls
 * for completion under the cmdq lock. Some error paths, declarations
 * (wqe_size/wrapped/timeo/err) and the final return are on lines missing
 * from this view.
 */
751 static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
752 enum hinic_ack_type ack_type,
753 enum hinic_mod_type mod, u8 cmd,
754 struct hinic_cmd_buf *buf_in,
755 u64 *out_param, u32 timeout)
757 struct hinic_wq *wq = cmdq->wq;
758 struct hinic_cmdq_wqe *curr_wqe, wqe;
759 struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
760 u16 curr_prod_idx, next_prod_idx, num_wqebbs;
765 wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
766 num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
768 /* Keep wrapped and doorbell index correct. */
769 spin_lock(&cmdq->cmdq_lock);
771 curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
/* Stage the WQE on the stack, then copy in with the owner bit last. */
777 memset(&wqe, 0, sizeof(wqe));
778 wrapped = cmdq->wrapped;
780 next_prod_idx = curr_prod_idx + num_wqebbs;
781 if (next_prod_idx >= wq->q_depth) {
782 cmdq->wrapped = !cmdq->wrapped;
783 next_prod_idx -= wq->q_depth;
786 cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
787 wrapped, ack_type, mod, cmd, curr_prod_idx);
789 /* The data that is written to HW should be in Big Endian Format */
790 hinic_cpu_to_be32(&wqe, wqe_size);
792 /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
793 cmdq_wqe_fill(curr_wqe, &wqe);
795 cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_NORMAL;
797 cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
/* timeout == 0 means use the default CMDQ_CMD_TIMEOUT. */
799 timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
800 err = hinic_cmdq_poll_msg(cmdq, timeo);
802 PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x",
808 rte_smp_rmb(); /* read error code after completion */
811 wqe_lcmd = &curr_wqe->wqe_lcmd;
812 *out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp);
/* HW errcode values > 1 indicate command failure. */
815 if (cmdq->errcode[curr_prod_idx] > 1) {
816 err = cmdq->errcode[curr_prod_idx];
821 spin_unlock(&cmdq->cmdq_lock);
826 int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
827 enum hinic_mod_type mod, u8 cmd,
828 struct hinic_cmd_buf *buf_in,
829 u64 *out_param, u32 timeout)
831 struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
832 int err = cmdq_params_valid(buf_in);
835 PMD_DRV_LOG(ERR, "Invalid CMDQ parameters");
839 err = wait_cmdqs_enable(cmdqs);
841 PMD_DRV_LOG(ERR, "Cmdq is disable");
845 return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
846 ack_type, mod, cmd, buf_in,