net/hinic: allocate IO memory with socket id
[dpdk.git] drivers/net/hinic/base/hinic_pmd_cmdq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4
5 #include "hinic_compat.h"
6 #include "hinic_pmd_hwdev.h"
7 #include "hinic_pmd_hwif.h"
8 #include "hinic_pmd_wq.h"
9 #include "hinic_pmd_mgmt.h"
10 #include "hinic_pmd_cmdq.h"
11
12 #define CMDQ_CMD_TIMEOUT                                5000 /* millisecond */
13
14 #define UPPER_8_BITS(data)                              (((data) >> 8) & 0xFF)
15 #define LOWER_8_BITS(data)                              ((data) & 0xFF)
16
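/*
 * The *_SHIFT/*_MASK pairs together with the *_SET and *_GET macros below
 * pack and unpack bit fields: a value is masked and shifted into (or out
 * of) its member position. For example, CMDQ_DB_INFO_SET(UPPER_8_BITS(pi),
 * HI_PROD_IDX) places bits 8..15 of the producer index into bits 0..7 of
 * the doorbell info word.
 */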
17 #define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT                  0
18 #define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT                   23
19 #define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT                    24
20 #define CMDQ_DB_INFO_SRC_TYPE_SHIFT                     27
21
22 #define CMDQ_DB_INFO_HI_PROD_IDX_MASK                   0xFFU
23 #define CMDQ_DB_INFO_QUEUE_TYPE_MASK                    0x1U
24 #define CMDQ_DB_INFO_CMDQ_TYPE_MASK                     0x7U
25 #define CMDQ_DB_INFO_SRC_TYPE_MASK                      0x1FU
26
27 #define CMDQ_DB_INFO_SET(val, member)           \
28         (((val) & CMDQ_DB_INFO_##member##_MASK) <<      \
29                 CMDQ_DB_INFO_##member##_SHIFT)
30
31 #define CMDQ_CTRL_PI_SHIFT                              0
32 #define CMDQ_CTRL_CMD_SHIFT                             16
33 #define CMDQ_CTRL_MOD_SHIFT                             24
34 #define CMDQ_CTRL_ACK_TYPE_SHIFT                        29
35 #define CMDQ_CTRL_HW_BUSY_BIT_SHIFT                     31
36
37 #define CMDQ_CTRL_PI_MASK                               0xFFFFU
38 #define CMDQ_CTRL_CMD_MASK                              0xFFU
39 #define CMDQ_CTRL_MOD_MASK                              0x1FU
40 #define CMDQ_CTRL_ACK_TYPE_MASK                         0x3U
41 #define CMDQ_CTRL_HW_BUSY_BIT_MASK                      0x1U
42
43 #define CMDQ_CTRL_SET(val, member)              \
44         (((val) & CMDQ_CTRL_##member##_MASK) << CMDQ_CTRL_##member##_SHIFT)
45
46 #define CMDQ_CTRL_GET(val, member)              \
47         (((val) >> CMDQ_CTRL_##member##_SHIFT) & CMDQ_CTRL_##member##_MASK)
48
49 #define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT               0
50 #define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT              15
51 #define CMDQ_WQE_HEADER_DATA_FMT_SHIFT                  22
52 #define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT              23
53 #define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT         27
54 #define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT                  29
55 #define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT               31
56
57 #define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK                0xFFU
58 #define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK               0x1U
59 #define CMDQ_WQE_HEADER_DATA_FMT_MASK                   0x1U
60 #define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK               0x1U
61 #define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK          0x3U
62 #define CMDQ_WQE_HEADER_CTRL_LEN_MASK                   0x3U
63 #define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK                0x1U
64
65 #define CMDQ_WQE_HEADER_SET(val, member)        \
66         (((val) & CMDQ_WQE_HEADER_##member##_MASK) <<   \
67                 CMDQ_WQE_HEADER_##member##_SHIFT)
68
69 #define CMDQ_WQE_HEADER_GET(val, member)        \
70         (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) &  \
71                 CMDQ_WQE_HEADER_##member##_MASK)
72
73 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT               0
74 #define CMDQ_CTXT_EQ_ID_SHIFT                           56
75 #define CMDQ_CTXT_CEQ_ARM_SHIFT                         61
76 #define CMDQ_CTXT_CEQ_EN_SHIFT                          62
77 #define CMDQ_CTXT_HW_BUSY_BIT_SHIFT                     63
78
79 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK                0xFFFFFFFFFFFFF
80 #define CMDQ_CTXT_EQ_ID_MASK                            0x1F
81 #define CMDQ_CTXT_CEQ_ARM_MASK                          0x1
82 #define CMDQ_CTXT_CEQ_EN_MASK                           0x1
83 #define CMDQ_CTXT_HW_BUSY_BIT_MASK                      0x1
84
85 #define CMDQ_CTXT_PAGE_INFO_SET(val, member)            \
86         (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
87
88 #define CMDQ_CTXT_PAGE_INFO_CLEAR(val, member)          \
89         ((val) & (~((u64)CMDQ_CTXT_##member##_MASK <<   \
90                 CMDQ_CTXT_##member##_SHIFT)))
91
92 #define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT                    0
93 #define CMDQ_CTXT_CI_SHIFT                              52
94
95 #define CMDQ_CTXT_WQ_BLOCK_PFN_MASK                     0xFFFFFFFFFFFFF
96 #define CMDQ_CTXT_CI_MASK                               0xFFF
97
98 #define CMDQ_CTXT_BLOCK_INFO_SET(val, member)           \
99         (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
100
101 #define SAVED_DATA_ARM_SHIFT                    31
102
103 #define SAVED_DATA_ARM_MASK                     0x1U
104
105 #define SAVED_DATA_SET(val, member)             \
106         (((val) & SAVED_DATA_##member##_MASK) << SAVED_DATA_##member##_SHIFT)
107
108 #define SAVED_DATA_CLEAR(val, member)           \
109         ((val) & (~(SAVED_DATA_##member##_MASK << SAVED_DATA_##member##_SHIFT)))
110
111 #define WQE_ERRCODE_VAL_SHIFT                   20
112
113 #define WQE_ERRCODE_VAL_MASK                    0xF
114
115 #define WQE_ERRCODE_GET(val, member)            \
116         (((val) >> WQE_ERRCODE_##member##_SHIFT) & WQE_ERRCODE_##member##_MASK)
117
118 #define WQE_COMPLETED(ctrl_info)        CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
119
120 #define WQE_HEADER(wqe)         ((struct hinic_cmdq_header *)(wqe))
121
122 #define CMDQ_DB_PI_OFF(pi)              (((u16)LOWER_8_BITS(pi)) << 3)
123
124 #define CMDQ_DB_ADDR(db_base, pi)       \
125         (((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi))
126
127 #define CMDQ_PFN(addr, page_size)       ((addr) >> (ilog2(page_size)))
128
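/*
 * The first 8 bytes of a cmdq WQE are its header (which carries the HW
 * busy bit), so cmdq_wqe_fill() writes them only after the rest of the
 * WQE body has been copied.
 */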
129 #define FIRST_DATA_TO_WRITE_LAST        sizeof(u64)
130
131 #define WQE_LCMD_SIZE           64
132 #define WQE_SCMD_SIZE           64
133
134 #define COMPLETE_LEN            3
135
136 #define CMDQ_WQEBB_SIZE         64
137 #define CMDQ_WQEBB_SHIFT        6
138
139 #define CMDQ_WQE_SIZE           64
140
141 #define HINIC_CMDQ_WQ_BUF_SIZE  4096
142
143 #define WQE_NUM_WQEBBS(wqe_size, wq)    \
144         ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
145
146 #define cmdq_to_cmdqs(cmdq)     container_of((cmdq) - (cmdq)->cmdq_type, \
147                                 struct hinic_cmdqs, cmdq[0])
148
149 #define WAIT_CMDQ_ENABLE_TIMEOUT        300
150
151
152 static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
153                                  struct hinic_cmdq_ctxt *cmdq_ctxt);
154 static void hinic_cmdqs_free(struct hinic_hwdev *hwdev);
155
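/*
 * A cmdq is idle when all of its WQEBBs have been returned, i.e. the wq
 * delta is back to the full queue depth.
 */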
156 bool hinic_cmdq_idle(struct hinic_cmdq *cmdq)
157 {
158         struct hinic_wq *wq = cmdq->wq;
159
160         return wq->delta == wq->q_depth;
161 }
162
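/*
 * Allocate a command buffer descriptor and back it with a DMA buffer from
 * the cmdq buffer pool created in hinic_cmdqs_init().
 */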
163 struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev)
164 {
165         struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
166         struct hinic_cmd_buf *cmd_buf;
167
168         cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_KERNEL);
169         if (!cmd_buf) {
170                 PMD_DRV_LOG(ERR, "Allocate cmd buffer failed");
171                 return NULL;
172         }
173
174         cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, &cmd_buf->dma_addr);
175         if (!cmd_buf->buf) {
176                 PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed");
177                 goto alloc_pci_buf_err;
178         }
179
180         return cmd_buf;
181
182 alloc_pci_buf_err:
183         kfree(cmd_buf);
184         return NULL;
185 }
186
187 void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf)
188 {
189         struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
190
191         pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
192         kfree(cmd_buf);
193 }
194
195 static u32 cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
196 {
197         u32 wqe_size = 0;
198
199         switch (wqe_type) {
200         case WQE_LCMD_TYPE:
201                 wqe_size = WQE_LCMD_SIZE;
202                 break;
203         case WQE_SCMD_TYPE:
204                 wqe_size = WQE_SCMD_SIZE;
205                 break;
206         }
207
208         return wqe_size;
209 }
210
211 static int cmdq_get_wqe_size(enum bufdesc_len len)
212 {
213         int wqe_size = 0;
214
215         switch (len) {
216         case BUFDESC_LCMD_LEN:
217                 wqe_size = WQE_LCMD_SIZE;
218                 break;
219         case BUFDESC_SCMD_LEN:
220                 wqe_size = WQE_SCMD_SIZE;
221                 break;
222         }
223
224         return wqe_size;
225 }
226
227 static void cmdq_set_completion(struct hinic_cmdq_completion *complete,
228                                         struct hinic_cmd_buf *buf_out)
229 {
230         struct hinic_sge_resp *sge_resp = &complete->sge_resp;
231
232         hinic_set_sge(&sge_resp->sge, buf_out->dma_addr,
233                       HINIC_CMDQ_BUF_SIZE);
234 }
235
236 static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe,
237                                         struct hinic_cmd_buf *buf_in)
238 {
239         hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
240 }
241
242 static void cmdq_fill_db(struct hinic_cmdq_db *db,
243                         enum hinic_cmdq_type cmdq_type, u16 prod_idx)
244 {
245         db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
246                         CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
247                         CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE)          |
248                         CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
249 }
250
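/*
 * Ring the cmdq doorbell: the doorbell info word is converted to big
 * endian and written, after a write barrier, to the doorbell address
 * derived from the producer index.
 */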
251 static void cmdq_set_db(struct hinic_cmdq *cmdq,
252                         enum hinic_cmdq_type cmdq_type, u16 prod_idx)
253 {
254         struct hinic_cmdq_db db;
255
256         cmdq_fill_db(&db, cmdq_type, prod_idx);
257
258         /* The data that is written to HW should be in Big Endian Format */
259         db.db_info = cpu_to_be32(db.db_info);
260
261         rte_wmb();      /* write all before the doorbell */
262
263         writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
264 }
265
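/*
 * Copy a locally built WQE into the work queue: the body is copied first
 * and the first 8 bytes (the header with the HW busy bit) are written
 * last, after a barrier, so the hardware does not see a half-written WQE.
 */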
266 static void cmdq_wqe_fill(void *dst, void *src)
267 {
268         memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
269                (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
270                CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
271
272         rte_wmb();      /* The first 8 bytes should be written last */
273
274         *(u64 *)dst = *(u64 *)src;
275 }
276
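/*
 * Fill the ctrl word and WQE header of a command WQE. SGE-format commands
 * use the long command (lcmd) layout and inline commands the short command
 * (scmd) layout; the header also carries the wrap value in the HW busy bit
 * and the saved ARM flag for CMDQ_SET_ARM_CMD.
 */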
277 static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
278                                   enum hinic_ack_type ack_type,
279                                   enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
280                                   enum completion_format complete_format,
281                                   enum data_format local_data_format,
282                                   enum bufdesc_len buf_len)
283 {
284         struct hinic_ctrl *ctrl;
285         enum ctrl_sect_len ctrl_len;
286         struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
287         struct hinic_cmdq_wqe_scmd *wqe_scmd;
288         u32 saved_data = WQE_HEADER(wqe)->saved_data;
289
290         if (local_data_format == DATA_SGE) {
291                 wqe_lcmd = &wqe->wqe_lcmd;
292
293                 wqe_lcmd->status.status_info = 0;
294                 ctrl = &wqe_lcmd->ctrl;
295                 ctrl_len = CTRL_SECT_LEN;
296         } else {
297                 wqe_scmd = &wqe->inline_wqe.wqe_scmd;
298
299                 wqe_scmd->status.status_info = 0;
300                 ctrl = &wqe_scmd->ctrl;
301                 ctrl_len = CTRL_DIRECT_SECT_LEN;
302         }
303
304         ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI)           |
305                         CMDQ_CTRL_SET(cmd, CMD)                 |
306                         CMDQ_CTRL_SET(mod, MOD)                 |
307                         CMDQ_CTRL_SET(ack_type, ACK_TYPE);
308
309         WQE_HEADER(wqe)->header_info =
310                 CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
311                 CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
312                 CMDQ_WQE_HEADER_SET(local_data_format, DATA_FMT)        |
313                 CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ)      |
314                 CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
315                 CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN)         |
316                 CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
317
318         if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) {
319                 saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
320                 WQE_HEADER(wqe)->saved_data = saved_data |
321                                                 SAVED_DATA_SET(1, ARM);
322         } else {
323                 saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
324                 WQE_HEADER(wqe)->saved_data = saved_data;
325         }
326 }
327
328 static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
329                               enum cmdq_cmd_type cmd_type,
330                               struct hinic_cmd_buf *buf_in,
331                               struct hinic_cmd_buf *buf_out, int wrapped,
332                               enum hinic_ack_type ack_type,
333                               enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
334 {
335         struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
336         enum completion_format complete_format = COMPLETE_DIRECT;
337
338         switch (cmd_type) {
339         case SYNC_CMD_SGE_RESP:
340                 if (buf_out) {
341                         complete_format = COMPLETE_SGE;
342                         cmdq_set_completion(&wqe_lcmd->completion, buf_out);
343                 }
344                 break;
345         case SYNC_CMD_DIRECT_RESP:
346                 complete_format = COMPLETE_DIRECT;
347                 wqe_lcmd->completion.direct_resp = 0;
348                 break;
349         case ASYNC_CMD:
350                 complete_format = COMPLETE_DIRECT;
351                 wqe_lcmd->completion.direct_resp = 0;
352
353                 wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
354                 break;
355         }
356
357         cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
358                               prod_idx, complete_format, DATA_SGE,
359                               BUFDESC_LCMD_LEN);
360
361         cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
362 }
363
364 static int cmdq_params_valid(struct hinic_cmd_buf *buf_in)
365 {
366         if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
367                 PMD_DRV_LOG(ERR, "Invalid CMDQ buffer size");
368                 return -EINVAL;
369         }
370
371         return 0;
372 }
373
374 static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
375 {
376         unsigned long end;
377
378         end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
379         do {
380                 if (cmdqs->status & HINIC_CMDQ_ENABLE)
381                         return 0;
382
383         } while (time_before(jiffies, end));
384
385         return -EBUSY;
386 }
387
388 static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
389                                 int errcode)
390 {
391         cmdq->errcode[prod_idx] = errcode;
392 }
393
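/*
 * Once a completion has been consumed, clear the ctrl word (and with it
 * the HW busy bit), make the clear visible, and return the WQEBBs to the
 * work queue.
 */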
394 static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
395                                    struct hinic_cmdq_wqe *wqe)
396 {
397         struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
398         struct hinic_cmdq_inline_wqe *inline_wqe;
399         struct hinic_cmdq_wqe_scmd *wqe_scmd;
400         struct hinic_ctrl *ctrl;
401         u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
402         int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
403         int wqe_size = cmdq_get_wqe_size(buf_len);
404         u16 num_wqebbs;
405
406         if (wqe_size == WQE_LCMD_SIZE) {
407                 wqe_lcmd = &wqe->wqe_lcmd;
408                 ctrl = &wqe_lcmd->ctrl;
409         } else {
410                 inline_wqe = &wqe->inline_wqe;
411                 wqe_scmd = &inline_wqe->wqe_scmd;
412                 ctrl = &wqe_scmd->ctrl;
413         }
414
415         /* clear HW busy bit */
416         ctrl->ctrl_info = 0;
417
418         rte_wmb();      /* ensure the wqe clear is written before the wqebbs are released */
419
420         num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
421         hinic_put_wqe(cmdq->wq, num_wqebbs);
422 }
423
424 static int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
425 {
426         struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
427         struct hinic_cmdq_ctxt *cmdq_ctxt;
428         enum hinic_cmdq_type cmdq_type;
429         u16 in_size;
430         int err;
431
432         cmdq_type = HINIC_CMDQ_SYNC;
433         for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
434                 cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
435                 cmdq_ctxt->resp_aeq_num = HINIC_AEQ1;
436                 in_size = sizeof(*cmdq_ctxt);
437                 err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
438                                              HINIC_MGMT_CMD_CMDQ_CTXT_SET,
439                                              cmdq_ctxt, in_size, NULL,
440                                              NULL, 0);
441                 if (err) {
442                         PMD_DRV_LOG(ERR, "Set cmdq ctxt failed");
443                         return -EFAULT;
444                 }
445         }
446
447         cmdqs->status |= HINIC_CMDQ_ENABLE;
448
449         return 0;
450 }
451
452 void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
453 {
454         hinic_cmdqs_free(hwdev);
455 }
456
457 int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
458 {
459         struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
460         enum hinic_cmdq_type cmdq_type;
461
462         cmdq_type = HINIC_CMDQ_SYNC;
463         for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
464                 cmdqs->cmdq[cmdq_type].wrapped = 1;
465                 hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
466         }
467
468         return hinic_set_cmdq_ctxts(hwdev);
469 }
470
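/*
 * Initialize one cmdq: attach its work queue, allocate the per-entry
 * errcode and cmd_info arrays and map a doorbell page for it.
 */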
471 static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
472                      struct hinic_wq *wq, enum hinic_cmdq_type q_type)
473 {
474         void __iomem *db_base;
475         int err = 0;
476         size_t errcode_size;
477         size_t cmd_infos_size;
478
479         cmdq->wq = wq;
480         cmdq->cmdq_type = q_type;
481         cmdq->wrapped = 1;
482
483         spin_lock_init(&cmdq->cmdq_lock);
484
485         errcode_size = wq->q_depth * sizeof(*cmdq->errcode);
486         cmdq->errcode = kzalloc(errcode_size, GFP_KERNEL);
487         if (!cmdq->errcode) {
488                 PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
489                 spin_lock_deinit(&cmdq->cmdq_lock);
490                 return -ENOMEM;
491         }
492
493         cmd_infos_size = wq->q_depth * sizeof(*cmdq->cmd_infos);
494         cmdq->cmd_infos = kzalloc(cmd_infos_size, GFP_KERNEL);
495         if (!cmdq->cmd_infos) {
496                 PMD_DRV_LOG(ERR, "Allocate cmd_infos for cmdq failed");
497                 err = -ENOMEM;
498                 goto cmd_infos_err;
499         }
500
501         err = hinic_alloc_db_addr(hwdev, &db_base);
502         if (err)
503                 goto alloc_db_err;
504
505         cmdq->db_base = (u8 *)db_base;
506         return 0;
507
508 alloc_db_err:
509         kfree(cmdq->cmd_infos);
510
511 cmd_infos_err:
512         kfree(cmdq->errcode);
513         spin_lock_deinit(&cmdq->cmdq_lock);
514
515         return err;
516 }
517
518 static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
519 {
520         hinic_free_db_addr(hwdev, cmdq->db_base);
521         kfree(cmdq->cmd_infos);
522         kfree(cmdq->errcode);
523         spin_lock_deinit(&cmdq->cmdq_lock);
524 }
525
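/*
 * Create the command queues: allocate the cmdqs container and the saved
 * work queues, create the DMA pool for command buffers, allocate the WQ
 * memory, initialize every cmdq type and push the contexts to the
 * management firmware.
 */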
526 static int hinic_cmdqs_init(struct hinic_hwdev *hwdev)
527 {
528         struct hinic_cmdqs *cmdqs;
529         struct hinic_cmdq_ctxt *cmdq_ctxt;
530         enum hinic_cmdq_type type, cmdq_type;
531         size_t saved_wqs_size;
532         int err;
533
534         cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
535         if (!cmdqs)
536                 return -ENOMEM;
537
538         hwdev->cmdqs = cmdqs;
539         cmdqs->hwdev = hwdev;
540
541         saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
542         cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
543         if (!cmdqs->saved_wqs) {
544                 PMD_DRV_LOG(ERR, "Allocate saved wqs failed");
545                 err = -ENOMEM;
546                 goto alloc_wqs_err;
547         }
548
549         cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev,
550                                               HINIC_CMDQ_BUF_SIZE,
551                                               HINIC_CMDQ_BUF_SIZE, 0ULL);
552         if (!cmdqs->cmd_buf_pool) {
553                 PMD_DRV_LOG(ERR, "Create cmdq buffer pool failed");
554                 err = -ENOMEM;
555                 goto pool_create_err;
556         }
557
558         err = hinic_cmdq_alloc(cmdqs->saved_wqs, hwdev,
559                                HINIC_MAX_CMDQ_TYPES, HINIC_CMDQ_WQ_BUF_SIZE,
560                                CMDQ_WQEBB_SHIFT, HINIC_CMDQ_DEPTH);
561         if (err) {
562                 PMD_DRV_LOG(ERR, "Allocate cmdq failed");
563                 goto cmdq_alloc_err;
564         }
565
566         cmdq_type = HINIC_CMDQ_SYNC;
567         for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
568                 err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
569                                 &cmdqs->saved_wqs[cmdq_type], cmdq_type);
570                 if (err) {
571                         PMD_DRV_LOG(ERR, "Initialize cmdq failed");
572                         goto init_cmdq_err;
573                 }
574
575                 cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
576                 cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], cmdq_ctxt);
577         }
578
579         err = hinic_set_cmdq_ctxts(hwdev);
580         if (err)
581                 goto init_cmdq_err;
582
583         return 0;
584
585 init_cmdq_err:
586         type = HINIC_CMDQ_SYNC;
587         for ( ; type < cmdq_type; type++)
588                 free_cmdq(hwdev, &cmdqs->cmdq[type]);
589
590         hinic_cmdq_free(hwdev, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES);
591
592 cmdq_alloc_err:
593         dma_pool_destroy(cmdqs->cmd_buf_pool);
594
595 pool_create_err:
596         kfree(cmdqs->saved_wqs);
597
598 alloc_wqs_err:
599         kfree(cmdqs);
600
601         return err;
602 }
603
604 static void hinic_cmdqs_free(struct hinic_hwdev *hwdev)
605 {
606         struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
607         enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
608
609         cmdqs->status &= ~HINIC_CMDQ_ENABLE;
610
611         for ( ; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
612                 free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);
613
614         hinic_cmdq_free(hwdev, cmdqs->saved_wqs,
615                         HINIC_MAX_CMDQ_TYPES);
616
617         dma_pool_destroy(cmdqs->cmd_buf_pool);
618
619         kfree(cmdqs->saved_wqs);
620
621         kfree(cmdqs);
622 }
623
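/*
 * Tell the management firmware the cmdq depth through the root context
 * (VAT set) command; the depth is encoded as log2 of the queue depth.
 */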
624 static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
625 {
626         struct hinic_root_ctxt root_ctxt;
627
628         memset(&root_ctxt, 0, sizeof(root_ctxt));
629         root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
630         root_ctxt.func_idx = hinic_global_func_id(hwdev);
631         root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
632         root_ctxt.set_cmdq_depth = 1;
633         root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
634         return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
635                                       HINIC_MGMT_CMD_VAT_SET,
636                                       &root_ctxt, sizeof(root_ctxt),
637                                       NULL, NULL, 0);
638 }
639
640 int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
641 {
642         int err;
643
644         err = hinic_cmdqs_init(hwdev);
645         if (err) {
646                 PMD_DRV_LOG(ERR, "Init cmd queues failed");
647                 return err;
648         }
649
650         err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
651         if (err) {
652                 PMD_DRV_LOG(ERR, "Set cmdq depth failed");
653                 goto set_cmdq_depth_err;
654         }
655
656         return 0;
657
658 set_cmdq_depth_err:
659         hinic_cmdqs_free(hwdev);
660
661         return err;
662 }
663
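/*
 * Build the cmdq context later sent by hinic_set_cmdq_ctxts(): the PFN of
 * the WQ buffer (used as both current WQE page and WQ block), the CEQ
 * settings and the starting consumer index.
 */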
664 static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
665                                  struct hinic_cmdq_ctxt *cmdq_ctxt)
666 {
667         struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)cmdq_to_cmdqs(cmdq);
668         struct hinic_hwdev *hwdev = cmdqs->hwdev;
669         struct hinic_wq *wq = cmdq->wq;
670         struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
671         u64 wq_first_page_paddr, pfn;
672
673         u16 start_ci = (u16)(wq->cons_idx);
674
675         /* The data in the HW is in Big Endian Format */
676         wq_first_page_paddr = wq->queue_buf_paddr;
677
678         pfn = CMDQ_PFN(wq_first_page_paddr, HINIC_PAGE_SIZE);
679         ctxt_info->curr_wqe_page_pfn =
680                 CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
681                 CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN)      |
682                 CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM)     |
683                 CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
684                 CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
685
686         ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
687                                 CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
688         cmdq_ctxt->func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
689         cmdq_ctxt->ppf_idx  = HINIC_HWIF_PPF_IDX(hwdev->hwif);
690         cmdq_ctxt->cmdq_id  = cmdq->cmdq_type;
691 }
692
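/*
 * Busy-poll the oldest outstanding cmdq WQE until the hardware reports
 * completion in the ctrl word or the timeout expires; on completion the
 * error code from the status word is recorded and the WQEBBs are freed.
 */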
693 static int hinic_cmdq_poll_msg(struct hinic_cmdq *cmdq, u32 timeout)
694 {
695         struct hinic_cmdq_wqe *wqe;
696         struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
697         struct hinic_ctrl *ctrl;
698         struct hinic_cmdq_cmd_info *cmd_info;
699         u32 status_info, ctrl_info;
700         u16 ci;
701         int errcode;
702         unsigned long end;
703         int done = 0;
704         int rc = 0;
705
706         wqe = hinic_read_wqe(cmdq->wq, 1, &ci);
707         if (wqe == NULL) {
708                 PMD_DRV_LOG(ERR, "No outstanding cmdq msg");
709                 return -EINVAL;
710         }
711
712         cmd_info = &cmdq->cmd_infos[ci];
713         /* this cmd has not been filled and sent to hw, or a timed-out msg ack was received */
714         if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
715                 PMD_DRV_LOG(ERR, "Cmdq msg has not been filled and sent to hw, or a timed-out msg ack was received, cmdq ci: %u",
716                             ci);
717                 return -EINVAL;
718         }
719
720         /* only the arm bit command uses an scmd wqe; this wqe is an lcmd */
721         wqe_lcmd = &wqe->wqe_lcmd;
722         ctrl = &wqe_lcmd->ctrl;
723         end = jiffies + msecs_to_jiffies(timeout);
724         do {
725                 ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
726                 if (WQE_COMPLETED(ctrl_info)) {
727                         done = 1;
728                         break;
729                 }
730
731                 rte_delay_ms(1);
732         } while (time_before(jiffies, end));
733
734         if (done) {
735                 status_info = be32_to_cpu(wqe_lcmd->status.status_info);
736                 errcode = WQE_ERRCODE_GET(status_info, VAL);
737                 cmdq_update_errcode(cmdq, ci, errcode);
738                 clear_wqe_complete_bit(cmdq, wqe);
739                 rc = 0;
740         } else {
741                 PMD_DRV_LOG(ERR, "Poll cmdq msg time out, ci: %u", ci);
742                 rc = -ETIMEDOUT;
743         }
744
745         /* set this cmd invalid */
746         cmd_info->cmd_type = HINIC_CMD_TYPE_NONE;
747
748         return rc;
749 }
750
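/*
 * Send a synchronous direct-response command: reserve WQEBBs under the
 * cmdq lock, build the WQE locally, byte-swap it to big endian, copy it
 * into the ring (header last), ring the doorbell, then poll for completion
 * and read back the 64-bit direct response.
 */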
751 static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
752                                      enum hinic_ack_type ack_type,
753                                      enum hinic_mod_type mod, u8 cmd,
754                                      struct hinic_cmd_buf *buf_in,
755                                      u64 *out_param, u32 timeout)
756 {
757         struct hinic_wq *wq = cmdq->wq;
758         struct hinic_cmdq_wqe *curr_wqe, wqe;
759         struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
760         u16 curr_prod_idx, next_prod_idx, num_wqebbs;
761         int wrapped;
762         u32 timeo, wqe_size;
763         int err;
764
765         wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
766         num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
767
768         /* Keep wrapped and doorbell index correct. */
769         spin_lock(&cmdq->cmdq_lock);
770
771         curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
772         if (!curr_wqe) {
773                 err = -EBUSY;
774                 goto cmdq_unlock;
775         }
776
777         memset(&wqe, 0, sizeof(wqe));
778         wrapped = cmdq->wrapped;
779
780         next_prod_idx = curr_prod_idx + num_wqebbs;
781         if (next_prod_idx >= wq->q_depth) {
782                 cmdq->wrapped = !cmdq->wrapped;
783                 next_prod_idx -= wq->q_depth;
784         }
785
786         cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
787                           wrapped, ack_type, mod, cmd, curr_prod_idx);
788
789         /* The data that is written to HW should be in Big Endian Format */
790         hinic_cpu_to_be32(&wqe, wqe_size);
791
792         /* CMDQ WQE is not a shadow WQE, therefore the local wqe is written into the wq */
793         cmdq_wqe_fill(curr_wqe, &wqe);
794
795         cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_NORMAL;
796
797         cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
798
799         timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
800         err = hinic_cmdq_poll_msg(cmdq, timeo);
801         if (err) {
802                 PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x",
803                         curr_prod_idx);
804                 err = -ETIMEDOUT;
805                 goto cmdq_unlock;
806         }
807
808         rte_smp_rmb();  /* read error code after completion */
809
810         if (out_param) {
811                 wqe_lcmd = &curr_wqe->wqe_lcmd;
812                 *out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp);
813         }
814
815         if (cmdq->errcode[curr_prod_idx] > 1) {
816                 err = cmdq->errcode[curr_prod_idx];
817                 goto cmdq_unlock;
818         }
819
820 cmdq_unlock:
821         spin_unlock(&cmdq->cmdq_lock);
822
823         return err;
824 }
825
826 int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
827                            enum hinic_mod_type mod, u8 cmd,
828                            struct hinic_cmd_buf *buf_in,
829                            u64 *out_param, u32 timeout)
830 {
831         struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
832         int err = cmdq_params_valid(buf_in);
833
834         if (err) {
835                 PMD_DRV_LOG(ERR, "Invalid CMDQ parameters");
836                 return err;
837         }
838
839         err = wait_cmdqs_enable(cmdqs);
840         if (err) {
841                 PMD_DRV_LOG(ERR, "Cmdq is disabled");
842                 return err;
843         }
844
845         return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
846                                          ack_type, mod, cmd, buf_in,
847                                          out_param, timeout);
848 }