net/hinic/base: modify VHD type for SDI
[dpdk.git] / drivers / net / hinic / base / hinic_pmd_nicio.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4 #include<rte_bus_pci.h>
5
6 #include "hinic_compat.h"
7 #include "hinic_pmd_hwdev.h"
8 #include "hinic_pmd_hwif.h"
9 #include "hinic_pmd_wq.h"
10 #include "hinic_pmd_mgmt.h"
11 #include "hinic_pmd_cmdq.h"
12 #include "hinic_pmd_cfg.h"
13 #include "hinic_pmd_niccfg.h"
14 #include "hinic_pmd_nicio.h"
15
/* WQE prefetch window programmed into SQ/RQ contexts (see *_CTXT_PREF_SET) */
#define WQ_PREFETCH_MAX                 6
#define WQ_PREFETCH_MIN                 1
#define WQ_PREFETCH_THRESHOLD           256

/* Fallback hw rx-buf-size index: 0xB indexes hinic_hw_rx_buf_size[] (2K) */
#define DEFAULT_RX_BUF_SIZE             ((u16)0xB)
21
/* Fast recycle modes; the PMD selects RECYCLE_MODE_DPDK in
 * hinic_init_nic_hwdev().
 */
enum {
        RECYCLE_MODE_NIC = 0x0,
        RECYCLE_MODE_DPDK = 0x1,
};
26
/* Queue buffer related define */
/* Rx buffer sizes (bytes) the hardware supports. Enumerator order must
 * match hinic_hw_rx_buf_size[]: the hw encoding of a size is its index in
 * that table (see get_hw_rx_buf_size()).
 */
enum hinic_rx_buf_size {
        HINIC_RX_BUF_SIZE_32B = 0x20,
        HINIC_RX_BUF_SIZE_64B = 0x40,
        HINIC_RX_BUF_SIZE_96B = 0x60,
        HINIC_RX_BUF_SIZE_128B = 0x80,
        HINIC_RX_BUF_SIZE_192B = 0xC0,
        HINIC_RX_BUF_SIZE_256B = 0x100,
        HINIC_RX_BUF_SIZE_384B = 0x180,
        HINIC_RX_BUF_SIZE_512B = 0x200,
        HINIC_RX_BUF_SIZE_768B = 0x300,
        HINIC_RX_BUF_SIZE_1K = 0x400,
        HINIC_RX_BUF_SIZE_1_5K = 0x600,
        HINIC_RX_BUF_SIZE_2K = 0x800,
        HINIC_RX_BUF_SIZE_3K = 0xC00,
        HINIC_RX_BUF_SIZE_4K = 0x1000,
        HINIC_RX_BUF_SIZE_8K = 0x2000,
        HINIC_RX_BUF_SIZE_16K = 0x4000,
};
46
/* Index -> byte-size table; the index is the value programmed into the
 * root context / function table (see get_hw_rx_buf_size() and
 * hinic_convert_rx_buf_size()).
 */
const u32 hinic_hw_rx_buf_size[] = {
        HINIC_RX_BUF_SIZE_32B,
        HINIC_RX_BUF_SIZE_64B,
        HINIC_RX_BUF_SIZE_96B,
        HINIC_RX_BUF_SIZE_128B,
        HINIC_RX_BUF_SIZE_192B,
        HINIC_RX_BUF_SIZE_256B,
        HINIC_RX_BUF_SIZE_384B,
        HINIC_RX_BUF_SIZE_512B,
        HINIC_RX_BUF_SIZE_768B,
        HINIC_RX_BUF_SIZE_1K,
        HINIC_RX_BUF_SIZE_1_5K,
        HINIC_RX_BUF_SIZE_2K,
        HINIC_RX_BUF_SIZE_3K,
        HINIC_RX_BUF_SIZE_4K,
        HINIC_RX_BUF_SIZE_8K,
        HINIC_RX_BUF_SIZE_16K,
};
65
/* Header prepended to SQ/RQ context blocks sent through the cmdq;
 * filled by hinic_qp_prepare_cmdq_header() and byte-swapped to big-endian.
 */
struct hinic_qp_ctxt_header {
        u16     num_queues;     /* number of queue contexts that follow */
        u16     queue_type;     /* enum hinic_qp_ctxt_type (SQ or RQ) */
        u32     addr_offset;    /* context table offset, in 16-byte units */
};
71
/* SQ hardware context; filled by hinic_sq_prepare_ctxt() and sent to the
 * device in big-endian through the cmdq.
 */
struct hinic_sq_ctxt {
        u32     ceq_attr;       /* global SQ id + CEQ attrs (CEQ disabled) */

        u32     ci_owner;       /* initial consumer index and owner bit */

        u32     wq_pfn_hi;      /* high bits of WQ page PFN + initial PI */
        u32     wq_pfn_lo;      /* low 32 bits of WQ page PFN */

        u32     pref_cache;     /* prefetch min/max/threshold */
        u32     pref_owner;
        u32     pref_wq_pfn_hi_ci;      /* prefetch PFN high bits + CI */
        u32     pref_wq_pfn_lo;

        u32     rsvd8;
        u32     rsvd9;

        u32     wq_block_pfn_hi;        /* WQ block PFN, high bits */
        u32     wq_block_pfn_lo;        /* WQ block PFN, low 32 bits */
};
91
/* RQ hardware context; filled by hinic_rq_prepare_ctxt() and sent to the
 * device in big-endian through the cmdq.
 */
struct hinic_rq_ctxt {
        u32     ceq_attr;       /* CEQ enable/owner bits */

        u32     pi_intr_attr;   /* initial PI + msix entry index */

        u32     wq_pfn_hi_ci;   /* high bits of WQ page PFN + initial CI */
        u32     wq_pfn_lo;      /* low 32 bits of WQ page PFN */

        u32     pref_cache;     /* prefetch min/max/threshold */
        u32     pref_owner;

        u32     pref_wq_pfn_hi_ci;      /* prefetch PFN high bits + CI */
        u32     pref_wq_pfn_lo;

        u32     pi_paddr_hi;    /* DMA address of the PI (rq->pi_dma_addr) */
        u32     pi_paddr_lo;

        u32     wq_block_pfn_hi;        /* WQ block PFN, high bits */
        u32     wq_block_pfn_lo;        /* WQ block PFN, low 32 bits */
};
112
/* Cmdq payload: header followed by up to HINIC_Q_CTXT_MAX SQ contexts */
struct hinic_sq_ctxt_block {
        struct hinic_qp_ctxt_header     cmdq_hdr;
        struct hinic_sq_ctxt            sq_ctxt[HINIC_Q_CTXT_MAX];
};
117
/* Cmdq payload: header followed by up to HINIC_Q_CTXT_MAX RQ contexts */
struct hinic_rq_ctxt_block {
        struct hinic_qp_ctxt_header     cmdq_hdr;
        struct hinic_rq_ctxt            rq_ctxt[HINIC_Q_CTXT_MAX];
};
122
/* Cmdq payload used to reset the queue offload (TSO/LRO) context area */
struct hinic_clean_queue_ctxt {
        struct hinic_qp_ctxt_header     cmdq_hdr;
        u32                             ctxt_size;      /* size code, see clean_queue_offload_ctxt() */
};
127
128
129 static void
130 hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
131                              enum hinic_qp_ctxt_type ctxt_type,
132                              u16 num_queues, u16 max_queues, u16 q_id)
133 {
134         qp_ctxt_hdr->queue_type = ctxt_type;
135         qp_ctxt_hdr->num_queues = num_queues;
136
137         if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
138                 qp_ctxt_hdr->addr_offset =
139                                 SQ_CTXT_OFFSET(max_queues, max_queues, q_id);
140         else
141                 qp_ctxt_hdr->addr_offset =
142                                 RQ_CTXT_OFFSET(max_queues, max_queues, q_id);
143
144         qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
145
146         hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
147 }
148
149 static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,
150                            struct hinic_sq_ctxt *sq_ctxt)
151 {
152         struct hinic_wq *wq = sq->wq;
153         u64 wq_page_addr;
154         u64 wq_page_pfn, wq_block_pfn;
155         u32 wq_page_pfn_hi, wq_page_pfn_lo;
156         u32 wq_block_pfn_hi, wq_block_pfn_lo;
157         u16 pi_start, ci_start;
158
159         ci_start = (u16)(wq->cons_idx);
160         pi_start = (u16)(wq->prod_idx);
161
162         /* read the first page from the HW table */
163         wq_page_addr = wq->queue_buf_paddr;
164
165         wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
166         wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
167         wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
168
169         wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
170         wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
171         wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
172
173         /* must config as ceq disabled */
174         sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |
175                                 SQ_CTXT_CEQ_ATTR_SET(0, ARM) |
176                                 SQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID) |
177                                 SQ_CTXT_CEQ_ATTR_SET(0, EN);
178
179         sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |
180                                 SQ_CTXT_CI_SET(1, OWNER);
181
182         sq_ctxt->wq_pfn_hi =
183                         SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
184                         SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
185
186         sq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
187
188         sq_ctxt->pref_cache =
189                 SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
190                 SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
191                 SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
192
193         sq_ctxt->pref_owner = 1;
194
195         sq_ctxt->pref_wq_pfn_hi_ci =
196                 SQ_CTXT_PREF_SET(ci_start, CI) |
197                 SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);
198
199         sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
200
201         sq_ctxt->wq_block_pfn_hi =
202                 SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
203
204         sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
205
206         hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
207 }
208
209 static void hinic_rq_prepare_ctxt(struct hinic_rq *rq,
210                         struct hinic_rq_ctxt *rq_ctxt)
211 {
212         struct hinic_wq *wq = rq->wq;
213         u64 wq_page_addr;
214         u64 wq_page_pfn, wq_block_pfn;
215         u32 wq_page_pfn_hi, wq_page_pfn_lo;
216         u32 wq_block_pfn_hi, wq_block_pfn_lo;
217         u16 pi_start, ci_start;
218
219         ci_start = (u16)(wq->cons_idx);
220         pi_start = (u16)(wq->prod_idx);
221
222         /* read the first page from the HW table */
223         wq_page_addr = wq->queue_buf_paddr;
224
225         wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
226         wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
227         wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
228
229         wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
230         wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
231         wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
232
233         /* must config as ceq enable but do not generate ceq */
234         rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) |
235                             RQ_CTXT_CEQ_ATTR_SET(1, OWNER);
236
237         rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |
238                                 RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR) |
239                                 RQ_CTXT_PI_SET(0, CEQ_ARM);
240
241         rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
242                                 RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
243
244         rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
245
246         rq_ctxt->pref_cache =
247                 RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
248                 RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
249                 RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
250
251         rq_ctxt->pref_owner = 1;
252
253         rq_ctxt->pref_wq_pfn_hi_ci =
254                 RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
255                 RQ_CTXT_PREF_SET(ci_start, CI);
256
257         rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
258
259         rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
260         rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
261
262         rq_ctxt->wq_block_pfn_hi =
263                 RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
264
265         rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
266
267         hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
268 }
269
270 static int init_sq_ctxts(struct hinic_nic_io *nic_io)
271 {
272         struct hinic_hwdev *hwdev = nic_io->hwdev;
273         struct hinic_sq_ctxt_block *sq_ctxt_block;
274         struct hinic_sq_ctxt *sq_ctxt;
275         struct hinic_cmd_buf *cmd_buf;
276         struct hinic_qp *qp;
277         u64 out_param = EIO;
278         u16 q_id, curr_id, global_qpn, max_ctxts, i;
279         int err = 0;
280
281         cmd_buf = hinic_alloc_cmd_buf(hwdev);
282         if (!cmd_buf) {
283                 PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
284                 return -ENOMEM;
285         }
286
287         q_id = 0;
288         /* sq and rq number may not equal */
289         while (q_id < nic_io->num_sqs) {
290                 sq_ctxt_block = cmd_buf->buf;
291                 sq_ctxt = sq_ctxt_block->sq_ctxt;
292
293                 max_ctxts = (nic_io->num_sqs - q_id) > HINIC_Q_CTXT_MAX ?
294                                 HINIC_Q_CTXT_MAX : (nic_io->num_sqs - q_id);
295
296                 hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
297                                              HINIC_QP_CTXT_TYPE_SQ, max_ctxts,
298                                              nic_io->max_qps, q_id);
299
300                 for (i = 0; i < max_ctxts; i++) {
301                         curr_id = q_id + i;
302                         qp = &nic_io->qps[curr_id];
303                         global_qpn = nic_io->global_qpn + curr_id;
304
305                         hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);
306                 }
307
308                 cmd_buf->size = SQ_CTXT_SIZE(max_ctxts);
309
310                 err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
311                                              HINIC_MOD_L2NIC,
312                                              HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
313                                              cmd_buf, &out_param, 0);
314                 if (err || out_param != 0) {
315                         PMD_DRV_LOG(ERR, "Failed to set SQ ctxts, err: %d",
316                                 err);
317                         err = -EIO;
318                         break;
319                 }
320
321                 q_id += max_ctxts;
322         }
323
324         hinic_free_cmd_buf(hwdev, cmd_buf);
325
326         return err;
327 }
328
329 static int init_rq_ctxts(struct hinic_nic_io *nic_io)
330 {
331         struct hinic_hwdev *hwdev = nic_io->hwdev;
332         struct hinic_rq_ctxt_block *rq_ctxt_block;
333         struct hinic_rq_ctxt *rq_ctxt;
334         struct hinic_cmd_buf *cmd_buf;
335         struct hinic_qp *qp;
336         u64 out_param = 0;
337         u16 q_id, curr_id, max_ctxts, i;
338         int err = 0;
339
340         cmd_buf = hinic_alloc_cmd_buf(hwdev);
341         if (!cmd_buf) {
342                 PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
343                 return -ENOMEM;
344         }
345
346         q_id = 0;
347         /* sq and rq number may not equal */
348         while (q_id < nic_io->num_rqs) {
349                 rq_ctxt_block = cmd_buf->buf;
350                 rq_ctxt = rq_ctxt_block->rq_ctxt;
351
352                 max_ctxts = (nic_io->num_rqs - q_id) > HINIC_Q_CTXT_MAX ?
353                                 HINIC_Q_CTXT_MAX : (nic_io->num_rqs - q_id);
354
355                 hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
356                                              HINIC_QP_CTXT_TYPE_RQ, max_ctxts,
357                                              nic_io->max_qps, q_id);
358
359                 for (i = 0; i < max_ctxts; i++) {
360                         curr_id = q_id + i;
361                         qp = &nic_io->qps[curr_id];
362
363                         hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);
364                 }
365
366                 cmd_buf->size = RQ_CTXT_SIZE(max_ctxts);
367
368                 err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
369                                              HINIC_MOD_L2NIC,
370                                              HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
371                                              cmd_buf, &out_param, 0);
372                 if ((err) || out_param != 0) {
373                         PMD_DRV_LOG(ERR, "Failed to set RQ ctxts");
374                         err = -EIO;
375                         break;
376                 }
377
378                 q_id += max_ctxts;
379         }
380
381         hinic_free_cmd_buf(hwdev, cmd_buf);
382
383         return err;
384 }
385
/* Initialize all SQ then RQ contexts in hardware.
 *
 * Fix: the previous `a || b` collapsed the negative errno returned by
 * the helpers to 1, so callers logged "rc: 1" instead of the real error
 * code. Propagate the first failure's error code instead.
 */
static int init_qp_ctxts(struct hinic_nic_io *nic_io)
{
        int err;

        err = init_sq_ctxts(nic_io);
        if (err)
                return err;

        return init_rq_ctxts(nic_io);
}
390
391 static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,
392                                     enum hinic_qp_ctxt_type ctxt_type)
393 {
394         struct hinic_hwdev *hwdev = nic_io->hwdev;
395         struct hinic_clean_queue_ctxt *ctxt_block;
396         struct hinic_cmd_buf *cmd_buf;
397         u64 out_param = 0;
398         int err;
399
400         cmd_buf = hinic_alloc_cmd_buf(hwdev);
401         if (!cmd_buf) {
402                 PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
403                 return -ENOMEM;
404         }
405
406         ctxt_block = cmd_buf->buf;
407         ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;
408         ctxt_block->cmdq_hdr.queue_type = ctxt_type;
409         ctxt_block->cmdq_hdr.addr_offset = 0;
410
411         /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
412         ctxt_block->ctxt_size = 0x3;
413
414         hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
415
416         cmd_buf->size = sizeof(*ctxt_block);
417
418         err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
419                                      HINIC_MOD_L2NIC,
420                                      HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
421                                      cmd_buf, &out_param, 0);
422
423         if ((err) || (out_param)) {
424                 PMD_DRV_LOG(ERR, "Failed to clean queue offload ctxts");
425                 err = -EIO;
426         }
427
428         hinic_free_cmd_buf(hwdev, cmd_buf);
429
430         return err;
431 }
432
433 static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)
434 {
435         /* clean LRO/TSO context space */
436         return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) ||
437                 clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ));
438 }
439
440 /**
441  * get_hw_rx_buf_size - translate rx_buf_size into hw_rx_buf_size
442  * @rx_buf_sz: receive buffer size
443  * @return
444  *   hw rx buffer size
445  */
446 static u16 get_hw_rx_buf_size(u32 rx_buf_sz)
447 {
448         u16 num_hw_types = sizeof(hinic_hw_rx_buf_size)
449                            / sizeof(hinic_hw_rx_buf_size[0]);
450         u16 i;
451
452         for (i = 0; i < num_hw_types; i++) {
453                 if (hinic_hw_rx_buf_size[i] == rx_buf_sz)
454                         return i;
455         }
456
457         PMD_DRV_LOG(ERR, "Hw can't support rx buf size of %u", rx_buf_sz);
458
459         return DEFAULT_RX_BUF_SIZE;     /* default 2K */
460 }
461
462 /**
463  * hinic_set_root_ctxt - init root context in NIC
464  * @hwdev: the hardware interface of a nic device
465  * @rq_depth: the depth of receive queue
466  * @sq_depth: the depth of transmit queue
467  * @rx_buf_sz: receive buffer size from app
468  * Return: 0 on success, negative error value otherwise.
469  */
470 static int
471 hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)
472 {
473         struct hinic_root_ctxt root_ctxt;
474         u16 out_size = sizeof(root_ctxt);
475         int err;
476
477         memset(&root_ctxt, 0, sizeof(root_ctxt));
478         root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
479         root_ctxt.func_idx = hinic_global_func_id(hwdev);
480         root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
481         root_ctxt.set_cmdq_depth = 0;
482         root_ctxt.cmdq_depth = 0;
483         root_ctxt.lro_en = 1;
484         root_ctxt.rq_depth  = (u16)ilog2(rq_depth);
485         root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);
486         root_ctxt.sq_depth  = (u16)ilog2(sq_depth);
487
488         err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
489                                      HINIC_MGMT_CMD_VAT_SET,
490                                      &root_ctxt, sizeof(root_ctxt),
491                                      &root_ctxt, &out_size, 0);
492         if (err || !out_size || root_ctxt.mgmt_msg_head.status) {
493                 PMD_DRV_LOG(ERR,
494                         "Set root context failed, err: %d, status: 0x%x, out_size: 0x%x",
495                         err, root_ctxt.mgmt_msg_head.status, out_size);
496                 return -EIO;
497         }
498
499         return 0;
500 }
501
502 /**
503  * hinic_clean_root_ctxt - clean root context table in NIC
504  * @hwdev: the hardware interface of a nic device
505  * @return
506  *   0 on success,
507  *   negative error value otherwise.
508  */
509 static int hinic_clean_root_ctxt(void *hwdev)
510 {
511         struct hinic_root_ctxt root_ctxt;
512         u16 out_size = sizeof(root_ctxt);
513         int err;
514
515         memset(&root_ctxt, 0, sizeof(root_ctxt));
516         root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
517         root_ctxt.func_idx = hinic_global_func_id(hwdev);
518         root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
519         root_ctxt.set_cmdq_depth = 0;
520         root_ctxt.cmdq_depth = 0;
521         root_ctxt.lro_en = 0;
522         root_ctxt.rq_depth  = 0;
523         root_ctxt.rx_buf_sz = 0;
524         root_ctxt.sq_depth  = 0;
525
526         err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
527                                      HINIC_MGMT_CMD_VAT_SET,
528                                      &root_ctxt, sizeof(root_ctxt),
529                                      &root_ctxt, &out_size, 0);
530         if (err || !out_size || root_ctxt.mgmt_msg_head.status) {
531                 PMD_DRV_LOG(ERR,
532                         "Clean root context failed, err: %d, status: 0x%x, out_size: 0x%x",
533                         err, root_ctxt.mgmt_msg_head.status, out_size);
534                 return -EIO;
535         }
536
537         return 0;
538 }
539
/* Initialize qp contexts in hw: set the vat page size, (re)init cmdq
 * contexts if a previous set failed, push SQ/RQ contexts, clean the
 * TSO/LRO offload context space, set the root context and program the
 * sq ci attributes for every sq.
 */
int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev)
{
        struct hinic_nic_io *nic_io = hwdev->nic_io;
        struct hinic_sq_attr sq_attr;
        u16 q_id;
        int err, rx_buf_sz;

        /* set vat page size to max queue depth page_size */
        err = hinic_set_pagesize(hwdev, HINIC_PAGE_SIZE_DPDK);
        if (err != HINIC_OK) {
                PMD_DRV_LOG(ERR, "Set vat page size: %d failed, rc: %d",
                        HINIC_PAGE_SIZE_DPDK, err);
                return err;
        }

        /* recover cmdq contexts if an earlier set failed */
        if (hwdev->cmdqs->status & HINIC_CMDQ_SET_FAIL) {
                err = hinic_reinit_cmdq_ctxts(hwdev);
                if (err) {
                        PMD_DRV_LOG(ERR, "Reinit cmdq context failed when dev start, err: %d",
                                err);
                        return err;
                }
        }

        err = init_qp_ctxts(nic_io);
        if (err) {
                PMD_DRV_LOG(ERR, "Init QP ctxts failed, rc: %d", err);
                return err;
        }

        /* clean LRO/TSO context space */
        err = clean_qp_offload_ctxt(nic_io);
        if (err) {
                PMD_DRV_LOG(ERR, "Clean qp offload ctxts failed, rc: %d", err);
                return err;
        }

        rx_buf_sz = nic_io->rq_buf_size;

        /* update rx buf size to function table */
        err = hinic_set_rx_vhd_mode(hwdev, HINIC_VHD_TYPE_0B, rx_buf_sz);
        if (err) {
                PMD_DRV_LOG(ERR, "Set rx vhd mode failed, rc: %d", err);
                return err;
        }

        err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,
                                  nic_io->sq_depth, rx_buf_sz);
        if (err) {
                PMD_DRV_LOG(ERR, "Set root context failed, rc: %d", err);
                return err;
        }

        for (q_id = 0; q_id < nic_io->num_sqs; q_id++) {
                /* ci table entries are 4-byte aligned, hence the >> 2 */
                sq_attr.ci_dma_base =
                        HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;
                /* NOTE(review): old comment claimed a ci update threshold
                 * of 8, but pending_limit/coalescing_time are set to 1 --
                 * confirm the intended values.
                 * NOTE(review): only these six sq_attr fields are set;
                 * confirm struct hinic_sq_attr has no other fields read
                 * by hinic_set_ci_table().
                 */
                sq_attr.pending_limit = 1;
                sq_attr.coalescing_time = 1;
                sq_attr.intr_en = 0;
                sq_attr.l2nic_sqn = q_id;
                sq_attr.dma_attr_off = 0;
                err = hinic_set_ci_table(hwdev, q_id, &sq_attr);
                if (err) {
                        PMD_DRV_LOG(ERR, "Set ci table failed, rc: %d", err);
                        goto set_cons_idx_table_err;
                }
        }

        return 0;

set_cons_idx_table_err:
        (void)hinic_clean_root_ctxt(hwdev);
        return err;
}
616
617 void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev)
618 {
619         int err;
620
621         err = hinic_clean_root_ctxt(hwdev);
622         if (err)
623                 PMD_DRV_LOG(ERR, "Failed to clean root ctxt");
624 }
625
626 static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev)
627 {
628         struct hinic_nic_io *nic_io = hwdev->nic_io;
629         u16 global_qpn, rx_buf_sz;
630         int err;
631
632         err = hinic_get_base_qpn(hwdev, &global_qpn);
633         if (err) {
634                 PMD_DRV_LOG(ERR, "Failed to get base qpn");
635                 goto err_init_nic_hwdev;
636         }
637
638         nic_io->global_qpn = global_qpn;
639         rx_buf_sz = HINIC_IS_VF(hwdev) ? RX_BUF_LEN_1_5K : RX_BUF_LEN_16K;
640         err = hinic_init_function_table(hwdev, rx_buf_sz);
641         if (err) {
642                 PMD_DRV_LOG(ERR, "Failed to init function table");
643                 goto err_init_nic_hwdev;
644         }
645
646         err = hinic_vf_func_init(hwdev);
647         if (err) {
648                 PMD_DRV_LOG(ERR, "Failed to init nic mbox");
649                 goto err_init_nic_hwdev;
650         }
651
652         err = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK);
653         if (err) {
654                 PMD_DRV_LOG(ERR, "Failed to set fast recycle mode");
655                 goto err_init_nic_hwdev;
656         }
657
658         return 0;
659
660 err_init_nic_hwdev:
661         return err;
662 }
663
/* Undo hinic_init_nic_hwdev(): free vf mbox resources and drop the
 * nic_io reference. NOTE(review): this only clears the pointer -- the
 * nic_io allocation itself must be freed by the caller.
 */
static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev)
{
        hinic_vf_func_free(hwdev);
        hwdev->nic_io = NULL;
}
669
/* Flush all pending rx/tx traffic of the function; thin wrapper around
 * hinic_func_rx_tx_flush().
 */
int hinic_rx_tx_flush(struct hinic_hwdev *hwdev)
{
        return hinic_func_rx_tx_flush(hwdev);
}
674
675 int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
676 {
677         struct hinic_nic_io *nic_io = hwdev->nic_io;
678         struct hinic_wq *wq = &nic_io->sq_wq[q_id];
679
680         return (wq->delta) - 1;
681 }
682
683 int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
684 {
685         struct hinic_nic_io *nic_io = hwdev->nic_io;
686         struct hinic_wq *wq = &nic_io->rq_wq[q_id];
687
688         return (wq->delta) - 1;
689 }
690
691 u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
692 {
693         struct hinic_nic_io *nic_io = hwdev->nic_io;
694         struct hinic_wq *wq = &nic_io->sq_wq[q_id];
695
696         return (wq->cons_idx) & wq->mask;
697 }
698
699 void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,
700                          int num_wqebbs, u16 owner)
701 {
702         struct hinic_nic_io *nic_io = hwdev->nic_io;
703         struct hinic_sq *sq = &nic_io->qps[q_id].sq;
704
705         if (owner != sq->owner)
706                 sq->owner = owner;
707
708         sq->wq->delta += num_wqebbs;
709         sq->wq->prod_idx -= num_wqebbs;
710 }
711
712 void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev,
713                               u16 q_id, int wqebb_cnt)
714 {
715         struct hinic_nic_io *nic_io = hwdev->nic_io;
716         struct hinic_sq *sq = &nic_io->qps[q_id].sq;
717
718         hinic_put_wqe(sq->wq, wqebb_cnt);
719 }
720
721 void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi)
722 {
723         struct hinic_nic_io *nic_io = hwdev->nic_io;
724         struct hinic_rq *rq = &nic_io->qps[q_id].rq;
725
726         return hinic_get_wqe(rq->wq, 1, pi);
727 }
728
729 void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs)
730 {
731         struct hinic_nic_io *nic_io = hwdev->nic_io;
732         struct hinic_rq *rq = &nic_io->qps[q_id].rq;
733
734         rq->wq->delta += num_wqebbs;
735         rq->wq->prod_idx -= num_wqebbs;
736 }
737
738 u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
739 {
740         struct hinic_nic_io *nic_io = hwdev->nic_io;
741         struct hinic_wq *wq = &nic_io->rq_wq[q_id];
742
743         return (wq->cons_idx) & wq->mask;
744 }
745
746 void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt)
747 {
748         struct hinic_nic_io *nic_io = hwdev->nic_io;
749         struct hinic_rq *rq = &nic_io->qps[q_id].rq;
750
751         hinic_put_wqe(rq->wq, wqe_cnt);
752 }
753
754 static int hinic_alloc_nicio(struct hinic_hwdev *hwdev)
755 {
756         struct hinic_nic_io *nic_io = hwdev->nic_io;
757         struct rte_pci_device *pdev = hwdev->pcidev_hdl;
758         u16 max_qps, num_qp;
759         int err;
760
761         max_qps = hinic_func_max_qnum(hwdev);
762         if ((max_qps & (max_qps - 1))) {
763                 PMD_DRV_LOG(ERR, "Wrong number of max_qps: %d",
764                         max_qps);
765                 return -EINVAL;
766         }
767
768         nic_io->max_qps = max_qps;
769         nic_io->num_qps = max_qps;
770         num_qp = max_qps;
771
772         nic_io->qps = kzalloc_aligned(num_qp * sizeof(*nic_io->qps),
773                                       GFP_KERNEL);
774         if (!nic_io->qps) {
775                 PMD_DRV_LOG(ERR, "Failed to allocate qps");
776                 err = -ENOMEM;
777                 goto alloc_qps_err;
778         }
779
780         nic_io->ci_vaddr_base = dma_zalloc_coherent(hwdev,
781                                     CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
782                                     &nic_io->ci_dma_base,
783                                     pdev->device.numa_node);
784         if (!nic_io->ci_vaddr_base) {
785                 PMD_DRV_LOG(ERR, "Failed to allocate ci area");
786                 err = -ENOMEM;
787                 goto ci_base_err;
788         }
789
790         nic_io->sq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->sq_wq),
791                                         GFP_KERNEL);
792         if (!nic_io->sq_wq) {
793                 PMD_DRV_LOG(ERR, "Failed to allocate sq wq array");
794                 err = -ENOMEM;
795                 goto sq_wq_err;
796         }
797
798         nic_io->rq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->rq_wq),
799                                         GFP_KERNEL);
800         if (!nic_io->rq_wq) {
801                 PMD_DRV_LOG(ERR, "Failed to allocate rq wq array");
802                 err = -ENOMEM;
803                 goto rq_wq_err;
804         }
805
806         return HINIC_OK;
807
808 rq_wq_err:
809         kfree(nic_io->sq_wq);
810
811 sq_wq_err:
812         dma_free_coherent(hwdev, CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
813                           nic_io->ci_vaddr_base, nic_io->ci_dma_base);
814
815 ci_base_err:
816         kfree(nic_io->qps);
817
818 alloc_qps_err:
819         return err;
820 }
821
/* Release resources allocated by hinic_alloc_nicio(), in reverse
 * allocation order.
 */
static void hinic_free_nicio(struct hinic_hwdev *hwdev)
{
        struct hinic_nic_io *nic_io = hwdev->nic_io;

        /* rq work queue array */
        kfree(nic_io->rq_wq);

        /* sq work queue array */
        kfree(nic_io->sq_wq);

        /* sq ci table DMA area */
        dma_free_coherent(hwdev,
                          CI_TABLE_SIZE(nic_io->max_qps, HINIC_PAGE_SIZE),
                          nic_io->ci_vaddr_base, nic_io->ci_dma_base);

        /* qp array */
        kfree(nic_io->qps);
}
840
841 /* alloc nic hwdev and init function table */
842 int hinic_init_nicio(struct hinic_hwdev *hwdev)
843 {
844         int rc;
845
846         hwdev->nic_io = rte_zmalloc("hinic_nicio", sizeof(*hwdev->nic_io),
847                                       RTE_CACHE_LINE_SIZE);
848         if (!hwdev->nic_io) {
849                 PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s",
850                             hwdev->pcidev_hdl->name);
851                 return -ENOMEM;
852         }
853         hwdev->nic_io->hwdev = hwdev;
854
855         /* alloc root working queue set */
856         rc = hinic_alloc_nicio(hwdev);
857         if (rc) {
858                 PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s",
859                             hwdev->pcidev_hdl->name);
860                 goto allc_nicio_fail;
861         }
862
863         rc = hinic_init_nic_hwdev(hwdev);
864         if (rc) {
865                 PMD_DRV_LOG(ERR, "Initialize hwdev failed, dev_name: %s",
866                             hwdev->pcidev_hdl->name);
867                 goto init_nic_hwdev_fail;
868         }
869
870         return 0;
871
872 init_nic_hwdev_fail:
873         hinic_free_nicio(hwdev);
874
875 allc_nicio_fail:
876         rte_free(hwdev->nic_io);
877         return rc;
878 }
879
880 void hinic_deinit_nicio(struct hinic_hwdev *hwdev)
881 {
882         hinic_free_nicio(hwdev);
883
884         hinic_free_nic_hwdev(hwdev);
885
886         rte_free(hwdev->nic_io);
887         hwdev->nic_io = NULL;
888 }
889
890 /**
891  * hinic_convert_rx_buf_size - convert rx buffer size to hw size
892  * @rx_buf_sz: receive buffer size of mbuf
893  * @match_sz: receive buffer size of hardware
894  * @return
895  *   0 on success,
896  *   negative error value otherwise.
897  */
898 int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz)
899 {
900         u32 i, num_hw_types, best_match_sz;
901
902         if (unlikely(!match_sz || rx_buf_sz < HINIC_RX_BUF_SIZE_32B))
903                 return -EINVAL;
904
905         if (rx_buf_sz >= HINIC_RX_BUF_SIZE_16K) {
906                 best_match_sz =  HINIC_RX_BUF_SIZE_16K;
907                 goto size_matched;
908         }
909
910         num_hw_types = sizeof(hinic_hw_rx_buf_size) /
911                 sizeof(hinic_hw_rx_buf_size[0]);
912         best_match_sz = hinic_hw_rx_buf_size[0];
913         for (i = 0; i < num_hw_types; i++) {
914                 if (rx_buf_sz == hinic_hw_rx_buf_size[i]) {
915                         best_match_sz = hinic_hw_rx_buf_size[i];
916                         break;
917                 } else if (rx_buf_sz < hinic_hw_rx_buf_size[i]) {
918                         break;
919                 }
920                 best_match_sz = hinic_hw_rx_buf_size[i];
921         }
922
923 size_matched:
924         *match_sz = best_match_sz;
925
926         return 0;
927 }