/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include "hinic_compat.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_wq.h"

/* Release the DMA-coherent buffer backing a work queue and clear its
 * recorded addresses.
 */
static void free_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	dma_free_coherent(hwdev, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,
			  (dma_addr_t)wq->queue_buf_paddr);

	wq->queue_buf_paddr = 0;
	wq->queue_buf_vaddr = 0;
}

/* Allocate a zeroed, 256KB-aligned DMA buffer for the work queue. */
static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	dma_addr_t dma_addr = 0;

	wq->queue_buf_vaddr = (u64)(u64 *)
		dma_zalloc_coherent_aligned256k(hwdev, wq->wq_buf_size,
						&dma_addr, GFP_KERNEL);
	if (!wq->queue_buf_vaddr) {
		PMD_DRV_LOG(ERR, "Failed to allocate wq page");
		return -ENOMEM;
	}

	if (!ADDR_256K_ALIGNED(dma_addr)) {
		PMD_DRV_LOG(ERR, "Wqe pages are not 256k aligned!");
		dma_free_coherent(hwdev, wq->wq_buf_size,
				  (void *)wq->queue_buf_vaddr,
				  dma_addr);
		return -ENOMEM;
	}

	wq->queue_buf_paddr = dma_addr;

	return 0;
}

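/*
 * Illustrative note, not part of this file: assuming ADDR_256K_ALIGNED
 * follows the usual mask-based pattern, the alignment check above is
 * equivalent to testing the low 18 bits of the bus address, e.g.:
 *
 *	#define ADDR_256K_ALIGNED(addr)	(((u64)(addr) & 0x3FFFF) == 0)
 *
 * i.e. the buffer start must be a multiple of 256KB (1 << 18 bytes).
 */
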
int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
		      u32 wqebb_shift, u16 q_depth)
{
	int err;

	/* The index mask below requires a power-of-2 depth */
	if (q_depth & (q_depth - 1)) {
		PMD_DRV_LOG(ERR, "WQ q_depth isn't power of 2");
		return -EINVAL;
	}

	wq->wqebb_size = 1 << wqebb_shift;
	wq->wqebb_shift = wqebb_shift;
	wq->wq_buf_size = ((u32)q_depth) << wqebb_shift;
	wq->q_depth = q_depth;

	if (wq->wq_buf_size > (HINIC_PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) {
		PMD_DRV_LOG(ERR, "Invalid q_depth %u: WQ buffer does not fit in one page",
			q_depth);
		return -EINVAL;
	}

	err = alloc_wq_pages(hwdev, wq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate wq pages");
		return err;
	}

	wq->cons_idx = 0;
	wq->prod_idx = 0;
	wq->delta = q_depth;
	wq->mask = q_depth - 1;

	return 0;
}

void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
{
	free_wq_pages(hwdev, wq);
}

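/*
 * Usage sketch (illustrative only): a queue setup path would pair
 * hinic_wq_allocate() with hinic_wq_free(). The wqebb_shift/q_depth
 * values below are hypothetical, not mandated by the hardware:
 *
 *	struct hinic_wq wq;
 *
 *	// 4096 WQEBBs of (1 << 6) = 64 bytes each -> 256KB buffer
 *	if (hinic_wq_allocate(hwdev, &wq, 6, 4096) != 0)
 *		return -ENOMEM;
 *	...
 *	hinic_wq_free(hwdev, &wq);
 */
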
/* Return num_wqebbs WQEBBs to the queue: advance the consumer index
 * and grow delta, the count of free WQEBBs.
 */
void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)
{
	wq->cons_idx += num_wqebbs;
	wq->delta += num_wqebbs;
}

void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)
{
	u16 curr_cons_idx;

	/* Not enough in-use WQEBBs to satisfy the read */
	if ((wq->delta + num_wqebbs) > wq->q_depth)
		return NULL;

	curr_cons_idx = (u16)(wq->cons_idx);

	curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);

	*cons_idx = curr_cons_idx;

	return WQ_WQE_ADDR(wq, (u32)(*cons_idx));
}

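/*
 * Consumer-side sketch (illustrative only): completions are typically
 * drained by reading the WQE at the current consumer index, handling it,
 * then releasing its WQEBBs. process_wqe() is a hypothetical placeholder:
 *
 *	u16 ci;
 *	void *wqe = hinic_read_wqe(wq, 1, &ci);
 *
 *	if (wqe != NULL) {
 *		process_wqe(wqe, ci);
 *		hinic_put_wqe(wq, 1);
 *	}
 */
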
int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,
		     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
		     u16 q_depth)
{
	int i, j, err = -ENOMEM;

	/* The caller must ensure q_depth is a power of 2 and
	 * wqebb_size is not 0; neither is validated here.
	 */
	for (i = 0; i < cmdq_blocks; i++) {
		wq[i].wqebb_size = 1 << wqebb_shift;
		wq[i].wqebb_shift = wqebb_shift;
		wq[i].wq_buf_size = wq_buf_size;
		wq[i].q_depth = q_depth;

		err = alloc_wq_pages(hwdev, &wq[i]);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks");
			goto cmdq_block_err;
		}

		wq[i].cons_idx = 0;
		wq[i].prod_idx = 0;
		wq[i].delta = q_depth;

		wq[i].mask = q_depth - 1;
	}

	return 0;

cmdq_block_err:
	/* Unwind: free only the blocks that were successfully allocated */
	for (j = 0; j < i; j++)
		free_wq_pages(hwdev, &wq[j]);

	return err;
}

void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
		     int cmdq_blocks)
{
	int i;

	for (i = 0; i < cmdq_blocks; i++)
		free_wq_pages(hwdev, &wq[i]);
}

/* Reset the queue indexes and zero the WQE buffer */
void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)
{
	wq->cons_idx = 0;
	wq->prod_idx = 0;

	memset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);
}

void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)
{
	u16 curr_prod_idx;

	/* Claim num_wqebbs WQEBBs: shrink the free count and advance the
	 * producer index; the caller must check for free space first.
	 */
	wq->delta -= num_wqebbs;
	curr_prod_idx = wq->prod_idx;
	wq->prod_idx += num_wqebbs;
	*prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);

	return WQ_WQE_ADDR(wq, (u32)(*prod_idx));
}

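/*
 * Producer-side sketch (illustrative only): callers are expected to check
 * wq->delta for free WQEBBs before claiming them, then fill the returned
 * WQE and ring the doorbell (handled elsewhere in the PMD). fill_wqe()
 * and ring_doorbell() are hypothetical placeholders:
 *
 *	u16 pi;
 *	void *wqe;
 *
 *	if (wq->delta < num_wqebbs)
 *		return -EBUSY;
 *
 *	wqe = hinic_get_wqe(wq, num_wqebbs, &pi);
 *	fill_wqe(wqe);
 *	ring_doorbell(wq, pi);
 */
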
/**
 * hinic_set_sge - set dma area in scatter gather entry
 * @sge: scatter gather entry
 * @addr: dma address
 * @len: length of relevant data in the dma address
 **/
void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)
{
	sge->hi_addr = upper_32_bits(addr);
	sge->lo_addr = lower_32_bits(addr);
	sge->len = len;
}

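/*
 * Usage sketch (illustrative only): splitting a 64-bit IOVA into the
 * hi/lo halves of an SGE for an mbuf data buffer. rte_mbuf_data_iova()
 * and rte_pktmbuf_data_len() are standard DPDK accessors; their use here
 * is an example, not a requirement of this helper:
 *
 *	struct hinic_sge sge;
 *
 *	hinic_set_sge(&sge, rte_mbuf_data_iova(mbuf),
 *		      rte_pktmbuf_data_len(mbuf));
 */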