/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#ifndef _HINIC_PMD_WQ_H_
#define _HINIC_PMD_WQ_H_

#define WQS_BLOCKS_PER_PAGE             4

#define WQ_SIZE(wq)             (u32)((u64)(wq)->q_depth * (wq)->wqebb_size)

#define WQE_PAGE_NUM(wq, idx)   (((idx) >> ((wq)->wqebbs_per_page_shift)) & \
                                ((wq)->num_q_pages - 1))

#define WQE_PAGE_OFF(wq, idx)   ((u64)((wq)->wqebb_size) * \
                                ((idx) & ((wq)->num_wqebbs_per_page - 1)))
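
/*
 * Worked example (values assumed for illustration): with 64 B WQEBBs and
 * 64 WQEBBs per page (wqebbs_per_page_shift == 6), WQE index 70 falls in
 * page 70 >> 6 = 1 at byte offset (70 & 63) * 64 = 384. Both macros rely
 * on num_q_pages and num_wqebbs_per_page being powers of two.
 */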

#define WQ_PAGE_ADDR_SIZE               sizeof(u64)
#define WQ_PAGE_ADDR_SIZE_SHIFT         3
#define WQ_PAGE_ADDR(wq, idx)           \
                (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \
                (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT)))

#define WQ_BLOCK_SIZE           4096UL
#define WQS_PAGE_SIZE           (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
#define WQ_MAX_PAGES            (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)

#define CMDQ_BLOCKS_PER_PAGE            8
#define CMDQ_BLOCK_SIZE                 512UL
#define CMDQ_PAGE_SIZE                  ALIGN((CMDQ_BLOCKS_PER_PAGE * \
                                                CMDQ_BLOCK_SIZE), PAGE_SIZE)

#define ADDR_4K_ALIGNED(addr)           (0 == ((addr) & 0xfff))
#define ADDR_256K_ALIGNED(addr)         (0 == ((addr) & 0x3ffff))

#define WQ_BASE_VADDR(wqs, wq)          \
                (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \
                                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_PADDR(wqs, wq)  (((wqs)->page_paddr[(wq)->page_idx]) \
                                + (u64)(wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_ADDR(wqs, wq)           \
                (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
                                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
                        (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
                        (((u64)((cmdq_pages)->cmdq_page_paddr)) \
                                + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE)

#define CMDQ_BASE_ADDR(cmdq_pages, wq)  \
                        (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
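
/*
 * MASKED_WQE_IDX() assumes q_depth is a power of two and mask == q_depth - 1,
 * so the monotonically growing prod_idx/cons_idx wrap into the ring with a
 * single AND instead of a modulo.
 */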

#define WQE_SHADOW_PAGE(wq, wqe)        \
                (u16)(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
                / (wq)->max_wqe_size)

#define WQE_IN_RANGE(wqe, start, end)   \
                (((unsigned long)(wqe) >= (unsigned long)(start)) && \
                ((unsigned long)(wqe) < (unsigned long)(end)))

#define WQ_NUM_PAGES(num_wqs)   \
        (ALIGN((u32)(num_wqs), WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)

#define WQ_WQE_ADDR(wq, idx) ((void *)((u64)((wq)->queue_buf_vaddr) + \
                              ((idx) << (wq)->wqebb_shift)))

#define WQ_PAGE_PFN_SHIFT                       12
#define WQ_BLOCK_PFN_SHIFT                      9

#define WQ_PAGE_PFN(page_addr)          ((page_addr) >> WQ_PAGE_PFN_SHIFT)
#define WQ_BLOCK_PFN(page_addr)         ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
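
/*
 * The PFN macros convert a physical address into frame-number units:
 * 4 KiB frames for WQ pages (shift 12) and 512 B frames for blocks
 * (shift 9), which is the granularity handed to the hardware.
 */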

#define HINIC_SQ_WQEBB_SIZE     64
#define HINIC_RQ_WQE_SIZE       32
#define HINIC_SQ_WQEBB_SHIFT    6
#define HINIC_RQ_WQEBB_SHIFT    5

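/*
 * Scatter-gather element: the 64-bit DMA address travels as two 32-bit
 * halves (hi_addr/lo_addr); hinic_set_sge() below fills them in.
 */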
struct hinic_sge {
        u32             hi_addr;
        u32             lo_addr;
        u32             len;
};

/* Working Queue */
struct hinic_wq {
        /* The addresses are 64-bit in the HW */
        u64             queue_buf_vaddr;

        u16             q_depth;
        u16             mask;
        u32             delta;

        u32             cons_idx;
        u32             prod_idx;

        u64             queue_buf_paddr;

        u32             wqebb_size;
        u32             wqebb_shift;

        u32             wq_buf_size;

        u32             rsvd[5];
};
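
/*
 * A minimal sketch (not the driver's implementation) of how the ring
 * fields cooperate, assuming delta counts free WQEBBs and q_depth is a
 * power of two:
 *
 *      static inline void *wq_produce(struct hinic_wq *wq, int num_wqebbs,
 *                                     u16 *prod_idx)
 *      {
 *              if (wq->delta < (u32)num_wqebbs)
 *                      return NULL;    // ring full
 *              wq->delta -= num_wqebbs;
 *              *prod_idx = MASKED_WQE_IDX(wq, wq->prod_idx);
 *              wq->prod_idx += num_wqebbs;
 *              return WQ_WQE_ADDR(wq, (u32)*prod_idx);
 *      }
 */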

void hinic_wq_wqe_pg_clear(struct hinic_wq *wq);

int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,
                     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
                     u16 q_depth);

void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
                     int cmdq_blocks);

int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
                      u32 wqebb_shift, u16 q_depth);

void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq);

void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx);

void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs);

void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx);
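
/*
 * Typical producer flow, sketched under the assumption that the caller
 * serializes access to the queue (error handling elided):
 *
 *      u16 pi;
 *      void *wqe = hinic_get_wqe(wq, num_wqebbs, &pi);
 *      if (wqe != NULL) {
 *              // build the WQE in place at prod index pi
 *      }
 *      // after the hardware has consumed the entry:
 *      hinic_put_wqe(wq, num_wqebbs);
 */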

void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len);
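
/*
 * Expected behavior (a sketch; the actual definition lives in the .c
 * file): split the DMA address into its upper and lower 32 bits.
 *
 *      sge->hi_addr = (u32)(addr >> 32);
 *      sge->lo_addr = (u32)addr;
 *      sge->len = len;
 */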

#endif /* _HINIC_PMD_WQ_H_ */