1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
5 #ifndef __DLB2_HW_TYPES_NEW_H
6 #define __DLB2_HW_TYPES_NEW_H
8 #include "../../dlb2_priv.h"
11 #include "dlb2_osdep_list.h"
12 #include "dlb2_osdep_types.h"
13 #include "dlb2_regs_new.h"
/*
 * Register bit-field accessors. Every field mask FOO used with these macros
 * must have a companion FOO_LOC macro giving the bit position of the field's
 * least-significant bit; the token-pasted mask##_LOC below relies on that
 * naming convention.
 *
 * Fix: parenthesize the 'x' argument in BITS_SET/BITS_CLR for consistency
 * with DLB2_BIT_SET and to keep macro arguments expansion-safe.
 */
/* Replace the field selected by @mask in @x with @val (shifted into place). */
#define DLB2_BITS_SET(x, val, mask) ((x) = ((x) & ~(mask)) \
				     | (((val) << (mask##_LOC)) & (mask)))
/* Clear the bits selected by @mask in @x. */
#define DLB2_BITS_CLR(x, mask) ((x) &= ~(mask))
/* Set the bits selected by @mask in @x. */
#define DLB2_BIT_SET(x, mask) ((x) |= (mask))
/* Extract the value of the field selected by @mask from @x. */
#define DLB2_BITS_GET(x, mask) (((x) & (mask)) >> (mask##_LOC))
/*
 * Hardware resource counts and driver iteration limits.
 * Fix: DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS was defined twice with the same
 * value; the redundant second definition is removed.
 */
#define DLB2_MAX_NUM_VDEVS 16
#define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS 2
#define DLB2_NUM_ARB_WEIGHTS 8
#define DLB2_MAX_NUM_AQED_ENTRIES 2048
#define DLB2_MAX_WEIGHT 255
#define DLB2_NUM_COS_DOMAINS 4
#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5
#define DLB2_MAX_CQ_COMP_CHECK_LOOPS 409600
/* Empty-check bound: 32 ports * 64K QEs * (800MHz / 30ns-ish per poll) --
 * NOTE(review): derivation inferred from the factors; confirm upstream.
 */
#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
/* PCI BARs: BAR 0 is the function (producer-port) window, BAR 2 the CSRs. */
#define DLB2_FUNC_BAR 0
#define DLB2_CSR_BAR 2

/* PCI device IDs for DLB 2.0 physical and virtual functions. */
#define PCI_DEVICE_ID_INTEL_DLB2_PF 0x2710
#define PCI_DEVICE_ID_INTEL_DLB2_VF 0x2711

/* PCI device IDs for DLB 2.5 physical and virtual functions. */
#define PCI_DEVICE_ID_INTEL_DLB2_5_PF 0x2714
#define PCI_DEVICE_ID_INTEL_DLB2_5_VF 0x2715

/* Alarm source: reported by the system (SYS) block or the DLB core. */
#define DLB2_ALARM_HW_SOURCE_SYS 0
#define DLB2_ALARM_HW_SOURCE_DLB 1

/* Alarm unit identifier for the CHP (history-list/completion) block --
 * NOTE(review): unit name expansion inferred; confirm against hardware spec.
 */
#define DLB2_ALARM_HW_UNIT_CHP 4

/* Alarm IDs raised by the SYS block. */
#define DLB2_ALARM_SYS_AID_ILLEGAL_QID 3
#define DLB2_ALARM_SYS_AID_DISABLED_QID 4
#define DLB2_ALARM_SYS_AID_ILLEGAL_HCW 5

/* Alarm IDs raised by the CHP unit. */
#define DLB2_ALARM_HW_CHP_AID_ILLEGAL_ENQ 1
#define DLB2_ALARM_HW_CHP_AID_EXCESS_TOKEN_POPS 2
/*
 * Hardware-defined base addresses. Those prefixed 'DLB2_DRV' are only used by
 * the driver (original comment truncated in this copy -- confirm exact
 * wording upstream); each port's producer-port page is STRIDE bytes.
 */
#define DLB2_DRV_LDB_PP_BASE 0x2300000
#define DLB2_DRV_LDB_PP_STRIDE 0x1000
#define DLB2_DRV_LDB_PP_BOUND (DLB2_DRV_LDB_PP_BASE + \
			       DLB2_DRV_LDB_PP_STRIDE * DLB2_MAX_NUM_LDB_PORTS)
#define DLB2_DRV_DIR_PP_BASE 0x2200000
#define DLB2_DRV_DIR_PP_STRIDE 0x1000
#define DLB2_DRV_DIR_PP_BOUND (DLB2_DRV_DIR_PP_BASE + \
			       DLB2_DRV_DIR_PP_STRIDE * DLB2_MAX_NUM_DIR_PORTS)
/* Application-visible load-balanced producer-port window. */
#define DLB2_LDB_PP_BASE 0x2100000
#define DLB2_LDB_PP_STRIDE 0x1000
#define DLB2_LDB_PP_BOUND (DLB2_LDB_PP_BASE + \
			   DLB2_LDB_PP_STRIDE * DLB2_MAX_NUM_LDB_PORTS)
/* Byte offset of load-balanced port @id's producer port. */
#define DLB2_LDB_PP_OFFS(id) (DLB2_LDB_PP_BASE + (id) * DLB2_PP_SIZE)
/* Application-visible directed producer-port window.
 * NOTE(review): the bound uses the v2.5 directed-port count while the DRV
 * window above uses DLB2_MAX_NUM_DIR_PORTS -- confirm the asymmetry is
 * intentional for this 'new' (2.5-capable) header.
 */
#define DLB2_DIR_PP_BASE 0x2000000
#define DLB2_DIR_PP_STRIDE 0x1000
#define DLB2_DIR_PP_BOUND (DLB2_DIR_PP_BASE + \
			   DLB2_DIR_PP_STRIDE * \
			   DLB2_MAX_NUM_DIR_PORTS_V2_5)
/* Byte offset of directed port @id's producer port. */
#define DLB2_DIR_PP_OFFS(id) (DLB2_DIR_PP_BASE + (id) * DLB2_PP_SIZE)
76 struct dlb2_resource_id {
83 struct dlb2_freelist {
89 static inline u32 dlb2_freelist_count(struct dlb2_freelist *list)
91 return list->bound - list->base - list->offset;
117 struct dlb2_ldb_queue {
118 struct dlb2_list_entry domain_list;
119 struct dlb2_list_entry func_list;
120 struct dlb2_resource_id id;
121 struct dlb2_resource_id domain_id;
122 u32 num_qid_inflights;
124 u32 sn_group; /* sn == sequence number */
128 u8 num_pending_additions;
/*
 * Directed ports and queues are paired by nature, so the driver tracks them
 * with a single data structure.
 */
137 struct dlb2_dir_pq_pair {
138 struct dlb2_list_entry domain_list;
139 struct dlb2_list_entry func_list;
140 struct dlb2_resource_id id;
141 struct dlb2_resource_id domain_id;
/* State of one CQ slot's queue mapping. The first two enumerator names were
 * lost in this copy and are reconstructed from their surviving comments;
 * confirm names and ordering against the upstream driver before relying on
 * the numeric values.
 */
enum dlb2_qid_map_state {
	/* The slot does not contain a valid queue mapping */
	DLB2_QUEUE_UNMAPPED,
	/* The slot contains a valid queue mapping */
	DLB2_QUEUE_MAPPED,
	/* The driver is mapping a queue into this slot */
	DLB2_QUEUE_MAP_IN_PROG,
	/* The driver is unmapping a queue from this slot */
	DLB2_QUEUE_UNMAP_IN_PROG,
	/*
	 * The driver is unmapping a queue from this slot, and once complete
	 * will replace it with another mapping.
	 */
	DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP,
};
166 struct dlb2_ldb_port_qid_map {
167 enum dlb2_qid_map_state state;
174 struct dlb2_ldb_port {
175 struct dlb2_list_entry domain_list;
176 struct dlb2_list_entry func_list;
177 struct dlb2_resource_id id;
178 struct dlb2_resource_id domain_id;
179 /* The qid_map represents the hardware QID mapping state. */
180 struct dlb2_ldb_port_qid_map qid_map[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
181 u32 hist_list_entry_base;
182 u32 hist_list_entry_limit;
185 u8 num_pending_removals;
192 struct dlb2_sn_group {
194 u32 sequence_numbers_per_queue;
199 static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group)
202 0x0000ffff, /* 64 SNs per queue */
203 0x000000ff, /* 128 SNs per queue */
204 0x0000000f, /* 256 SNs per queue */
205 0x00000003, /* 512 SNs per queue */
206 0x00000001}; /* 1024 SNs per queue */
208 return group->slot_use_bitmap == mask[group->mode];
211 static inline int dlb2_sn_group_alloc_slot(struct dlb2_sn_group *group)
213 const u32 bound[] = {16, 8, 4, 2, 1};
216 for (i = 0; i < bound[group->mode]; i++) {
217 if (!(group->slot_use_bitmap & (1 << i))) {
218 group->slot_use_bitmap |= 1 << i;
227 dlb2_sn_group_free_slot(struct dlb2_sn_group *group, int slot)
229 group->slot_use_bitmap &= ~(1 << slot);
232 static inline int dlb2_sn_group_used_slots(struct dlb2_sn_group *group)
236 for (i = 0; i < 32; i++)
237 cnt += !!(group->slot_use_bitmap & (1 << i));
242 struct dlb2_hw_domain {
243 struct dlb2_function_resources *parent_func;
244 struct dlb2_list_entry func_list;
245 struct dlb2_list_head used_ldb_queues;
246 struct dlb2_list_head used_ldb_ports[DLB2_NUM_COS_DOMAINS];
247 struct dlb2_list_head used_dir_pq_pairs;
248 struct dlb2_list_head avail_ldb_queues;
249 struct dlb2_list_head avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
250 struct dlb2_list_head avail_dir_pq_pairs;
251 u32 total_hist_list_entries;
252 u32 avail_hist_list_entries;
253 u32 hist_list_entry_base;
254 u32 hist_list_entry_offset;
264 u32 num_avail_aqed_entries;
265 u32 num_used_aqed_entries;
266 struct dlb2_resource_id id;
267 int num_pending_removals;
268 int num_pending_additions;
275 struct dlb2_function_resources {
276 struct dlb2_list_head avail_domains;
277 struct dlb2_list_head used_domains;
278 struct dlb2_list_head avail_ldb_queues;
279 struct dlb2_list_head avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
280 struct dlb2_list_head avail_dir_pq_pairs;
281 struct dlb2_bitmap *avail_hist_list_entries;
282 u32 num_avail_domains;
283 u32 num_avail_ldb_queues;
284 u32 num_avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
285 u32 num_avail_dir_pq_pairs;
288 u32 num_avail_qed_entries;
289 u32 num_avail_dqed_entries;
292 u32 num_avail_entries;
295 u32 num_avail_aqed_entries;
296 u8 locked; /* (VDEV only) */
/*
 * After initialization, each resource in dlb2_hw_resources is located in one
 * of the following lists:
 * -- The PF's available resources list. These are unconfigured resources owned
 *	by the PF and not allocated to a dlb2 scheduling domain.
 * -- A VDEV's available resources list. These are VDEV-owned unconfigured
 *	resources not allocated to a dlb2 scheduling domain.
 * -- A domain's available resources list. These are domain-owned unconfigured
 *	resources.
 * -- A domain's used resources list. These are domain-owned configured
 *	resources.
 *
 * A resource moves to a new list when a VDEV or domain is created or destroyed,
 * or when the resource is configured.
 */
314 struct dlb2_hw_resources {
315 struct dlb2_ldb_queue ldb_queues[DLB2_MAX_NUM_LDB_QUEUES];
316 struct dlb2_ldb_port ldb_ports[DLB2_MAX_NUM_LDB_PORTS];
317 struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS_V2_5];
318 struct dlb2_sn_group sn_groups[DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
323 u32 *isr_in_progress;
326 struct dlb2_sw_mbox {
327 struct dlb2_mbox vdev_to_pf;
328 struct dlb2_mbox pf_to_vdev;
329 void (*pf_to_vdev_inject)(void *arg);
330 void *pf_to_vdev_inject_arg;
338 unsigned long csr_phys_addr;
341 unsigned long func_phys_addr;
343 /* Resource tracking */
344 struct dlb2_hw_resources rsrcs;
345 struct dlb2_function_resources pf;
346 struct dlb2_function_resources vdev[DLB2_MAX_NUM_VDEVS];
347 struct dlb2_hw_domain domains[DLB2_MAX_NUM_DOMAINS];
348 u8 cos_reservation[DLB2_NUM_COS_DOMAINS];
352 struct dlb2_sw_mbox mbox[DLB2_MAX_NUM_VDEVS];
353 unsigned int pasid[DLB2_MAX_NUM_VDEVS];
356 #endif /* __DLB2_HW_TYPES_NEW_H */