event/dlb2: add v2.5 create LDB port
drivers/event/dlb2/pf/base/dlb2_resource_new.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#define DLB2_USE_NEW_HEADERS /* TEMPORARY FOR MERGE */

#include "dlb2_user.h"

#include "dlb2_hw_types_new.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs_new.h"
#include "dlb2_resource_new.h" /* TEMP FOR UPSTREAM PATCHES */

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"

#define DLB2_DOM_LIST_HEAD(head, type) \
        DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
        DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
        DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
        DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
        DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
        DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)

/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function need only be called for configuration that can occur after the
 * domain has started; prior to starting, applications can't send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
        DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
}

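/*
 * Initialize a domain's resource-tracking lists: used and available lists for
 * load-balanced queues and directed port/queue pairs, plus one used and one
 * available load-balanced port list per class-of-service.
 */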
static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
        int i;

        dlb2_list_init_head(&domain->used_ldb_queues);
        dlb2_list_init_head(&domain->used_dir_pq_pairs);
        dlb2_list_init_head(&domain->avail_ldb_queues);
        dlb2_list_init_head(&domain->avail_dir_pq_pairs);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&domain->used_ldb_ports[i]);
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}

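/*
 * Initialize a PCI function's (PF or vdev) resource-tracking lists: available
 * and used scheduling domains, available load-balanced queues, available
 * directed port/queue pairs, and per-class-of-service load-balanced ports.
 */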
static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
{
        int i;

        dlb2_list_init_head(&rsrc->avail_domains);
        dlb2_list_init_head(&rsrc->used_domains);
        dlb2_list_init_head(&rsrc->avail_ldb_queues);
        dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
}

/**
 * dlb2_resource_free() - free device state memory
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function frees software state pointed to by dlb2_hw. This function
 * should be called when resetting the device or unloading the driver.
 */
void dlb2_resource_free(struct dlb2_hw *hw)
{
        int i;

        if (hw->pf.avail_hist_list_entries)
                dlb2_bitmap_free(hw->pf.avail_hist_list_entries);

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                if (hw->vdev[i].avail_hist_list_entries)
                        dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
        }
}

/**
 * dlb2_resource_init() - initialize the device
 * @hw: pointer to struct dlb2_hw.
 * @ver: device version.
 *
 * This function initializes the device's software state (pointed to by the hw
 * argument) and programs global scheduling QoS registers. This function should
 * be called during driver initialization, and the dlb2_hw structure should
 * be zero-initialized before calling the function.
 *
 * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
 * device is reset.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 */
int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
        struct dlb2_list_entry *list;
        unsigned int i;
        int ret;

        /*
         * For optimal load-balancing, ports that map to one or more QIDs in
         * common should not be in numerical sequence. The port->QID mapping is
         * application dependent, but the driver interleaves port IDs as much
         * as possible to reduce the likelihood of sequential ports mapping to
         * the same QID(s). This initial allocation of port IDs maximizes the
         * average distance between an ID and its immediate neighbors (i.e.
         * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
         * 3, etc.).
         */
        const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
                0,  7,  14,  5, 12,  3, 10,  1,  8, 15,  6, 13,  4, 11,  2,  9,
                16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
                32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
                48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
        };

        hw->ver = ver;

        dlb2_init_fn_rsrc_lists(&hw->pf);

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
                dlb2_init_fn_rsrc_lists(&hw->vdev[i]);

        for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
                dlb2_init_domain_rsrc_lists(&hw->domains[i]);
                hw->domains[i].parent_func = &hw->pf;
        }

        /* Give all resources to the PF driver */
        hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
        for (i = 0; i < hw->pf.num_avail_domains; i++) {
                list = &hw->domains[i].func_list;

                dlb2_list_add(&hw->pf.avail_domains, list);
        }

        hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
        for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
                list = &hw->rsrcs.ldb_queues[i].func_list;

                dlb2_list_add(&hw->pf.avail_ldb_queues, list);
        }

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                hw->pf.num_avail_ldb_ports[i] =
                        DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;

        for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
                int cos_id = i >> DLB2_NUM_COS_DOMAINS;
                struct dlb2_ldb_port *port;

                port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

                dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
                              &port->func_list);
        }

        hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
        for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
                list = &hw->rsrcs.dir_pq_pairs[i].func_list;

                dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
        }

        if (hw->ver == DLB2_HW_V2) {
                hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
                hw->pf.num_avail_dqed_entries =
                        DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
        } else {
                hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
        }

        hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;

        ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
                                DLB2_MAX_NUM_HIST_LIST_ENTRIES);
        if (ret)
                goto unwind;

        ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
        if (ret)
                goto unwind;

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
                                        DLB2_MAX_NUM_HIST_LIST_ENTRIES);
                if (ret)
                        goto unwind;

                ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
                if (ret)
                        goto unwind;
        }

        /* Initialize the hardware resource IDs */
        for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
                hw->domains[i].id.phys_id = i;
                hw->domains[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
                hw->rsrcs.ldb_queues[i].id.phys_id = i;
                hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
                hw->rsrcs.ldb_ports[i].id.phys_id = i;
                hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
                hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
                hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
                hw->rsrcs.sn_groups[i].id = i;
                /* Default mode (0) is 64 sequence numbers per queue */
                hw->rsrcs.sn_groups[i].mode = 0;
                hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
                hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
        }

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;

        return 0;

unwind:
        dlb2_resource_free(hw);

        return ret;
}

/**
 * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
 * @hw: dlb2_hw handle for a particular device.
 * @ver: device version.
 *
 * Clearing the PMCSR must be done at initialization to make the device fully
 * operational.
 */
void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
        u32 pmcsr_dis;

        pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));

        DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);

        DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
}

/**
 * dlb2_hw_get_num_resources() - query the PCI function's available resources
 * @hw: dlb2_hw handle for a particular device.
 * @arg: pointer to resource counts.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function returns the number of available resources for the PF or for a
 * VF.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, -EINVAL if vdev_req is true and vdev_id is
 * invalid.
 */
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
                              struct dlb2_get_num_resources_args *arg,
                              bool vdev_req,
                              unsigned int vdev_id)
{
        struct dlb2_function_resources *rsrcs;
        struct dlb2_bitmap *map;
        int i;

        if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
                return -EINVAL;

        if (vdev_req)
                rsrcs = &hw->vdev[vdev_id];
        else
                rsrcs = &hw->pf;

        arg->num_sched_domains = rsrcs->num_avail_domains;

        arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;

        arg->num_ldb_ports = 0;
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];

        arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
        arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
        arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
        arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];

        arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;

        arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;

        map = rsrcs->avail_hist_list_entries;

        arg->num_hist_list_entries = dlb2_bitmap_count(map);

        arg->max_contiguous_hist_list_entries =
                dlb2_bitmap_longest_set_range(map);

        if (hw->ver == DLB2_HW_V2) {
                arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
                arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
        } else {
                arg->num_credits = rsrcs->num_avail_entries;
        }
        return 0;
}

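/*
 * Program a domain's credit allocation into the CHP virtual-address-space
 * credit count registers. DLB 2.5 uses a single combined credit pool, while
 * DLB 2.0 programs separate load-balanced and directed pools.
 */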
static void dlb2_configure_domain_credits_v2_5(struct dlb2_hw *hw,
                                               struct dlb2_hw_domain *domain)
{
        u32 reg = 0;

        DLB2_BITS_SET(reg, domain->num_credits, DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
        DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits_v2(struct dlb2_hw *hw,
                                             struct dlb2_hw_domain *domain)
{
        u32 reg = 0;

        DLB2_BITS_SET(reg, domain->num_ldb_credits,
                      DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
        DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), reg);

        reg = 0;
        DLB2_BITS_SET(reg, domain->num_dir_credits,
                      DLB2_CHP_CFG_DIR_VAS_CRD_COUNT);
        DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
                                          struct dlb2_hw_domain *domain)
{
        if (hw->ver == DLB2_HW_V2)
                dlb2_configure_domain_credits_v2(hw, domain);
        else
                dlb2_configure_domain_credits_v2_5(hw, domain);
}

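/*
 * Reserve num_credits from the function's combined credit pool (DLB 2.5) for
 * this domain. Fails with DLB2_ST_CREDITS_UNAVAILABLE if fewer credits are
 * available than requested.
 */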
static int dlb2_attach_credits(struct dlb2_function_resources *rsrcs,
                               struct dlb2_hw_domain *domain,
                               u32 num_credits,
                               struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_entries < num_credits) {
                resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_entries -= num_credits;
        domain->num_credits += num_credits;
        return 0;
}

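/*
 * Select the next available load-balanced port in the given class-of-service,
 * preferring (in order): a port whose neighbors both belong to other domains,
 * a port with one such neighbor and one unallocated neighbor, a port with
 * both neighbors unallocated, and finally the head of the available list.
 */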
static struct dlb2_ldb_port *
dlb2_get_next_ldb_port(struct dlb2_hw *hw,
                       struct dlb2_function_resources *rsrcs,
                       u32 domain_id,
                       u32 cos_id)
{
        struct dlb2_list_entry *iter;
        struct dlb2_ldb_port *port;
        RTE_SET_USED(iter);

        /*
         * To reduce the odds of consecutive load-balanced ports mapping to the
         * same queue(s), the driver attempts to allocate ports whose neighbors
         * are owned by a different domain.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[next].owned ||
                    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
                        continue;

                if (!hw->rsrcs.ldb_ports[prev].owned ||
                    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
                        continue;

                return port;
        }

        /*
         * Failing that, the driver looks for a port with one neighbor owned by
         * a different domain and the other unallocated.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[prev].owned &&
                    hw->rsrcs.ldb_ports[next].owned &&
                    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
                        return port;

                if (!hw->rsrcs.ldb_ports[next].owned &&
                    hw->rsrcs.ldb_ports[prev].owned &&
                    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
                        return port;
        }

        /*
         * Failing that, the driver looks for a port with both neighbors
         * unallocated.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[prev].owned &&
                    !hw->rsrcs.ldb_ports[next].owned)
                        return port;
        }

        /* If all else fails, the driver returns the next available port. */
        return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
                                   typeof(*port));
}

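/*
 * Move num_ports load-balanced ports in class-of-service cos_id from the
 * function's available list to the domain's, marking each port owned.
 */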
static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
                                   struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_ports,
                                   u32 cos_id,
                                   struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
                resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_ports; i++) {
                struct dlb2_ldb_port *port;

                port = dlb2_get_next_ldb_port(hw, rsrcs,
                                              domain->id.phys_id, cos_id);
                if (port == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
                              &port->func_list);

                port->domain_id = domain->id;
                port->owned = true;

                dlb2_list_add(&domain->avail_ldb_ports[cos_id],
                              &port->domain_list);
        }

        rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;

        return 0;
}

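/*
 * Attach the requested load-balanced ports to the domain. With cos_strict,
 * each per-CoS request must be satisfied from that class alone; otherwise
 * allocation may fall back to the other classes. Ports requested without a
 * class (num_ldb_ports) can come from any class-of-service.
 */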
static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
                                 struct dlb2_function_resources *rsrcs,
                                 struct dlb2_hw_domain *domain,
                                 struct dlb2_create_sched_domain_args *args,
                                 struct dlb2_cmd_response *resp)
{
        unsigned int i, j;
        int ret;

        if (args->cos_strict) {
                for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                        u32 num = args->num_cos_ldb_ports[i];

                        /* Allocate ports from specific classes-of-service */
                        ret = __dlb2_attach_ldb_ports(hw,
                                                      rsrcs,
                                                      domain,
                                                      num,
                                                      i,
                                                      resp);
                        if (ret)
                                return ret;
                }
        } else {
                unsigned int k;
                u32 cos_id;

                /*
                 * Attempt to allocate from a specific class-of-service, but
                 * fall back to the other classes if that fails.
                 */
                for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                        for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
                                for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
                                        cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;

                                        ret = __dlb2_attach_ldb_ports(hw,
                                                                      rsrcs,
                                                                      domain,
                                                                      1,
                                                                      cos_id,
                                                                      resp);
                                        if (ret == 0)
                                                break;
                                }

                                if (ret)
                                        return ret;
                        }
                }
        }

        /* Allocate num_ldb_ports from any class-of-service */
        for (i = 0; i < args->num_ldb_ports; i++) {
                for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
                        ret = __dlb2_attach_ldb_ports(hw,
                                                      rsrcs,
                                                      domain,
                                                      1,
                                                      j,
                                                      resp);
                        if (ret == 0)
                                break;
                }

                if (ret)
                        return ret;
        }

        return 0;
}

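/*
 * Move num_ports directed port/queue pairs from the function's available
 * list to the domain's, marking each pair owned.
 */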
static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
                                 struct dlb2_function_resources *rsrcs,
                                 struct dlb2_hw_domain *domain,
                                 u32 num_ports,
                                 struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
                resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_ports; i++) {
                struct dlb2_dir_pq_pair *port;

                port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
                                           typeof(*port));
                if (port == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);

                port->domain_id = domain->id;
                port->owned = true;

                dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
        }

        rsrcs->num_avail_dir_pq_pairs -= num_ports;

        return 0;
}

static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_credits,
                                   struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_qed_entries < num_credits) {
                resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_qed_entries -= num_credits;
        domain->num_ldb_credits += num_credits;
        return 0;
}

static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_credits,
                                   struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_dqed_entries < num_credits) {
                resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_dqed_entries -= num_credits;
        domain->num_dir_credits += num_credits;
        return 0;
}

static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
                                        struct dlb2_hw_domain *domain,
                                        u32 num_atomic_inflights,
                                        struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
                resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
        domain->num_avail_aqed_entries += num_atomic_inflights;
        return 0;
}

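/*
 * Reserve a contiguous range of num_hist_list_entries history list entries
 * for the domain, recording the range's base and resetting the offset. Fails
 * with DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE if no contiguous free range is
 * large enough.
 */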
static int
dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
                                     struct dlb2_hw_domain *domain,
                                     u32 num_hist_list_entries,
                                     struct dlb2_cmd_response *resp)
{
        struct dlb2_bitmap *bitmap;
        int base;

        if (num_hist_list_entries) {
                bitmap = rsrcs->avail_hist_list_entries;

                base = dlb2_bitmap_find_set_bit_range(bitmap,
                                                      num_hist_list_entries);
                if (base < 0)
                        goto error;

                domain->total_hist_list_entries = num_hist_list_entries;
                domain->avail_hist_list_entries = num_hist_list_entries;
                domain->hist_list_entry_base = base;
                domain->hist_list_entry_offset = 0;

                dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
        }
        return 0;

error:
        resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
        return -EINVAL;
}

static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
                                  struct dlb2_function_resources *rsrcs,
                                  struct dlb2_hw_domain *domain,
                                  u32 num_queues,
                                  struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_ldb_queues < num_queues) {
                resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_queues; i++) {
                struct dlb2_ldb_queue *queue;

                queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
                                            typeof(*queue));
                if (queue == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);

                queue->domain_id = domain->id;
                queue->owned = true;

                dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
        }

        rsrcs->num_avail_ldb_queues -= num_queues;

        return 0;
}

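/*
 * Attach all resources requested in args (queues, ports, credits, history
 * list entries, and atomic inflights) to the domain, program the domain's
 * credit counts, and mark it configured. The caller is expected to have
 * already validated the request, so any failure here is an internal error.
 */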
static int
dlb2_domain_attach_resources(struct dlb2_hw *hw,
                             struct dlb2_function_resources *rsrcs,
                             struct dlb2_hw_domain *domain,
                             struct dlb2_create_sched_domain_args *args,
                             struct dlb2_cmd_response *resp)
{
        int ret;

        ret = dlb2_attach_ldb_queues(hw,
                                     rsrcs,
                                     domain,
                                     args->num_ldb_queues,
                                     resp);
        if (ret)
                return ret;

        ret = dlb2_attach_ldb_ports(hw,
                                    rsrcs,
                                    domain,
                                    args,
                                    resp);
        if (ret)
                return ret;

        ret = dlb2_attach_dir_ports(hw,
                                    rsrcs,
                                    domain,
                                    args->num_dir_ports,
                                    resp);
        if (ret)
                return ret;

        if (hw->ver == DLB2_HW_V2) {
                ret = dlb2_attach_ldb_credits(rsrcs,
                                              domain,
                                              args->num_ldb_credits,
                                              resp);
                if (ret)
                        return ret;

                ret = dlb2_attach_dir_credits(rsrcs,
                                              domain,
                                              args->num_dir_credits,
                                              resp);
                if (ret)
                        return ret;
        } else {  /* DLB 2.5 */
                ret = dlb2_attach_credits(rsrcs,
                                          domain,
                                          args->num_credits,
                                          resp);
                if (ret)
                        return ret;
        }

        ret = dlb2_attach_domain_hist_list_entries(rsrcs,
                                                   domain,
                                                   args->num_hist_list_entries,
                                                   resp);
        if (ret)
                return ret;

        ret = dlb2_attach_atomic_inflights(rsrcs,
                                           domain,
                                           args->num_atomic_inflights,
                                           resp);
        if (ret)
                return ret;

        dlb2_configure_domain_credits(hw, domain);

        domain->configured = true;

        domain->started = false;

        rsrcs->num_avail_domains--;

        return 0;
}

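/*
 * Check that the function has enough free resources (a domain, queues,
 * ports, credits, atomic inflights, and a large enough contiguous history
 * list range) to satisfy a domain-creation request. On success, the domain
 * that will be used is returned via out_domain.
 */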
static int
dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
                                  struct dlb2_create_sched_domain_args *args,
                                  struct dlb2_cmd_response *resp,
                                  struct dlb2_hw *hw,
                                  struct dlb2_hw_domain **out_domain)
{
        u32 num_avail_ldb_ports, req_ldb_ports;
        struct dlb2_bitmap *avail_hl_entries;
        unsigned int max_contig_hl_range;
        struct dlb2_hw_domain *domain;
        int i;

        avail_hl_entries = rsrcs->avail_hist_list_entries;

        max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);

        num_avail_ldb_ports = 0;
        req_ldb_ports = 0;
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];

                req_ldb_ports += args->num_cos_ldb_ports[i];
        }

        req_ldb_ports += args->num_ldb_ports;

        if (rsrcs->num_avail_domains < 1) {
                resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
                return -EINVAL;
        }

        domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
        if (domain == NULL) {
                resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
                return -EFAULT;
        }

        if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
                resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
                return -EINVAL;
        }

        if (req_ldb_ports > num_avail_ldb_ports) {
                resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
                if (args->num_cos_ldb_ports[i] >
                    rsrcs->num_avail_ldb_ports[i]) {
                        resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                        return -EINVAL;
                }
        }

        if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
                resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
                return -EINVAL;
        }

        if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
                resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        if (hw->ver == DLB2_HW_V2_5) {
                if (rsrcs->num_avail_entries < args->num_credits) {
                        resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
                        return -EINVAL;
                }
        } else {
                if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
                        resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
                        return -EINVAL;
                }
                if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
                        resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
                        return -EINVAL;
                }
        }

        if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
                resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
                return -EINVAL;
        }

        if (max_contig_hl_range < args->num_hist_list_entries) {
                resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
                return -EINVAL;
        }

        *out_domain = domain;

        return 0;
}

static void
dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
                                  struct dlb2_create_sched_domain_args *args,
                                  bool vdev_req,
                                  unsigned int vdev_id)
{
        DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
        if (vdev_req)
                DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
        DLB2_HW_DBG(hw, "\tNumber of LDB queues:          %d\n",
                    args->num_ldb_queues);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
                    args->num_ldb_ports);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0):   %d\n",
                    args->num_cos_ldb_ports[0]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1):   %d\n",
                    args->num_cos_ldb_ports[1]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2):   %d\n",
                    args->num_cos_ldb_ports[2]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3):   %d\n",
                    args->num_cos_ldb_ports[3]);
        DLB2_HW_DBG(hw, "\tStrict CoS allocation:         %d\n",
                    args->cos_strict);
        DLB2_HW_DBG(hw, "\tNumber of DIR ports:           %d\n",
                    args->num_dir_ports);
        DLB2_HW_DBG(hw, "\tNumber of ATM inflights:       %d\n",
                    args->num_atomic_inflights);
        DLB2_HW_DBG(hw, "\tNumber of hist list entries:   %d\n",
                    args->num_hist_list_entries);
        if (hw->ver == DLB2_HW_V2) {
                DLB2_HW_DBG(hw, "\tNumber of LDB credits:         %d\n",
                            args->num_ldb_credits);
                DLB2_HW_DBG(hw, "\tNumber of DIR credits:         %d\n",
                            args->num_dir_credits);
        } else {
                DLB2_HW_DBG(hw, "\tNumber of credits:             %d\n",
                            args->num_credits);
        }
}

/**
 * dlb2_hw_create_sched_domain() - create a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @args: scheduling domain creation arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function creates a scheduling domain containing the resources specified
 * in args. The individual resources (queues, ports, credits) can be configured
 * after creating a scheduling domain.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the domain ID.
 *
 * resp->id contains a virtual ID if vdev_req is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, or the requested domain name
 *          is already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
                                struct dlb2_create_sched_domain_args *args,
                                struct dlb2_cmd_response *resp,
                                bool vdev_req,
                                unsigned int vdev_id)
{
        struct dlb2_function_resources *rsrcs;
        struct dlb2_hw_domain *domain;
        int ret;

        rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

        dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);

        /*
         * Verify that hardware resources are available before attempting to
         * satisfy the request. This simplifies the error unwinding code.
         */
        ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp, hw, &domain);
        if (ret)
                return ret;

        dlb2_init_domain_rsrc_lists(domain);

        ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
        if (ret) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to attach the domain's resources\n",
                            __func__);

                return ret;
        }

        dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);

        dlb2_list_add(&rsrcs->used_domains, &domain->func_list);

        resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
        resp->status = 0;

        return 0;
}

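/*
 * Set the directed port's CQ disable bit so the device stops scheduling to
 * its consumer queue, then read back a CSR to ensure the write completed.
 */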
static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
                                     struct dlb2_dir_pq_pair *port)
{
        u32 reg = 0;

        DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
        DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

        dlb2_flush_csr(hw);
}

static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
                                   struct dlb2_dir_pq_pair *port)
{
        u32 cnt;

        cnt = DLB2_CSR_RD(hw,
                          DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));

        /*
         * Account for the initial token count, which is used to provide a CQ
         * with depth less than 8.
         */
        return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
               port->init_tkn_cnt;
}

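/*
 * Return a directed CQ's outstanding tokens to the device by writing a batch
 * token-return HCW (padded with NOOP HCWs) to the port's producer port.
 */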
static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
                              struct dlb2_dir_pq_pair *port)
{
        unsigned int port_id = port->id.phys_id;
        u32 cnt;

        /* Return any outstanding tokens */
        cnt = dlb2_dir_cq_token_count(hw, port);

        if (cnt != 0) {
                struct dlb2_hcw hcw_mem[8], *hcw;
                void __iomem *pp_addr;

                pp_addr = os_map_producer_port(hw, port_id, false);

                /* Point hcw to a 64B-aligned location */
                hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

                /*
                 * Program the first HCW for a batch token return and
                 * the rest as NOOPs
                 */
                memset(hcw, 0, 4 * sizeof(*hcw));
                hcw->cq_token = 1;
                hcw->lock_id = cnt - 1;

                dlb2_movdir64b(pp_addr, hcw);

                os_fence_hcw(hw, pp_addr);

                os_unmap_producer_port(hw, pp_addr);
        }
}

static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
                                    struct dlb2_dir_pq_pair *port)
{
        u32 reg = 0;

        DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

        dlb2_flush_csr(hw);
}

static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
                                     struct dlb2_hw_domain *domain,
                                     bool toggle_port)
{
        struct dlb2_list_entry *iter;
        struct dlb2_dir_pq_pair *port;
        RTE_SET_USED(iter);

        DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
                /*
                 * Can't drain a port if it's not configured, and there's
                 * nothing to drain if its queue is unconfigured.
                 */
                if (!port->port_configured || !port->queue_configured)
                        continue;

                if (toggle_port)
                        dlb2_dir_port_cq_disable(hw, port);

                dlb2_drain_dir_cq(hw, port);

                if (toggle_port)
                        dlb2_dir_port_cq_enable(hw, port);
        }

        return 0;
}

static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
                                struct dlb2_dir_pq_pair *queue)
{
        u32 cnt;

        cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
                                                      queue->id.phys_id));

        return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
}

static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
                                    struct dlb2_dir_pq_pair *queue)
{
        return dlb2_dir_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
                                         struct dlb2_hw_domain *domain)
{
        struct dlb2_list_entry *iter;
        struct dlb2_dir_pq_pair *queue;
        RTE_SET_USED(iter);

        DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
                if (!dlb2_dir_queue_is_empty(hw, queue))
                        return false;
        }

        return true;
}

static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
                                        struct dlb2_hw_domain *domain)
{
        int i;

        /* If the domain hasn't been started, there's no traffic to drain */
        if (!domain->started)
                return 0;

        for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
                dlb2_domain_drain_dir_cqs(hw, domain, true);

                if (dlb2_domain_dir_queues_empty(hw, domain))
                        break;
        }

        if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to empty queues\n",
                            __func__);
                return -EFAULT;
        }

        /*
         * Drain the CQs one more time: for the queues to have gone empty,
         * they must have scheduled one or more QEs into the CQs, and those
         * QEs must be drained as well.
         */
        dlb2_domain_drain_dir_cqs(hw, domain, true);

        return 0;
}

static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
                                    struct dlb2_ldb_port *port)
{
        u32 reg = 0;

        /*
         * Don't re-enable the port if a removal is pending. The caller should
         * mark this port as enabled (if it isn't already), and when the
         * removal completes the port will be enabled.
         */
        if (port->num_pending_removals)
                return;

        DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

        dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
                                     struct dlb2_ldb_port *port)
{
        u32 reg = 0;

        DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
        DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

        dlb2_flush_csr(hw);
}

static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
                                      struct dlb2_ldb_port *port)
{
        u32 cnt;

        cnt = DLB2_CSR_RD(hw,
                          DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));

        return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
}

static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
                                   struct dlb2_ldb_port *port)
{
        u32 cnt;

        cnt = DLB2_CSR_RD(hw,
                          DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));

        /*
         * Account for the initial token count, which is used to provide a CQ
         * with depth less than 8.
         */
        return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
                port->init_tkn_cnt;
}

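/*
 * Drain a load-balanced CQ: the first HCW returns the CQ's tokens and
 * completes one inflight QE; additional completion-only HCWs retire any
 * remaining inflights.
 */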
1254 static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
1255 {
1256         u32 infl_cnt, tkn_cnt;
1257         unsigned int i;
1258
1259         infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
1260         tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
1261
1262         if (infl_cnt || tkn_cnt) {
1263                 struct dlb2_hcw hcw_mem[8], *hcw;
1264                 void __iomem *pp_addr;
1265
1266                 pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
1267
1268                 /* Point hcw to a 64B-aligned location */
1269                 hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1270
1271                 /*
1272                  * Program the first HCW for a completion and token return and
1273                  * the other HCWs as NOOPS
1274                  */
1275
1276                 memset(hcw, 0, 4 * sizeof(*hcw));
1277                 hcw->qe_comp = (infl_cnt > 0);
1278                 hcw->cq_token = (tkn_cnt > 0);
1279                 hcw->lock_id = tkn_cnt - 1;
1280
1281                 /* Return tokens in the first HCW */
1282                 dlb2_movdir64b(pp_addr, hcw);
1283
1284                 hcw->cq_token = 0;
1285
1286                 /* Issue remaining completions (if any) */
1287                 for (i = 1; i < infl_cnt; i++)
1288                         dlb2_movdir64b(pp_addr, hcw);
1289
1290                 os_fence_hcw(hw, pp_addr);
1291
1292                 os_unmap_producer_port(hw, pp_addr);
1293         }
1294 }
1295
1296 static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
1297                                       struct dlb2_hw_domain *domain,
1298                                       bool toggle_port)
1299 {
1300         struct dlb2_list_entry *iter;
1301         struct dlb2_ldb_port *port;
1302         int i;
1303         RTE_SET_USED(iter);
1304
1305         /* If the domain hasn't been started, there's no traffic to drain */
1306         if (!domain->started)
1307                 return;
1308
1309         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1310                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1311                         if (toggle_port)
1312                                 dlb2_ldb_port_cq_disable(hw, port);
1313
1314                         dlb2_drain_ldb_cq(hw, port);
1315
1316                         if (toggle_port)
1317                                 dlb2_ldb_port_cq_enable(hw, port);
1318                 }
1319         }
1320 }
1321
1322 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
1323                                 struct dlb2_ldb_queue *queue)
1324 {
1325         u32 aqed, ldb, atm;
1326
1327         aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
1328                                                        queue->id.phys_id));
1329         ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
1330                                                       queue->id.phys_id));
1331         atm = DLB2_CSR_RD(hw,
1332                           DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));
1333
1334         return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
1335                + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
1336                + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
1337 }
1338
1339 static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
1340                                     struct dlb2_ldb_queue *queue)
1341 {
1342         return dlb2_ldb_queue_depth(hw, queue) == 0;
1343 }
1344
1345 static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
1346                                             struct dlb2_hw_domain *domain)
1347 {
1348         struct dlb2_list_entry *iter;
1349         struct dlb2_ldb_queue *queue;
1350         RTE_SET_USED(iter);
1351
1352         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
1353                 if (queue->num_mappings == 0)
1354                         continue;
1355
1356                 if (!dlb2_ldb_queue_is_empty(hw, queue))
1357                         return false;
1358         }
1359
1360         return true;
1361 }
1362
1363 static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
1364                                            struct dlb2_hw_domain *domain)
1365 {
1366         int i;
1367
1368         /* If the domain hasn't been started, there's no traffic to drain */
1369         if (!domain->started)
1370                 return 0;
1371
1372         if (domain->num_pending_removals > 0) {
1373                 DLB2_HW_ERR(hw,
1374                             "[%s()] Internal error: failed to unmap domain queues\n",
1375                             __func__);
1376                 return -EFAULT;
1377         }
1378
1379         for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1380                 dlb2_domain_drain_ldb_cqs(hw, domain, true);
1381
1382                 if (dlb2_domain_mapped_queues_empty(hw, domain))
1383                         break;
1384         }
1385
1386         if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1387                 DLB2_HW_ERR(hw,
1388                             "[%s()] Internal error: failed to empty queues\n",
1389                             __func__);
1390                 return -EFAULT;
1391         }
1392
1393         /*
1394          * Drain the CQs one more time. For the queues to go empty, they would
1395          * have scheduled one or more QEs.
1396          */
1397         dlb2_domain_drain_ldb_cqs(hw, domain, true);
1398
1399         return 0;
1400 }
1401
1402 static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
1403                                        struct dlb2_hw_domain *domain)
1404 {
1405         struct dlb2_list_entry *iter;
1406         struct dlb2_ldb_port *port;
1407         int i;
1408         RTE_SET_USED(iter);
1409
1410         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1411                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1412                         port->enabled = true;
1413
1414                         dlb2_ldb_port_cq_enable(hw, port);
1415                 }
1416         }
1417 }
1418
1419 static struct dlb2_ldb_queue *
1420 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
1421                            u32 id,
1422                            bool vdev_req,
1423                            unsigned int vdev_id)
1424 {
1425         struct dlb2_list_entry *iter1;
1426         struct dlb2_list_entry *iter2;
1427         struct dlb2_function_resources *rsrcs;
1428         struct dlb2_hw_domain *domain;
1429         struct dlb2_ldb_queue *queue;
1430         RTE_SET_USED(iter1);
1431         RTE_SET_USED(iter2);
1432
1433         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1434                 return NULL;
1435
1436         rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1437
1438         if (!vdev_req)
1439                 return &hw->rsrcs.ldb_queues[id];
1440
1441         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
1442                 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
1443                         if (queue->id.virt_id == id)
1444                                 return queue;
1445                 }
1446         }
1447
1448         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
1449                 if (queue->id.virt_id == id)
1450                         return queue;
1451         }
1452
1453         return NULL;
1454 }
1455
1456 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
1457                                                       u32 id,
1458                                                       bool vdev_req,
1459                                                       unsigned int vdev_id)
1460 {
1461         struct dlb2_list_entry *iteration;
1462         struct dlb2_function_resources *rsrcs;
1463         struct dlb2_hw_domain *domain;
1464         RTE_SET_USED(iteration);
1465
1466         if (id >= DLB2_MAX_NUM_DOMAINS)
1467                 return NULL;
1468
1469         if (!vdev_req)
1470                 return &hw->domains[id];
1471
1472         rsrcs = &hw->vdev[vdev_id];
1473
1474         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
1475                 if (domain->id.virt_id == id)
1476                         return domain;
1477         }
1478
1479         return NULL;
1480 }
1481
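/*
 * dlb2_port_slot_state_transition() drives the per-slot QID-map state
 * machine. A slot moves between five states (UNMAPPED, MAPPED,
 * MAP_IN_PROG, UNMAP_IN_PROG, and UNMAP_IN_PROG_PENDING_MAP), and each
 * legal transition adjusts the queue's and port's num_mappings, the
 * pending-addition counts (queue and domain), and the pending-removal
 * counts (port and domain). Any transition not handled below is an
 * internal error and returns -EFAULT.
 */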
1482 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
1483                                            struct dlb2_ldb_port *port,
1484                                            struct dlb2_ldb_queue *queue,
1485                                            int slot,
1486                                            enum dlb2_qid_map_state new_state)
1487 {
1488         enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
1489         struct dlb2_hw_domain *domain;
1490         int domain_id;
1491
1492         domain_id = port->domain_id.phys_id;
1493
1494         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1495         if (domain == NULL) {
1496                 DLB2_HW_ERR(hw,
1497                             "[%s()] Internal error: unable to find domain %d\n",
1498                             __func__, domain_id);
1499                 return -EINVAL;
1500         }
1501
1502         switch (curr_state) {
1503         case DLB2_QUEUE_UNMAPPED:
1504                 switch (new_state) {
1505                 case DLB2_QUEUE_MAPPED:
1506                         queue->num_mappings++;
1507                         port->num_mappings++;
1508                         break;
1509                 case DLB2_QUEUE_MAP_IN_PROG:
1510                         queue->num_pending_additions++;
1511                         domain->num_pending_additions++;
1512                         break;
1513                 default:
1514                         goto error;
1515                 }
1516                 break;
1517         case DLB2_QUEUE_MAPPED:
1518                 switch (new_state) {
1519                 case DLB2_QUEUE_UNMAPPED:
1520                         queue->num_mappings--;
1521                         port->num_mappings--;
1522                         break;
1523                 case DLB2_QUEUE_UNMAP_IN_PROG:
1524                         port->num_pending_removals++;
1525                         domain->num_pending_removals++;
1526                         break;
1527                 case DLB2_QUEUE_MAPPED:
1528                         /* Priority change, nothing to update */
1529                         break;
1530                 default:
1531                         goto error;
1532                 }
1533                 break;
1534         case DLB2_QUEUE_MAP_IN_PROG:
1535                 switch (new_state) {
1536                 case DLB2_QUEUE_UNMAPPED:
1537                         queue->num_pending_additions--;
1538                         domain->num_pending_additions--;
1539                         break;
1540                 case DLB2_QUEUE_MAPPED:
1541                         queue->num_mappings++;
1542                         port->num_mappings++;
1543                         queue->num_pending_additions--;
1544                         domain->num_pending_additions--;
1545                         break;
1546                 default:
1547                         goto error;
1548                 }
1549                 break;
1550         case DLB2_QUEUE_UNMAP_IN_PROG:
1551                 switch (new_state) {
1552                 case DLB2_QUEUE_UNMAPPED:
1553                         port->num_pending_removals--;
1554                         domain->num_pending_removals--;
1555                         queue->num_mappings--;
1556                         port->num_mappings--;
1557                         break;
1558                 case DLB2_QUEUE_MAPPED:
1559                         port->num_pending_removals--;
1560                         domain->num_pending_removals--;
1561                         break;
1562                 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1563                         /* Nothing to update */
1564                         break;
1565                 default:
1566                         goto error;
1567                 }
1568                 break;
1569         case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1570                 switch (new_state) {
1571                 case DLB2_QUEUE_UNMAP_IN_PROG:
1572                         /* Nothing to update */
1573                         break;
1574                 case DLB2_QUEUE_UNMAPPED:
1575                         /*
1576                          * An UNMAP_IN_PROG_PENDING_MAP slot briefly
1577                          * becomes UNMAPPED before it transitions to
1578                          * MAP_IN_PROG.
1579                          */
1580                         queue->num_mappings--;
1581                         port->num_mappings--;
1582                         port->num_pending_removals--;
1583                         domain->num_pending_removals--;
1584                         break;
1585                 default:
1586                         goto error;
1587                 }
1588                 break;
1589         default:
1590                 goto error;
1591         }
1592
1593         port->qid_map[slot].state = new_state;
1594
1595         DLB2_HW_DBG(hw,
1596                     "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
1597                     __func__, queue->id.phys_id, port->id.phys_id,
1598                     curr_state, new_state);
1599         return 0;
1600
1601 error:
1602         DLB2_HW_ERR(hw,
1603                     "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
1604                     __func__, queue->id.phys_id, port->id.phys_id,
1605                     curr_state, new_state);
1606         return -EFAULT;
1607 }
1608
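/*
 * dlb2_port_find_slot() and dlb2_port_find_slot_queue() scan the port's
 * qid_map for the first slot in the requested state (and, for the latter,
 * mapped to the given queue). The slot index is written through *slot, and
 * the return value indicates whether a matching slot was found; on failure
 * *slot equals DLB2_MAX_NUM_QIDS_PER_LDB_CQ and must not be used.
 */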
1609 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
1610                                 enum dlb2_qid_map_state state,
1611                                 int *slot)
1612 {
1613         int i;
1614
1615         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1616                 if (port->qid_map[i].state == state)
1617                         break;
1618         }
1619
1620         *slot = i;
1621
1622         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1623 }
1624
1625 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
1626                                       enum dlb2_qid_map_state state,
1627                                       struct dlb2_ldb_queue *queue,
1628                                       int *slot)
1629 {
1630         int i;
1631
1632         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1633                 if (port->qid_map[i].state == state &&
1634                     port->qid_map[i].qid == queue->id.phys_id)
1635                         break;
1636         }
1637
1638         *slot = i;
1639
1640         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1641 }
1642
1643 /*
1644  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() only touch CQs whose slot
1645  * for the given queue is MAPPED, and only if the port's software 'enabled'
1646  * flag is set; they should only be called by the dynamic CQ mapping code.
1647  */
1648 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
1649                                               struct dlb2_hw_domain *domain,
1650                                               struct dlb2_ldb_queue *queue)
1651 {
1652         struct dlb2_list_entry *iter;
1653         struct dlb2_ldb_port *port;
1654         int slot, i;
1655         RTE_SET_USED(iter);
1656
1657         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1658                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1659                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1660
1661                         if (!dlb2_port_find_slot_queue(port, state,
1662                                                        queue, &slot))
1663                                 continue;
1664
1665                         if (port->enabled)
1666                                 dlb2_ldb_port_cq_disable(hw, port);
1667                 }
1668         }
1669 }
1670
1671 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
1672                                              struct dlb2_hw_domain *domain,
1673                                              struct dlb2_ldb_queue *queue)
1674 {
1675         struct dlb2_list_entry *iter;
1676         struct dlb2_ldb_port *port;
1677         int slot, i;
1678         RTE_SET_USED(iter);
1679
1680         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1681                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1682                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1683
1684                         if (!dlb2_port_find_slot_queue(port, state,
1685                                                        queue, &slot))
1686                                 continue;
1687
1688                         if (port->enabled)
1689                                 dlb2_ldb_port_cq_enable(hw, port);
1690                 }
1691         }
1692 }
1693
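/*
 * The {clear, set}_queue_if_status helpers update the scheduler's
 * interface status for a {CQ, slot} pair via the LSP_LDB_SCHED_CTRL
 * register: 'clear' selects the inflight-OK field with VALUE unset,
 * 'set' selects it with VALUE set. Each write is flushed by a CSR read.
 */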
1694 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
1695                                                 struct dlb2_ldb_port *port,
1696                                                 int slot)
1697 {
1698         u32 ctrl = 0;
1699
1700         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1701         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1702         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1703
1704         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1705
1706         dlb2_flush_csr(hw);
1707 }
1708
1709 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
1710                                               struct dlb2_ldb_port *port,
1711                                               int slot)
1712 {
1713         u32 ctrl = 0;
1714
1715         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1716         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1717         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1718         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1719
1720         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1721
1722         dlb2_flush_csr(hw);
1723 }
1724
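/*
 * Statically map a queue to a port slot by writing the registers directly.
 * This path is safe while the domain is stopped, and it is also used as the
 * final step of a dynamic map once the queue has been quiesced. It programs
 * the slot's priority and valid bits (CQ2PRIOV), the slot-to-QID lookup
 * (CQ2QID0/1), and the QID-to-CQ bit vectors (ATM_QID2CQIDIX and
 * LSP_QID2CQIDIX/2), then records the mapping in software state.
 */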
1725 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
1726                                         struct dlb2_ldb_port *p,
1727                                         struct dlb2_ldb_queue *q,
1728                                         u8 priority)
1729 {
1730         enum dlb2_qid_map_state state;
1731         u32 lsp_qid2cq2;
1732         u32 lsp_qid2cq;
1733         u32 atm_qid2cq;
1734         u32 cq2priov;
1735         u32 cq2qid;
1736         int i;
1737
1738         /* Look for a pending or already mapped slot, else an unused slot */
1739         if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
1740             !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
1741             !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
1742                 DLB2_HW_ERR(hw,
1743                             "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
1744                             __func__, __LINE__);
1745                 return -EFAULT;
1746         }
1747
1748         /* Read-modify-write the priority and valid bit register */
1749         cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));
1750
1751         cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
1752         cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
1753                     & DLB2_LSP_CQ2PRIOV_PRIO;
1754
1755         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);
1756
1757         /* Read-modify-write the QID map register */
1758         if (i < 4)
1759                 cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
1760                                                           p->id.phys_id));
1761         else
1762                 cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
1763                                                           p->id.phys_id));
1764
1765         if (i == 0 || i == 4)
1766                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
1767         if (i == 1 || i == 5)
1768                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
1769         if (i == 2 || i == 6)
1770                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
1771         if (i == 3 || i == 7)
1772                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);
1773
1774         if (i < 4)
1775                 DLB2_CSR_WR(hw,
1776                             DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
1777         else
1778                 DLB2_CSR_WR(hw,
1779                             DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);
1780
1781         atm_qid2cq = DLB2_CSR_RD(hw,
1782                                  DLB2_ATM_QID2CQIDIX(q->id.phys_id,
1783                                                 p->id.phys_id / 4));
1784
1785         lsp_qid2cq = DLB2_CSR_RD(hw,
1786                                  DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
1787                                                 p->id.phys_id / 4));
1788
1789         lsp_qid2cq2 = DLB2_CSR_RD(hw,
1790                                   DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
1791                                                   p->id.phys_id / 4));
1792
1793         switch (p->id.phys_id % 4) {
1794         case 0:
1795                 DLB2_BIT_SET(atm_qid2cq,
1796                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
1797                 DLB2_BIT_SET(lsp_qid2cq,
1798                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
1799                 DLB2_BIT_SET(lsp_qid2cq2,
1800                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
1801                 break;
1802
1803         case 1:
1804                 DLB2_BIT_SET(atm_qid2cq,
1805                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
1806                 DLB2_BIT_SET(lsp_qid2cq,
1807                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
1808                 DLB2_BIT_SET(lsp_qid2cq2,
1809                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
1810                 break;
1811
1812         case 2:
1813                 DLB2_BIT_SET(atm_qid2cq,
1814                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
1815                 DLB2_BIT_SET(lsp_qid2cq,
1816                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
1817                 DLB2_BIT_SET(lsp_qid2cq2,
1818                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
1819                 break;
1820
1821         case 3:
1822                 DLB2_BIT_SET(atm_qid2cq,
1823                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
1824                 DLB2_BIT_SET(lsp_qid2cq,
1825                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
1826                 DLB2_BIT_SET(lsp_qid2cq2,
1827                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
1828                 break;
1829         }
1830
1831         DLB2_CSR_WR(hw,
1832                     DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1833                     atm_qid2cq);
1834
1835         DLB2_CSR_WR(hw,
1836                     DLB2_LSP_QID2CQIDIX(hw->ver,
1837                                         q->id.phys_id, p->id.phys_id / 4),
1838                     lsp_qid2cq);
1839
1840         DLB2_CSR_WR(hw,
1841                     DLB2_LSP_QID2CQIDIX2(hw->ver,
1842                                          q->id.phys_id, p->id.phys_id / 4),
1843                     lsp_qid2cq2);
1844
1845         dlb2_flush_csr(hw);
1846
1847         p->qid_map[i].qid = q->id.phys_id;
1848         p->qid_map[i].priority = priority;
1849
1850         state = DLB2_QUEUE_MAPPED;
1851
1852         return dlb2_port_slot_state_transition(hw, p, q, i, state);
1853 }
1854
1855 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
1856                                            struct dlb2_ldb_port *port,
1857                                            struct dlb2_ldb_queue *queue,
1858                                            int slot)
1859 {
1860         u32 ctrl = 0;
1861         u32 active;
1862         u32 enq;
1863
1864         /* Set the atomic scheduling haswork bit */
1865         active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
1866                                                          queue->id.phys_id));
1867
1868         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1869         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1870         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1871         DLB2_BITS_SET(ctrl,
1872                       DLB2_BITS_GET(active,
1873                                     DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
1874                                     DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
1875
1876         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1877
1878         /* Set the non-atomic scheduling haswork bit */
1879         enq = DLB2_CSR_RD(hw,
1880                           DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
1881                                                        queue->id.phys_id));
1882
1883         memset(&ctrl, 0, sizeof(ctrl));
1884
1885         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1886         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1887         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1888         DLB2_BITS_SET(ctrl,
1889                       DLB2_BITS_GET(enq,
1890                                     DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
1891                       DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
1892
1893         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1894
1895         dlb2_flush_csr(hw);
1896
1897         return 0;
1898 }
1899
1900 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
1901                                               struct dlb2_ldb_port *port,
1902                                               u8 slot)
1903 {
1904         u32 ctrl = 0;
1905
1906         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1907         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1908         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
1909
1910         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1911
1912         memset(&ctrl, 0, sizeof(ctrl));
1913
1914         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1915         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1916         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
1917
1918         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1919
1920         dlb2_flush_csr(hw);
1921 }
1922
1923
1924 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
1925                                               struct dlb2_ldb_queue *queue)
1926 {
1927         u32 infl_lim = 0;
1928
1929         DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
1930                  DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
1931
1932         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
1933                     infl_lim);
1934 }
1935
1936 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
1937                                                 struct dlb2_ldb_queue *queue)
1938 {
1939         DLB2_CSR_WR(hw,
1940                     DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
1941                     DLB2_LSP_QID_LDB_INFL_LIM_RST);
1942 }
1943
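/*
 * Complete a deferred (dynamic) map. The queue's inflight count must have
 * reached zero by this point. The queue is statically mapped to the port,
 * the slot's has_work and IF status bits are brought up to date, the
 * queue's inflight limit is restored, and the CQs that were disabled
 * during the procedure are re-enabled.
 */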
1944 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
1945                                                 struct dlb2_hw_domain *domain,
1946                                                 struct dlb2_ldb_port *port,
1947                                                 struct dlb2_ldb_queue *queue)
1948 {
1949         struct dlb2_list_entry *iter;
1950         enum dlb2_qid_map_state state;
1951         int slot, ret, i;
1952         u32 infl_cnt;
1953         u8 prio;
1954         RTE_SET_USED(iter);
1955
1956         infl_cnt = DLB2_CSR_RD(hw,
1957                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
1958                                                     queue->id.phys_id));
1959
1960         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
1961                 DLB2_HW_ERR(hw,
1962                             "[%s()] Internal error: non-zero QID inflight count\n",
1963                             __func__);
1964                 return -EINVAL;
1965         }
1966
1967         /*
1968          * Statically map the queue to the port and set its has_work bits.
1969          */
1970         state = DLB2_QUEUE_MAP_IN_PROG;
1971         if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
1972                 return -EINVAL;
1973
1974         prio = port->qid_map[slot].priority;
1975
1976         /*
1977          * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
1978          * the port's qid_map state.
1979          */
1980         ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
1981         if (ret)
1982                 return ret;
1983
1984         ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
1985         if (ret)
1986                 return ret;
1987
1988         /*
1989          * Ensure IF_status(cq,qid) is 0 before enabling the port to
1990          * prevent spurious schedules from causing the queue's inflight
1991          * count to increase.
1992          */
1993         dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
1994
1995         /* Reset the queue's inflight status */
1996         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1997                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1998                         state = DLB2_QUEUE_MAPPED;
1999                         if (!dlb2_port_find_slot_queue(port, state,
2000                                                        queue, &slot))
2001                                 continue;
2002
2003                         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2004                 }
2005         }
2006
2007         dlb2_ldb_queue_set_inflight_limit(hw, queue);
2008
2009         /* Re-enable CQs mapped to this queue */
2010         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2011
2012         /* If this queue has other mappings pending, clear its inflight limit */
2013         if (queue->num_pending_additions > 0)
2014                 dlb2_ldb_queue_clear_inflight_limit(hw, queue);
2015
2016         return 0;
2017 }
2018
2019 /**
2020  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
2021  * @hw: dlb2_hw handle for a particular device.
2022  * @port: load-balanced port
2023  * @queue: load-balanced queue
2024  * @priority: queue servicing priority
2025  *
2026  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
2027  * at a later point, and <0 if an error occurred.
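 *
 * When 1 is returned, the slot is left in the MAP_IN_PROG state and the
 * map is completed later (see dlb2_domain_finish_map_qid_procedures())
 * once the queue's inflight count reaches zero.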
2028  */
2029 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
2030                                          struct dlb2_ldb_port *port,
2031                                          struct dlb2_ldb_queue *queue,
2032                                          u8 priority)
2033 {
2034         enum dlb2_qid_map_state state;
2035         struct dlb2_hw_domain *domain;
2036         int domain_id, slot, ret;
2037         u32 infl_cnt;
2038
2039         domain_id = port->domain_id.phys_id;
2040
2041         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
2042         if (domain == NULL) {
2043                 DLB2_HW_ERR(hw,
2044                             "[%s()] Internal error: unable to find domain %d\n",
2045                             __func__, port->domain_id.phys_id);
2046                 return -EINVAL;
2047         }
2048
2049         /*
2050          * Set the QID inflight limit to 0 to prevent further scheduling of the
2051          * queue.
2052          */
2053         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
2054                                                   queue->id.phys_id), 0);
2055
2056         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
2057                 DLB2_HW_ERR(hw,
2058                             "Internal error: No available unmapped slots\n");
2059                 return -EFAULT;
2060         }
2061
2062         port->qid_map[slot].qid = queue->id.phys_id;
2063         port->qid_map[slot].priority = priority;
2064
2065         state = DLB2_QUEUE_MAP_IN_PROG;
2066         ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
2067         if (ret)
2068                 return ret;
2069
2070         infl_cnt = DLB2_CSR_RD(hw,
2071                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2072                                                     queue->id.phys_id));
2073
2074         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2075                 /*
2076                  * The queue is owed completions so it's not safe to map it
2077                  * yet. Schedule a kernel thread to complete the mapping later,
2078                  * once software has completed all the queue's inflight events.
2079                  */
2080                 if (!os_worker_active(hw))
2081                         os_schedule_work(hw);
2082
2083                 return 1;
2084         }
2085
2086         /*
2087          * Disable the affected CQ, and the CQs already mapped to the QID,
2088          * before reading the QID's inflight count a second time. There is an
2089          * unlikely race in which the QID may schedule one more QE after we
2090          * read an inflight count of 0, and disabling the CQs guarantees that
2091          * the race will not occur after a re-read of the inflight count
2092          * register.
2093          */
2094         if (port->enabled)
2095                 dlb2_ldb_port_cq_disable(hw, port);
2096
2097         dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2098
2099         infl_cnt = DLB2_CSR_RD(hw,
2100                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2101                                                     queue->id.phys_id));
2102
2103         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2104                 if (port->enabled)
2105                         dlb2_ldb_port_cq_enable(hw, port);
2106
2107                 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2108
2109                 /*
2110                  * The queue is owed completions so it's not safe to map it
2111                  * yet. Schedule a kernel thread to complete the mapping later,
2112                  * once software has completed all the queue's inflight events.
2113                  */
2114                 if (!os_worker_active(hw))
2115                         os_schedule_work(hw);
2116
2117                 return 1;
2118         }
2119
2120         return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2121 }
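
/*
 * Illustrative use of the dynamic map path (a hypothetical caller, shown
 * for clarity only; it is not part of the driver):
 *
 *	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
 *	if (ret == 1)
 *		the map completes asynchronously; wait for the domain's
 *		num_pending_additions to drain to zero;
 *	else if (ret < 0)
 *		handle the error (invalid transition or no free slot);
 */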
2122
2123 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
2124                                         struct dlb2_hw_domain *domain,
2125                                         struct dlb2_ldb_port *port)
2126 {
2127         int i;
2128
2129         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2130                 u32 infl_cnt;
2131                 struct dlb2_ldb_queue *queue;
2132                 int qid;
2133
2134                 if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
2135                         continue;
2136
2137                 qid = port->qid_map[i].qid;
2138
2139                 queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
2140
2141                 if (queue == NULL) {
2142                         DLB2_HW_ERR(hw,
2143                                     "[%s()] Internal error: unable to find queue %d\n",
2144                                     __func__, qid);
2145                         continue;
2146                 }
2147
2148                 infl_cnt = DLB2_CSR_RD(hw,
2149                                        DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2150
2151                 if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
2152                         continue;
2153
2154                 /*
2155                  * Disable the affected CQ, and the CQs already mapped to the
2156                  * QID, before reading the QID's inflight count a second time.
2157                  * There is an unlikely race in which the QID may schedule one
2158                  * more QE after we read an inflight count of 0, and disabling
2159                  * the CQs guarantees that the race will not occur after a
2160                  * re-read of the inflight count register.
2161                  */
2162                 if (port->enabled)
2163                         dlb2_ldb_port_cq_disable(hw, port);
2164
2165                 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2166
2167                 infl_cnt = DLB2_CSR_RD(hw,
2168                                        DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2169
2170                 if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2171                         if (port->enabled)
2172                                 dlb2_ldb_port_cq_enable(hw, port);
2173
2174                         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2175
2176                         continue;
2177                 }
2178
2179                 dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2180         }
2181 }
2182
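/*
 * Walk every LDB port in the domain and attempt to finish its in-progress
 * map operations. Returns the number of additions still pending afterward.
 */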
2183 static unsigned int
2184 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
2185                                       struct dlb2_hw_domain *domain)
2186 {
2187         struct dlb2_list_entry *iter;
2188         struct dlb2_ldb_port *port;
2189         int i;
2190         RTE_SET_USED(iter);
2191
2192         if (!domain->configured || domain->num_pending_additions == 0)
2193                 return 0;
2194
2195         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2196                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2197                         dlb2_domain_finish_map_port(hw, domain, port);
2198         }
2199
2200         return domain->num_pending_additions;
2201 }
2202
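/*
 * Tear down a QID-to-CQ map at the register level: clear the slot's valid
 * bit in CQ2PRIOV and the CQ's bit in the QID-to-CQ vectors (ATM_QID2CQIDIX
 * and LSP_QID2CQIDIX/2), then transition the slot's software state to
 * UNMAPPED.
 */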
2203 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
2204                                    struct dlb2_ldb_port *port,
2205                                    struct dlb2_ldb_queue *queue)
2206 {
2207         enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
2208         u32 lsp_qid2cq2;
2209         u32 lsp_qid2cq;
2210         u32 atm_qid2cq;
2211         u32 cq2priov;
2212         u32 queue_id;
2213         u32 port_id;
2214         int i;
2215
2216         /* Find the queue's slot */
2217         mapped = DLB2_QUEUE_MAPPED;
2218         in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
2219         pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
2220
2221         if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
2222             !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
2223             !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
2224                 DLB2_HW_ERR(hw,
2225                             "[%s():%d] Internal error: QID %d isn't mapped\n",
2226                             __func__, __LINE__, queue->id.phys_id);
2227                 return -EFAULT;
2228         }
2229
2230         port_id = port->id.phys_id;
2231         queue_id = queue->id.phys_id;
2232
2233         /* Read-modify-write the priority and valid bit register */
2234         cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));
2235
2236         cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));
2237
2238         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);
2239
2240         atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
2241                                                          port_id / 4));
2242
2243         lsp_qid2cq = DLB2_CSR_RD(hw,
2244                                  DLB2_LSP_QID2CQIDIX(hw->ver,
2245                                                 queue_id, port_id / 4));
2246
2247         lsp_qid2cq2 = DLB2_CSR_RD(hw,
2248                                   DLB2_LSP_QID2CQIDIX2(hw->ver,
2249                                                   queue_id, port_id / 4));
2250
2251         switch (port_id % 4) {
2252         case 0:
2253                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
2254                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
2255                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
2256                 break;
2257
2258         case 1:
2259                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
2260                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
2261                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
2262                 break;
2263
2264         case 2:
2265                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
2266                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
2267                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
2268                 break;
2269
2270         case 3:
2271                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
2272                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
2273                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
2274                 break;
2275         }
2276
2277         DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);
2278
2279         DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
2280                     lsp_qid2cq);
2281
2282         DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
2283                     lsp_qid2cq2);
2284
2285         dlb2_flush_csr(hw);
2286
2287         unmapped = DLB2_QUEUE_UNMAPPED;
2288
2289         return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
2290 }
2291
2292 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
2293                                  struct dlb2_hw_domain *domain,
2294                                  struct dlb2_ldb_port *port,
2295                                  struct dlb2_ldb_queue *queue,
2296                                  u8 prio)
2297 {
2298         if (domain->started)
2299                 return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
2300         else
2301                 return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2302 }
2303
2304 static void
2305 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
2306                                    struct dlb2_hw_domain *domain,
2307                                    struct dlb2_ldb_port *port,
2308                                    int slot)
2309 {
2310         enum dlb2_qid_map_state state;
2311         struct dlb2_ldb_queue *queue;
2312
2313         queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
2314
2315         state = port->qid_map[slot].state;
2316
2317         /* Update the QID2CQIDX and CQ2QID vectors */
2318         dlb2_ldb_port_unmap_qid(hw, port, queue);
2319
2320         /*
2321          * Ensure the QID will not be serviced by this {CQ, slot} by clearing
2322          * the has_work bits.
2323          */
2324         dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
2325
2326         /* Reset the {CQ, slot} to its default state */
2327         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2328
2329         /* Re-enable the CQ if it was not manually disabled by the user */
2330         if (port->enabled)
2331                 dlb2_ldb_port_cq_enable(hw, port);
2332
2333         /*
2334          * If a mapping was waiting on this slot's removal, perform that
2335          * mapping now.
2336          */
2337         if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
2338                 struct dlb2_ldb_port_qid_map *map;
2339                 struct dlb2_ldb_queue *map_queue;
2340                 u8 prio;
2341
2342                 map = &port->qid_map[slot];
2343
2344                 map->qid = map->pending_qid;
2345                 map->priority = map->pending_priority;
2346
2347                 map_queue = &hw->rsrcs.ldb_queues[map->qid];
2348                 prio = map->priority;
2349
2350                 dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
2351         }
2352 }
2353
2354
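/*
 * Finish the port's pending unmaps. Returns false if the port has no
 * pending removals or its CQ still has outstanding inflights; otherwise
 * every slot in the UNMAP_IN_PROG(_PENDING_MAP) states is processed and
 * true is returned.
 */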
2355 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
2356                                           struct dlb2_hw_domain *domain,
2357                                           struct dlb2_ldb_port *port)
2358 {
2359         u32 infl_cnt;
2360         int i;
2361
2362         if (port->num_pending_removals == 0)
2363                 return false;
2364
2365         /*
2366          * The unmap requires all the CQ's outstanding inflights to be
2367          * completed.
2368          */
2369         infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
2370                                                        port->id.phys_id));
2371         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
2372                 return false;
2373
2374         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2375                 struct dlb2_ldb_port_qid_map *map;
2376
2377                 map = &port->qid_map[i];
2378
2379                 if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
2380                     map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
2381                         continue;
2382
2383                 dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
2384         }
2385
2386         return true;
2387 }
2388
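/*
 * Counterpart to the map-finish walk above: attempt to finish every
 * in-progress unmap in the domain and return the number of removals still
 * pending afterward.
 */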
2389 static unsigned int
2390 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
2391                                         struct dlb2_hw_domain *domain)
2392 {
2393         struct dlb2_list_entry *iter;
2394         struct dlb2_ldb_port *port;
2395         int i;
2396         RTE_SET_USED(iter);
2397
2398         if (!domain->configured || domain->num_pending_removals == 0)
2399                 return 0;
2400
2401         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2402                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2403                         dlb2_domain_finish_unmap_port(hw, domain, port);
2404         }
2405
2406         return domain->num_pending_removals;
2407 }
2408
2409 static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
2410                                         struct dlb2_hw_domain *domain)
2411 {
2412         struct dlb2_list_entry *iter;
2413         struct dlb2_ldb_port *port;
2414         int i;
2415         RTE_SET_USED(iter);
2416
2417         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2418                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2419                         port->enabled = false;
2420
2421                         dlb2_ldb_port_cq_disable(hw, port);
2422                 }
2423         }
2424 }
2425
2426
2427 static void dlb2_log_reset_domain(struct dlb2_hw *hw,
2428                                   u32 domain_id,
2429                                   bool vdev_req,
2430                                   unsigned int vdev_id)
2431 {
2432         DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
2433         if (vdev_req)
2434                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2435         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2436 }
2437
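/*
 * During domain reset, a vdev's enqueue access is revoked by clearing the
 * virtual producer port (VPP) valid bits. Under SR-IOV the VPP index is
 * derived from the port's virtual ID; in Scalable IOV mode, PP accesses
 * come through the PF MMIO window, so the physical ID is used instead.
 */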
2438 static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
2439                                          struct dlb2_hw_domain *domain,
2440                                          unsigned int vdev_id)
2441 {
2442         struct dlb2_list_entry *iter;
2443         struct dlb2_dir_pq_pair *port;
2444         u32 vpp_v = 0;
2445         RTE_SET_USED(iter);
2446
2447         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2448                 unsigned int offs;
2449                 u32 virt_id;
2450
2451                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2452                         virt_id = port->id.virt_id;
2453                 else
2454                         virt_id = port->id.phys_id;
2455
2456                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
2457
2458                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
2459         }
2460 }
2461
2462 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2463                                          struct dlb2_hw_domain *domain,
2464                                          unsigned int vdev_id)
2465 {
2466         struct dlb2_list_entry *iter;
2467         struct dlb2_ldb_port *port;
2468         u32 vpp_v = 0;
2469         int i;
2470         RTE_SET_USED(iter);
2471
2472         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2473                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2474                         unsigned int offs;
2475                         u32 virt_id;
2476
2477                         if (hw->virt_mode == DLB2_VIRT_SRIOV)
2478                                 virt_id = port->id.virt_id;
2479                         else
2480                                 virt_id = port->id.phys_id;
2481
2482                         offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2483
2484                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
2485                 }
2486         }
2487 }
2488
2489 static void
2490 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2491                                         struct dlb2_hw_domain *domain)
2492 {
2493         struct dlb2_list_entry *iter;
2494         struct dlb2_ldb_port *port;
2495         u32 int_en = 0;
2496         u32 wd_en = 0;
2497         int i;
2498         RTE_SET_USED(iter);
2499
2500         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2501                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2502                         DLB2_CSR_WR(hw,
2503                                     DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,
2504                                                        port->id.phys_id),
2505                                     int_en);
2506
2507                         DLB2_CSR_WR(hw,
2508                                     DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,
2509                                                       port->id.phys_id),
2510                                     wd_en);
2511                 }
2512         }
2513 }
2514
2515 static void
2516 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2517                                         struct dlb2_hw_domain *domain)
2518 {
2519         struct dlb2_list_entry *iter;
2520         struct dlb2_dir_pq_pair *port;
2521         u32 int_en = 0;
2522         u32 wd_en = 0;
2523         RTE_SET_USED(iter);
2524
2525         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2526                 DLB2_CSR_WR(hw,
2527                             DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2528                             int_en);
2529
2530                 DLB2_CSR_WR(hw,
2531                             DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
2532                             wd_en);
2533         }
2534 }
2535
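/*
 * Clear the per-VAS queue valid bits (SYS_LDB_VASQID_V) so the domain's
 * LDB queues can no longer be written, and unwind any vdev virtual queue
 * ID translations for vdev-owned queues.
 */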
2536 static void
2537 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2538                                           struct dlb2_hw_domain *domain)
2539 {
2540         int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2541         struct dlb2_list_entry *iter;
2542         struct dlb2_ldb_queue *queue;
2543         RTE_SET_USED(iter);
2544
2545         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2546                 int idx = domain_offset + queue->id.phys_id;
2547
2548                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);
2549
2550                 if (queue->id.vdev_owned) {
2551                         DLB2_CSR_WR(hw,
2552                                     DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
2553                                     0);
2554
2555                         idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2556                                 queue->id.virt_id;
2557
2558                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);
2559
2560                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
2561                 }
2562         }
2563 }
2564
2565 static void
2566 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2567                                           struct dlb2_hw_domain *domain)
2568 {
2569         struct dlb2_list_entry *iter;
2570         struct dlb2_dir_pq_pair *queue;
2571         unsigned long max_ports;
2572         int domain_offset;
2573         RTE_SET_USED(iter);
2574
2575         max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
2576
2577         domain_offset = domain->id.phys_id * max_ports;
2578
2579         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2580                 int idx = domain_offset + queue->id.phys_id;
2581
2582                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);
2583
2584                 if (queue->id.vdev_owned) {
2585                         idx = queue->id.vdev_id * max_ports + queue->id.virt_id;
2586
2587                         DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);
2588
2589                         DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
2590                 }
2591         }
2592 }
2593
2594 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2595                                                struct dlb2_hw_domain *domain)
2596 {
2597         struct dlb2_list_entry *iter;
2598         struct dlb2_ldb_port *port;
2599         u32 chk_en = 0;
2600         int i;
2601         RTE_SET_USED(iter);
2602
2603         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2604                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2605                         DLB2_CSR_WR(hw,
2606                                     DLB2_CHP_SN_CHK_ENBL(hw->ver,
2607                                                          port->id.phys_id),
2608                                     chk_en);
2609                 }
2610         }
2611 }
2612
2613 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2614                                                  struct dlb2_hw_domain *domain)
2615 {
2616         struct dlb2_list_entry *iter;
2617         struct dlb2_ldb_port *port;
2618         int i;
2619         RTE_SET_USED(iter);
2620
2621         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2622                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2623                         int j;
2624
2625                         for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
2626                                 if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2627                                         break;
2628                         }
2629
2630                         if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2631                                 DLB2_HW_ERR(hw,
2632                                             "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2633                                             __func__, port->id.phys_id);
2634                                 return -EFAULT;
2635                         }
2636                 }
2637         }
2638
2639         return 0;
2640 }
2641
2642 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2643                                         struct dlb2_hw_domain *domain)
2644 {
2645         struct dlb2_list_entry *iter;
2646         struct dlb2_dir_pq_pair *port;
2647         RTE_SET_USED(iter);
2648
2649         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2650                 port->enabled = false;
2651
2652                 dlb2_dir_port_cq_disable(hw, port);
2653         }
2654 }
2655
2656 static void
2657 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2658                                        struct dlb2_hw_domain *domain)
2659 {
2660         struct dlb2_list_entry *iter;
2661         struct dlb2_dir_pq_pair *port;
2662         u32 pp_v = 0;
2663         RTE_SET_USED(iter);
2664
2665         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2666                 DLB2_CSR_WR(hw,
2667                             DLB2_SYS_DIR_PP_V(port->id.phys_id),
2668                             pp_v);
2669         }
2670 }
2671
2672 static void
2673 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2674                                        struct dlb2_hw_domain *domain)
2675 {
2676         struct dlb2_list_entry *iter;
2677         struct dlb2_ldb_port *port;
2678         u32 pp_v = 0;
2679         int i;
2680         RTE_SET_USED(iter);
2681
2682         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2683                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2684                         DLB2_CSR_WR(hw,
2685                                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2686                                     pp_v);
2687                 }
2688         }
2689 }
2690
2691 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2692                                             struct dlb2_hw_domain *domain)
2693 {
2694         struct dlb2_list_entry *iter;
2695         struct dlb2_dir_pq_pair *dir_port;
2696         struct dlb2_ldb_port *ldb_port;
2697         struct dlb2_ldb_queue *queue;
2698         int i;
2699         RTE_SET_USED(iter);
2700
2701         /*
2702          * Confirm that all the domain's queues' inflight counts and AQED
2703          * active counts are 0.
2704          */
2705         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2706                 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2707                         DLB2_HW_ERR(hw,
2708                                     "[%s()] Internal error: failed to empty ldb queue %d\n",
2709                                     __func__, queue->id.phys_id);
2710                         return -EFAULT;
2711                 }
2712         }
2713
2714         /* Confirm that all the domain's CQs' inflight and token counts are 0. */
2715         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2716                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2717                         if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2718                             dlb2_ldb_cq_token_count(hw, ldb_port)) {
2719                                 DLB2_HW_ERR(hw,
2720                                             "[%s()] Internal error: failed to empty ldb port %d\n",
2721                                             __func__, ldb_port->id.phys_id);
2722                                 return -EFAULT;
2723                         }
2724                 }
2725         }
2726
2727         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2728                 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2729                         DLB2_HW_ERR(hw,
2730                                     "[%s()] Internal error: failed to empty dir queue %d\n",
2731                                     __func__, dir_port->id.phys_id);
2732                         return -EFAULT;
2733                 }
2734
2735                 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2736                         DLB2_HW_ERR(hw,
2737                                     "[%s()] Internal error: failed to empty dir port %d\n",
2738                                     __func__, dir_port->id.phys_id);
2739                         return -EFAULT;
2740                 }
2741         }
2742
2743         return 0;
2744 }
2745
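/*
 * Restore every per-port CSR touched during configuration to its *_RST
 * default. Version-specific registers are guarded by hw->ver checks
 * (DLB2_LSP_CFG_CQ_LDB_WU_LIMIT is written only on v2.5 and
 * DLB2_SYS_LDB_CQ_AT only on v2).
 */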
2746 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2747                                                    struct dlb2_ldb_port *port)
2748 {
2749         DLB2_CSR_WR(hw,
2750                     DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2751                     DLB2_SYS_LDB_PP2VAS_RST);
2752
2753         DLB2_CSR_WR(hw,
2754                     DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
2755                     DLB2_CHP_LDB_CQ2VAS_RST);
2756
2757         DLB2_CSR_WR(hw,
2758                     DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2759                     DLB2_SYS_LDB_PP2VDEV_RST);
2760
2761         if (port->id.vdev_owned) {
2762                 unsigned int offs;
2763                 u32 virt_id;
2764
2765                 /*
2766                  * DLB uses producer port address bits 17:12 to determine the
2767                  * producer port ID. In Scalable IOV mode, PP accesses come
2768                  * through the PF MMIO window for the physical producer port,
2769                  * so for translation purposes the virtual and physical port
2770                  * IDs are equal.
2771                  */
2772                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2773                         virt_id = port->id.virt_id;
2774                 else
2775                         virt_id = port->id.phys_id;
2776
2777                 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2778
2779                 DLB2_CSR_WR(hw,
2780                             DLB2_SYS_VF_LDB_VPP2PP(offs),
2781                             DLB2_SYS_VF_LDB_VPP2PP_RST);
2782
2783                 DLB2_CSR_WR(hw,
2784                             DLB2_SYS_VF_LDB_VPP_V(offs),
2785                             DLB2_SYS_VF_LDB_VPP_V_RST);
2786         }
2787
2788         DLB2_CSR_WR(hw,
2789                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2790                     DLB2_SYS_LDB_PP_V_RST);
2791
2792         DLB2_CSR_WR(hw,
2793                     DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
2794                     DLB2_LSP_CQ_LDB_DSBL_RST);
2795
2796         DLB2_CSR_WR(hw,
2797                     DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
2798                     DLB2_CHP_LDB_CQ_DEPTH_RST);
2799
2800         if (hw->ver != DLB2_HW_V2)
2801                 DLB2_CSR_WR(hw,
2802                             DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
2803                             DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
2804
2805         DLB2_CSR_WR(hw,
2806                     DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
2807                     DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2808
2809         DLB2_CSR_WR(hw,
2810                     DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
2811                     DLB2_CHP_HIST_LIST_LIM_RST);
2812
2813         DLB2_CSR_WR(hw,
2814                     DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
2815                     DLB2_CHP_HIST_LIST_BASE_RST);
2816
2817         DLB2_CSR_WR(hw,
2818                     DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
2819                     DLB2_CHP_HIST_LIST_POP_PTR_RST);
2820
2821         DLB2_CSR_WR(hw,
2822                     DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
2823                     DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2824
2825         DLB2_CSR_WR(hw,
2826                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2827                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2828
2829         DLB2_CSR_WR(hw,
2830                     DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2831                     DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2832
2833         DLB2_CSR_WR(hw,
2834                     DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
2835                     DLB2_CHP_LDB_CQ_INT_ENB_RST);
2836
2837         DLB2_CSR_WR(hw,
2838                     DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2839                     DLB2_SYS_LDB_CQ_ISR_RST);
2840
2841         DLB2_CSR_WR(hw,
2842                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2843                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2844
2845         DLB2_CSR_WR(hw,
2846                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2847                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2848
2849         DLB2_CSR_WR(hw,
2850                     DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
2851                     DLB2_CHP_LDB_CQ_WPTR_RST);
2852
2853         DLB2_CSR_WR(hw,
2854                     DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
2855                     DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2856
2857         DLB2_CSR_WR(hw,
2858                     DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2859                     DLB2_SYS_LDB_CQ_ADDR_L_RST);
2860
2861         DLB2_CSR_WR(hw,
2862                     DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2863                     DLB2_SYS_LDB_CQ_ADDR_U_RST);
2864
2865         if (hw->ver == DLB2_HW_V2)
2866                 DLB2_CSR_WR(hw,
2867                             DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2868                             DLB2_SYS_LDB_CQ_AT_RST);
2869
2870         DLB2_CSR_WR(hw,
2871                     DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
2872                     DLB2_SYS_LDB_CQ_PASID_RST);
2873
2874         DLB2_CSR_WR(hw,
2875                     DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2876                     DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2877
2878         DLB2_CSR_WR(hw,
2879                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
2880                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2881
2882         DLB2_CSR_WR(hw,
2883                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
2884                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2885
2886         DLB2_CSR_WR(hw,
2887                     DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
2888                     DLB2_LSP_CQ2QID0_RST);
2889
2890         DLB2_CSR_WR(hw,
2891                     DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
2892                     DLB2_LSP_CQ2QID1_RST);
2893
2894         DLB2_CSR_WR(hw,
2895                     DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
2896                     DLB2_LSP_CQ2PRIOV_RST);
2897 }
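
#if 0	/* Illustrative sketch, compiled out; not part of the driver. */
/*
 * The comments above note that the device derives the producer port ID
 * from bits 17:12 of the PP MMIO offset, and that VPP-to-PP translation
 * entries are indexed per vdev. A hypothetical restatement of that math
 * (pp_addr_offs is an assumed input, not a real driver variable):
 */
static inline unsigned int example_pp_id_from_offset(u64 pp_addr_offs)
{
	return (pp_addr_offs >> 12) & 0x3F; /* bits 17:12 */
}

static inline unsigned int example_ldb_vpp_index(unsigned int vdev_id,
						 u32 virt_port_id)
{
	/* One DLB2_MAX_NUM_LDB_PORTS-sized stride per vdev */
	return vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_port_id;
}
#endif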
2898
2899 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2900                                                  struct dlb2_hw_domain *domain)
2901 {
2902         struct dlb2_list_entry *iter;
2903         struct dlb2_ldb_port *port;
2904         int i;
2905         RTE_SET_USED(iter);
2906
2907         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2908                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2909                         __dlb2_domain_reset_ldb_port_registers(hw, port);
2910         }
2911 }
2912
2913 static void
2914 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2915                                        struct dlb2_dir_pq_pair *port)
2916 {
2917         u32 reg = 0;
2918
2919         DLB2_CSR_WR(hw,
2920                     DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
2921                     DLB2_CHP_DIR_CQ2VAS_RST);
2922
2923         DLB2_CSR_WR(hw,
2924                     DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
2925                     DLB2_LSP_CQ_DIR_DSBL_RST);
2926
2927         DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);
2928
2929         if (hw->ver == DLB2_HW_V2)
2930                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2931         else
2932                 DLB2_CSR_WR(hw,
2933                             DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
2934
2935         DLB2_CSR_WR(hw,
2936                     DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
2937                     DLB2_CHP_DIR_CQ_DEPTH_RST);
2938
2939         DLB2_CSR_WR(hw,
2940                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2941                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2942
2943         DLB2_CSR_WR(hw,
2944                     DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2945                     DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2946
2947         DLB2_CSR_WR(hw,
2948                     DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2949                     DLB2_CHP_DIR_CQ_INT_ENB_RST);
2950
2951         DLB2_CSR_WR(hw,
2952                     DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2953                     DLB2_SYS_DIR_CQ_ISR_RST);
2954
2955         DLB2_CSR_WR(hw,
2956                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
2957                                                       port->id.phys_id),
2958                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2959
2960         DLB2_CSR_WR(hw,
2961                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2962                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2963
2964         DLB2_CSR_WR(hw,
2965                     DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
2966                     DLB2_CHP_DIR_CQ_WPTR_RST);
2967
2968         DLB2_CSR_WR(hw,
2969                     DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
2970                     DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2971
2972         DLB2_CSR_WR(hw,
2973                     DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2974                     DLB2_SYS_DIR_CQ_ADDR_L_RST);
2975
2976         DLB2_CSR_WR(hw,
2977                     DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2978                     DLB2_SYS_DIR_CQ_ADDR_U_RST);
2979
2980         DLB2_CSR_WR(hw,
2981                     DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2982                     DLB2_SYS_DIR_CQ_AT_RST);
2983
2989         DLB2_CSR_WR(hw,
2990                     DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
2991                     DLB2_SYS_DIR_CQ_PASID_RST);
2992
2993         DLB2_CSR_WR(hw,
2994                     DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2995                     DLB2_SYS_DIR_CQ_FMT_RST);
2996
2997         DLB2_CSR_WR(hw,
2998                     DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2999                     DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
3000
3001         DLB2_CSR_WR(hw,
3002                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3003                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
3004
3005         DLB2_CSR_WR(hw,
3006                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3007                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
3008
3009         DLB2_CSR_WR(hw,
3010                     DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
3011                     DLB2_SYS_DIR_PP2VAS_RST);
3012
3017         DLB2_CSR_WR(hw,
3018                     DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
3019                     DLB2_SYS_DIR_PP2VDEV_RST);
3020
3021         if (port->id.vdev_owned) {
3022                 unsigned int offs;
3023                 u32 virt_id;
3024
3025                 /*
3026                  * DLB uses producer port address bits 17:12 to determine the
3027                  * producer port ID. In Scalable IOV mode, PP accesses come
3028                  * through the PF MMIO window for the physical producer port,
3029                  * so for translation purposes the virtual and physical port
3030                  * IDs are equal.
3031                  */
3032                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3033                         virt_id = port->id.virt_id;
3034                 else
3035                         virt_id = port->id.phys_id;
3036
3037                 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
3038                         virt_id;
3039
3040                 DLB2_CSR_WR(hw,
3041                             DLB2_SYS_VF_DIR_VPP2PP(offs),
3042                             DLB2_SYS_VF_DIR_VPP2PP_RST);
3043
3044                 DLB2_CSR_WR(hw,
3045                             DLB2_SYS_VF_DIR_VPP_V(offs),
3046                             DLB2_SYS_VF_DIR_VPP_V_RST);
3047         }
3048
3049         DLB2_CSR_WR(hw,
3050                     DLB2_SYS_DIR_PP_V(port->id.phys_id),
3051                     DLB2_SYS_DIR_PP_V_RST);
3052 }
3053
3054 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3055                                                  struct dlb2_hw_domain *domain)
3056 {
3057         struct dlb2_list_entry *iter;
3058         struct dlb2_dir_pq_pair *port;
3059         RTE_SET_USED(iter);
3060
3061         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3062                 __dlb2_domain_reset_dir_port_registers(hw, port);
3063 }
3064
3065 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
3066                                                   struct dlb2_hw_domain *domain)
3067 {
3068         struct dlb2_list_entry *iter;
3069         struct dlb2_ldb_queue *queue;
3070         RTE_SET_USED(iter);
3071
3072         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3073                 unsigned int queue_id = queue->id.phys_id;
3074                 int i;
3075
3076                 DLB2_CSR_WR(hw,
3077                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
3078                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3079
3080                 DLB2_CSR_WR(hw,
3081                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
3082                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3083
3084                 DLB2_CSR_WR(hw,
3085                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
3086                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3087
3088                 DLB2_CSR_WR(hw,
3089                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
3090                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3091
3092                 DLB2_CSR_WR(hw,
3093                             DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
3094                             DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3095
3096                 DLB2_CSR_WR(hw,
3097                             DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
3098                             DLB2_LSP_QID_LDB_INFL_LIM_RST);
3099
3100                 DLB2_CSR_WR(hw,
3101                             DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
3102                             DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3103
3104                 DLB2_CSR_WR(hw,
3105                             DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
3106                             DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3107
3108                 DLB2_CSR_WR(hw,
3109                             DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
3110                             DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3111
3112                 DLB2_CSR_WR(hw,
3113                             DLB2_SYS_LDB_QID_ITS(queue_id),
3114                             DLB2_SYS_LDB_QID_ITS_RST);
3115
3116                 DLB2_CSR_WR(hw,
3117                             DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
3118                             DLB2_CHP_ORD_QID_SN_RST);
3119
3120                 DLB2_CSR_WR(hw,
3121                             DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
3122                             DLB2_CHP_ORD_QID_SN_MAP_RST);
3123
3124                 DLB2_CSR_WR(hw,
3125                             DLB2_SYS_LDB_QID_V(queue_id),
3126                             DLB2_SYS_LDB_QID_V_RST);
3127
3128                 DLB2_CSR_WR(hw,
3129                             DLB2_SYS_LDB_QID_CFG_V(queue_id),
3130                             DLB2_SYS_LDB_QID_CFG_V_RST);
3131
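		/*
		 * If the queue had sequence numbers allocated, restore its
		 * slot-shift register (in whichever of the two SN groups
		 * the slot belongs to) to the reset value.
		 */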
3132                 if (queue->sn_cfg_valid) {
3133                         u32 offs[2];
3134
3135                         offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
3136                                                          queue->sn_slot);
3137                         offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
3138                                                          queue->sn_slot);
3139
3140                         DLB2_CSR_WR(hw,
3141                                     offs[queue->sn_group],
3142                                     DLB2_RO_GRP_0_SLT_SHFT_RST);
3143                 }
3144
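		/*
		 * Clear the queue's CQ map entries across all slices of the
		 * QID-to-CQ index tables.
		 */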
3145                 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3146                         DLB2_CSR_WR(hw,
3147                                     DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
3148                                     DLB2_LSP_QID2CQIDIX_00_RST);
3149
3150                         DLB2_CSR_WR(hw,
3151                                     DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
3152                                     DLB2_LSP_QID2CQIDIX2_00_RST);
3153
3154                         DLB2_CSR_WR(hw,
3155                                     DLB2_ATM_QID2CQIDIX(queue_id, i),
3156                                     DLB2_ATM_QID2CQIDIX_00_RST);
3157                 }
3158         }
3159 }
3160
3161 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3162                                                   struct dlb2_hw_domain *domain)
3163 {
3164         struct dlb2_list_entry *iter;
3165         struct dlb2_dir_pq_pair *queue;
3166         RTE_SET_USED(iter);
3167
3168         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3169                 DLB2_CSR_WR(hw,
3170                             DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
3171                                                        queue->id.phys_id),
3172                             DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3173
3174                 DLB2_CSR_WR(hw,
3175                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
3176                                                           queue->id.phys_id),
3177                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3178
3179                 DLB2_CSR_WR(hw,
3180                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
3181                                                           queue->id.phys_id),
3182                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3183
3184                 DLB2_CSR_WR(hw,
3185                             DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
3186                                                          queue->id.phys_id),
3187                             DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3188
3189                 DLB2_CSR_WR(hw,
3190                             DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3191                             DLB2_SYS_DIR_QID_ITS_RST);
3192
3193                 DLB2_CSR_WR(hw,
3194                             DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3195                             DLB2_SYS_DIR_QID_V_RST);
3196         }
3197 }
3198
3203 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3204                                         struct dlb2_hw_domain *domain)
3205 {
3206         dlb2_domain_reset_ldb_port_registers(hw, domain);
3207
3208         dlb2_domain_reset_dir_port_registers(hw, domain);
3209
3210         dlb2_domain_reset_ldb_queue_registers(hw, domain);
3211
3212         dlb2_domain_reset_dir_queue_registers(hw, domain);
3213
3214         if (hw->ver == DLB2_HW_V2) {
3215                 DLB2_CSR_WR(hw,
3216                             DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3217                             DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3218
3219                 DLB2_CSR_WR(hw,
3220                             DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3221                             DLB2_CHP_CFG_DIR_VAS_CRD_RST);
	} else {
		DLB2_CSR_WR(hw,
			    DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
			    DLB2_CHP_CFG_VAS_CRD_RST);
	}
3226 }
3227
3228 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3229                                             struct dlb2_hw_domain *domain)
3230 {
3231         struct dlb2_dir_pq_pair *tmp_dir_port;
3232         struct dlb2_ldb_queue *tmp_ldb_queue;
3233         struct dlb2_ldb_port *tmp_ldb_port;
3234         struct dlb2_list_entry *iter1;
3235         struct dlb2_list_entry *iter2;
3236         struct dlb2_function_resources *rsrcs;
3237         struct dlb2_dir_pq_pair *dir_port;
3238         struct dlb2_ldb_queue *ldb_queue;
3239         struct dlb2_ldb_port *ldb_port;
3240         struct dlb2_list_head *list;
3241         int ret, i;
3242         RTE_SET_USED(tmp_dir_port);
3243         RTE_SET_USED(tmp_ldb_queue);
3244         RTE_SET_USED(tmp_ldb_port);
3245         RTE_SET_USED(iter1);
3246         RTE_SET_USED(iter2);
3247
3248         rsrcs = domain->parent_func;
3249
3250         /* Move the domain's ldb queues to the function's avail list */
3251         list = &domain->used_ldb_queues;
3252         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3253                 if (ldb_queue->sn_cfg_valid) {
3254                         struct dlb2_sn_group *grp;
3255
3256                         grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3257
3258                         dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3259                         ldb_queue->sn_cfg_valid = false;
3260                 }
3261
3262                 ldb_queue->owned = false;
3263                 ldb_queue->num_mappings = 0;
3264                 ldb_queue->num_pending_additions = 0;
3265
3266                 dlb2_list_del(&domain->used_ldb_queues,
3267                               &ldb_queue->domain_list);
3268                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3269                               &ldb_queue->func_list);
3270                 rsrcs->num_avail_ldb_queues++;
3271         }
3272
3273         list = &domain->avail_ldb_queues;
3274         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3275                 ldb_queue->owned = false;
3276
3277                 dlb2_list_del(&domain->avail_ldb_queues,
3278                               &ldb_queue->domain_list);
3279                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3280                               &ldb_queue->func_list);
3281                 rsrcs->num_avail_ldb_queues++;
3282         }
3283
3284         /* Move the domain's ldb ports to the function's avail list */
3285         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3286                 list = &domain->used_ldb_ports[i];
3287                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3288                                        iter1, iter2) {
3289                         int j;
3290
3291                         ldb_port->owned = false;
3292                         ldb_port->configured = false;
3293                         ldb_port->num_pending_removals = 0;
3294                         ldb_port->num_mappings = 0;
3295                         ldb_port->init_tkn_cnt = 0;
3296                         ldb_port->cq_depth = 0;
3297                         for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3298                                 ldb_port->qid_map[j].state =
3299                                         DLB2_QUEUE_UNMAPPED;
3300
3301                         dlb2_list_del(&domain->used_ldb_ports[i],
3302                                       &ldb_port->domain_list);
3303                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3304                                       &ldb_port->func_list);
3305                         rsrcs->num_avail_ldb_ports[i]++;
3306                 }
3307
3308                 list = &domain->avail_ldb_ports[i];
3309                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3310                                        iter1, iter2) {
3311                         ldb_port->owned = false;
3312
3313                         dlb2_list_del(&domain->avail_ldb_ports[i],
3314                                       &ldb_port->domain_list);
3315                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3316                                       &ldb_port->func_list);
3317                         rsrcs->num_avail_ldb_ports[i]++;
3318                 }
3319         }
3320
3321         /* Move the domain's dir ports to the function's avail list */
3322         list = &domain->used_dir_pq_pairs;
3323         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3324                 dir_port->owned = false;
3325                 dir_port->port_configured = false;
3326                 dir_port->init_tkn_cnt = 0;
3327
3328                 dlb2_list_del(&domain->used_dir_pq_pairs,
3329                               &dir_port->domain_list);
3330
3331                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3332                               &dir_port->func_list);
3333                 rsrcs->num_avail_dir_pq_pairs++;
3334         }
3335
3336         list = &domain->avail_dir_pq_pairs;
3337         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3338                 dir_port->owned = false;
3339
3340                 dlb2_list_del(&domain->avail_dir_pq_pairs,
3341                               &dir_port->domain_list);
3342
3343                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3344                               &dir_port->func_list);
3345                 rsrcs->num_avail_dir_pq_pairs++;
3346         }
3347
3348         /* Return hist list entries to the function */
3349         ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3350                                     domain->hist_list_entry_base,
3351                                     domain->total_hist_list_entries);
3352         if (ret) {
3353                 DLB2_HW_ERR(hw,
3354                             "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
3355                             __func__);
3356                 return ret;
3357         }
3358
3359         domain->total_hist_list_entries = 0;
3360         domain->avail_hist_list_entries = 0;
3361         domain->hist_list_entry_base = 0;
3362         domain->hist_list_entry_offset = 0;
3363
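	/*
	 * DLB 2.5 uses a single combined credit pool, while DLB 2.0 splits
	 * credits into load-balanced (QED) and directed (DQED) pools.
	 */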
3364         if (hw->ver == DLB2_HW_V2_5) {
3365                 rsrcs->num_avail_entries += domain->num_credits;
3366                 domain->num_credits = 0;
3367         } else {
3368                 rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3369                 domain->num_ldb_credits = 0;
3370
3371                 rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3372                 domain->num_dir_credits = 0;
3373         }
3374         rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3375         rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3376         domain->num_avail_aqed_entries = 0;
3377         domain->num_used_aqed_entries = 0;
3378
3379         domain->num_pending_removals = 0;
3380         domain->num_pending_additions = 0;
3381         domain->configured = false;
3382         domain->started = false;
3383
3384         /*
3385          * Move the domain out of the used_domains list and back to the
3386          * function's avail_domains list.
3387          */
3388         dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3389         dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3390         rsrcs->num_avail_domains++;
3391
3392         return 0;
3393 }
3394
3395 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3396                                             struct dlb2_hw_domain *domain,
3397                                             struct dlb2_ldb_queue *queue)
3398 {
3399         struct dlb2_ldb_port *port = NULL;
3400         int ret, i;
3401
3402         /* If a domain has LDB queues, it must have LDB ports */
3403         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3404                 port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
3405                                           typeof(*port));
3406                 if (port)
3407                         break;
3408         }
3409
3410         if (port == NULL) {
3411                 DLB2_HW_ERR(hw,
3412                             "[%s()] Internal error: No configured LDB ports\n",
3413                             __func__);
3414                 return -EFAULT;
3415         }
3416
3417         /* If necessary, free up a QID slot in this CQ */
3418         if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3419                 struct dlb2_ldb_queue *mapped_queue;
3420
3421                 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3422
3423                 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3424                 if (ret)
3425                         return ret;
3426         }
3427
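	/*
	 * Dynamically map the queue so its QEs can be scheduled to the CQ,
	 * then drain them through the normal mapped-queue path.
	 */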
3428         ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3429         if (ret)
3430                 return ret;
3431
3432         return dlb2_domain_drain_mapped_queues(hw, domain);
3433 }
3434
3435 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3436                                              struct dlb2_hw_domain *domain)
3437 {
3438         struct dlb2_list_entry *iter;
3439         struct dlb2_ldb_queue *queue;
3440         int ret;
3441         RTE_SET_USED(iter);
3442
3443         /* If the domain hasn't been started, there's no traffic to drain */
3444         if (!domain->started)
3445                 return 0;
3446
3447         /*
3448          * Pre-condition: the unattached queue must not have any outstanding
3449          * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3450          * prior to this in dlb2_domain_drain_mapped_queues().
3451          */
3452         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3453                 if (queue->num_mappings != 0 ||
3454                     dlb2_ldb_queue_is_empty(hw, queue))
3455                         continue;
3456
3457                 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3458                 if (ret)
3459                         return ret;
3460         }
3461
3462         return 0;
3463 }
3464
3465 /**
3466  * dlb2_reset_domain() - reset a scheduling domain
3467  * @hw: dlb2_hw handle for a particular device.
3468  * @domain_id: domain ID.
3469  * @vdev_req: indicates whether this request came from a vdev.
3470  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3471  *
3472  * This function resets and frees a DLB 2.0 scheduling domain and its associated
3473  * resources.
3474  *
3475  * Pre-condition: the driver must ensure software has stopped sending QEs
3476  * through this domain's producer ports before invoking this function, or
3477  * undefined behavior will result.
3478  *
3479  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3480  * device.
3481  *
3482  * Return:
 * Returns 0 upon success, < 0 otherwise.
3484  *
3485  * EINVAL - Invalid domain ID, or the domain is not configured.
 * EFAULT - Internal error. (Possibly caused if software does not meet the
 *          pre-condition above.)
3488  * ETIMEDOUT - Hardware component didn't reset in the expected time.
3489  */
3490 int dlb2_reset_domain(struct dlb2_hw *hw,
3491                       u32 domain_id,
3492                       bool vdev_req,
3493                       unsigned int vdev_id)
3494 {
3495         struct dlb2_hw_domain *domain;
3496         int ret;
3497
3498         dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3499
3500         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3501
3502         if (domain == NULL || !domain->configured)
3503                 return -EINVAL;
3504
3505         /* Disable VPPs */
3506         if (vdev_req) {
3507                 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3508
3509                 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3510         }
3511
3512         /* Disable CQ interrupts */
3513         dlb2_domain_disable_dir_port_interrupts(hw, domain);
3514
3515         dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3516
3517         /*
3518          * For each queue owned by this domain, disable its write permissions to
3519          * cause any traffic sent to it to be dropped. Well-behaved software
3520          * should not be sending QEs at this point.
3521          */
3522         dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3523
3524         dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3525
3526         /* Turn off completion tracking on all the domain's PPs. */
3527         dlb2_domain_disable_ldb_seq_checks(hw, domain);
3528
3529         /*
3530          * Disable the LDB CQs and drain them in order to complete the map and
3531          * unmap procedures, which require zero CQ inflights and zero QID
3532          * inflights respectively.
3533          */
3534         dlb2_domain_disable_ldb_cqs(hw, domain);
3535
3536         dlb2_domain_drain_ldb_cqs(hw, domain, false);
3537
3538         ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3539         if (ret)
3540                 return ret;
3541
3542         ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3543         if (ret)
3544                 return ret;
3545
3546         ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3547         if (ret)
3548                 return ret;
3549
3550         /* Re-enable the CQs in order to drain the mapped queues. */
3551         dlb2_domain_enable_ldb_cqs(hw, domain);
3552
3553         ret = dlb2_domain_drain_mapped_queues(hw, domain);
3554         if (ret)
3555                 return ret;
3556
3557         ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3558         if (ret)
3559                 return ret;
3560
3561         /* Done draining LDB QEs, so disable the CQs. */
3562         dlb2_domain_disable_ldb_cqs(hw, domain);
3563
3564         dlb2_domain_drain_dir_queues(hw, domain);
3565
3566         /* Done draining DIR QEs, so disable the CQs. */
3567         dlb2_domain_disable_dir_cqs(hw, domain);
3568
3569         /* Disable PPs */
3570         dlb2_domain_disable_dir_producer_ports(hw, domain);
3571
3572         dlb2_domain_disable_ldb_producer_ports(hw, domain);
3573
3574         ret = dlb2_domain_verify_reset_success(hw, domain);
3575         if (ret)
3576                 return ret;
3577
3578         /* Reset the QID and port state. */
3579         dlb2_domain_reset_registers(hw, domain);
3580
3581         /* Hardware reset complete. Reset the domain's software state */
3582         return dlb2_domain_reset_software_state(hw, domain);
3583 }
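
#if 0	/* Illustrative usage sketch, compiled out; not part of the driver. */
/*
 * Example of a PF-initiated reset of one of its own domains (the domain
 * ID of 0 is hypothetical). Per the pre-condition above, the caller must
 * first stop all software from enqueueing to this domain.
 */
static int example_reset_pf_domain(struct dlb2_hw *hw)
{
	return dlb2_reset_domain(hw, 0, false, 0);
}
#endif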
3584
3585 static void
3586 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3587                                u32 domain_id,
3588                                struct dlb2_create_ldb_queue_args *args,
3589                                bool vdev_req,
3590                                unsigned int vdev_id)
3591 {
3592         DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3593         if (vdev_req)
3594                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3595         DLB2_HW_DBG(hw, "\tDomain ID:                  %d\n",
3596                     domain_id);
3597         DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3598                     args->num_sequence_numbers);
3599         DLB2_HW_DBG(hw, "\tNumber of QID inflights:    %d\n",
3600                     args->num_qid_inflights);
3601         DLB2_HW_DBG(hw, "\tNumber of ATM inflights:    %d\n",
3602                     args->num_atomic_inflights);
3603 }
3604
3605 static int
3606 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3607                                   struct dlb2_ldb_queue *queue,
3608                                   struct dlb2_create_ldb_queue_args *args)
3609 {
3610         int slot = -1;
3611         int i;
3612
3613         queue->sn_cfg_valid = false;
3614
3615         if (args->num_sequence_numbers == 0)
3616                 return 0;
3617
3618         for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3619                 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3620
3621                 if (group->sequence_numbers_per_queue ==
3622                     args->num_sequence_numbers &&
3623                     !dlb2_sn_group_full(group)) {
3624                         slot = dlb2_sn_group_alloc_slot(group);
3625                         if (slot >= 0)
3626                                 break;
3627                 }
3628         }
3629
3630         if (slot == -1) {
3631                 DLB2_HW_ERR(hw,
3632                             "[%s():%d] Internal error: no sequence number slots available\n",
3633                             __func__, __LINE__);
3634                 return -EFAULT;
3635         }
3636
3637         queue->sn_cfg_valid = true;
3638         queue->sn_group = i;
3639         queue->sn_slot = slot;
3640         return 0;
3641 }
3642
3643 static int
3644 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3645                                   u32 domain_id,
3646                                   struct dlb2_create_ldb_queue_args *args,
3647                                   struct dlb2_cmd_response *resp,
3648                                   bool vdev_req,
3649                                   unsigned int vdev_id,
3650                                   struct dlb2_hw_domain **out_domain,
3651                                   struct dlb2_ldb_queue **out_queue)
3652 {
3653         struct dlb2_hw_domain *domain;
3654         struct dlb2_ldb_queue *queue;
3655         int i;
3656
3657         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3658
3659         if (!domain) {
3660                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3661                 return -EINVAL;
3662         }
3663
3664         if (!domain->configured) {
3665                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3666                 return -EINVAL;
3667         }
3668
3669         if (domain->started) {
3670                 resp->status = DLB2_ST_DOMAIN_STARTED;
3671                 return -EINVAL;
3672         }
3673
3674         queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3675         if (!queue) {
3676                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3677                 return -EINVAL;
3678         }
3679
3680         if (args->num_sequence_numbers) {
3681                 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3682                         struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3683
3684                         if (group->sequence_numbers_per_queue ==
3685                             args->num_sequence_numbers &&
3686                             !dlb2_sn_group_full(group))
3687                                 break;
3688                 }
3689
3690                 if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3691                         resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3692                         return -EINVAL;
3693                 }
3694         }
3695
3696         if (args->num_qid_inflights > 4096) {
3697                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3698                 return -EINVAL;
3699         }
3700
3701         /* Inflights must be <= number of sequence numbers if ordered */
3702         if (args->num_sequence_numbers != 0 &&
3703             args->num_qid_inflights > args->num_sequence_numbers) {
3704                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3705                 return -EINVAL;
3706         }
3707
3708         if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3709                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3710                 return -EINVAL;
3711         }
3712
3713         if (args->num_atomic_inflights &&
3714             args->lock_id_comp_level != 0 &&
3715             args->lock_id_comp_level != 64 &&
3716             args->lock_id_comp_level != 128 &&
3717             args->lock_id_comp_level != 256 &&
3718             args->lock_id_comp_level != 512 &&
3719             args->lock_id_comp_level != 1024 &&
3720             args->lock_id_comp_level != 2048 &&
3721             args->lock_id_comp_level != 4096 &&
3722             args->lock_id_comp_level != 65536) {
3723                 resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3724                 return -EINVAL;
3725         }
3726
3727         *out_domain = domain;
3728         *out_queue = queue;
3729
3730         return 0;
3731 }
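
#if 0	/* Illustrative sketch, compiled out; not part of the driver. */
/*
 * Example arguments that satisfy the checks above for an ordered queue:
 * with 64 sequence numbers, at most 64 QID inflights are allowed, and
 * any non-zero lock ID compression level must be one of
 * {64, 128, ..., 4096, 65536}.
 */
static void example_ordered_queue_args(struct dlb2_create_ldb_queue_args *args)
{
	args->num_sequence_numbers = 64;
	args->num_qid_inflights = 64;	/* <= num_sequence_numbers */
	args->num_atomic_inflights = 64;
	args->lock_id_comp_level = 64;
}
#endif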
3732
3733 static int
3734 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
3735                                 struct dlb2_hw_domain *domain,
3736                                 struct dlb2_ldb_queue *queue,
3737                                 struct dlb2_create_ldb_queue_args *args)
3738 {
	int ret;

	ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
3741         if (ret)
3742                 return ret;
3743
3744         /* Attach QID inflights */
3745         queue->num_qid_inflights = args->num_qid_inflights;
3746
3747         /* Attach atomic inflights */
3748         queue->aqed_limit = args->num_atomic_inflights;
3749
3750         domain->num_avail_aqed_entries -= args->num_atomic_inflights;
3751         domain->num_used_aqed_entries += args->num_atomic_inflights;
3752
3753         return 0;
3754 }
3755
3756 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
3757                                      struct dlb2_hw_domain *domain,
3758                                      struct dlb2_ldb_queue *queue,
3759                                      struct dlb2_create_ldb_queue_args *args,
3760                                      bool vdev_req,
3761                                      unsigned int vdev_id)
3762 {
3763         struct dlb2_sn_group *sn_group;
3764         unsigned int offs;
3765         u32 reg = 0;
3766         u32 alimit;
3767
3768         /* QID write permissions are turned on when the domain is started */
3769         offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.phys_id;
3770
3771         DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), reg);
3772
3773         /*
3774          * Unordered QIDs get 4K inflights, ordered get as many as the number
3775          * of sequence numbers.
3776          */
3777         DLB2_BITS_SET(reg, args->num_qid_inflights,
3778                       DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
3779         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
3780                                                   queue->id.phys_id), reg);
3781
3782         alimit = queue->aqed_limit;
3783
3784         if (alimit > DLB2_MAX_NUM_AQED_ENTRIES)
3785                 alimit = DLB2_MAX_NUM_AQED_ENTRIES;
3786
3787         reg = 0;
3788         DLB2_BITS_SET(reg, alimit, DLB2_LSP_QID_AQED_ACTIVE_LIM_LIMIT);
3789         DLB2_CSR_WR(hw,
3790                     DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver,
3791                                                  queue->id.phys_id), reg);
3792
3793         reg = 0;
3794         switch (args->lock_id_comp_level) {
3795         case 64:
3796                 DLB2_BITS_SET(reg, 1, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3797                 break;
3798         case 128:
3799                 DLB2_BITS_SET(reg, 2, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3800                 break;
3801         case 256:
3802                 DLB2_BITS_SET(reg, 3, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3803                 break;
3804         case 512:
3805                 DLB2_BITS_SET(reg, 4, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3806                 break;
3807         case 1024:
3808                 DLB2_BITS_SET(reg, 5, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3809                 break;
3810         case 2048:
3811                 DLB2_BITS_SET(reg, 6, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3812                 break;
3813         case 4096:
3814                 DLB2_BITS_SET(reg, 7, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3815                 break;
3816         default:
3817                 /* No compression by default */
3818                 break;
3819         }
3820
3821         DLB2_CSR_WR(hw, DLB2_AQED_QID_HID_WIDTH(queue->id.phys_id), reg);
3822
3823         reg = 0;
3824         /* Don't timestamp QEs that pass through this queue */
3825         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_ITS(queue->id.phys_id), reg);
3826
3827         DLB2_BITS_SET(reg, args->depth_threshold,
3828                       DLB2_LSP_QID_ATM_DEPTH_THRSH_THRESH);
3829         DLB2_CSR_WR(hw,
3830                     DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver,
3831                                                  queue->id.phys_id), reg);
3832
3833         reg = 0;
3834         DLB2_BITS_SET(reg, args->depth_threshold,
3835                       DLB2_LSP_QID_NALDB_DEPTH_THRSH_THRESH);
3836         DLB2_CSR_WR(hw,
3837                     DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue->id.phys_id),
3838                     reg);
3839
3840         /*
3841          * This register limits the number of inflight flows a queue can have
3842          * at one time.  It has an upper bound of 2048, but can be
3843          * over-subscribed. 512 is chosen so that a single queue does not use
3844          * the entire atomic storage, but can use a substantial portion if
3845          * needed.
3846          */
3847         reg = 0;
3848         DLB2_BITS_SET(reg, 512, DLB2_AQED_QID_FID_LIM_QID_FID_LIMIT);
3849         DLB2_CSR_WR(hw, DLB2_AQED_QID_FID_LIM(queue->id.phys_id), reg);
3850
3851         /* Configure SNs */
3852         reg = 0;
3853         sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
3854         DLB2_BITS_SET(reg, sn_group->mode, DLB2_CHP_ORD_QID_SN_MAP_MODE);
3855         DLB2_BITS_SET(reg, queue->sn_slot, DLB2_CHP_ORD_QID_SN_MAP_SLOT);
3856         DLB2_BITS_SET(reg, sn_group->id, DLB2_CHP_ORD_QID_SN_MAP_GRP);
3857
3858         DLB2_CSR_WR(hw,
3859                     DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue->id.phys_id), reg);
3860
3861         reg = 0;
	DLB2_BITS_SET(reg, (args->num_sequence_numbers != 0),
		      DLB2_SYS_LDB_QID_CFG_V_SN_CFG_V);
	DLB2_BITS_SET(reg, (args->num_atomic_inflights != 0),
		      DLB2_SYS_LDB_QID_CFG_V_FID_CFG_V);
3866
3867         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), reg);
3868
3869         if (vdev_req) {
3870                 offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
3871
3872                 reg = 0;
3873                 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VQID_V_VQID_V);
3874                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), reg);
3875
3876                 reg = 0;
3877                 DLB2_BITS_SET(reg, queue->id.phys_id,
3878                               DLB2_SYS_VF_LDB_VQID2QID_QID);
3879                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), reg);
3880
3881                 reg = 0;
3882                 DLB2_BITS_SET(reg, queue->id.virt_id,
3883                               DLB2_SYS_LDB_QID2VQID_VQID);
3884                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID2VQID(queue->id.phys_id), reg);
3885         }
3886
3887         reg = 0;
3888         DLB2_BIT_SET(reg, DLB2_SYS_LDB_QID_V_QID_V);
3889         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), reg);
3890 }
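
#if 0	/* Illustrative sketch, compiled out; not part of the driver. */
/*
 * The switch above maps the non-zero compression levels 64..4096 to
 * codes 1..7, i.e. code = log2(level) - 5; level 65536 (and 0) take the
 * default "no compression" code. A hypothetical closed-form equivalent:
 */
static u32 example_hid_width_code(u32 lock_id_comp_level)
{
	if (lock_id_comp_level < 64 || lock_id_comp_level > 4096)
		return 0; /* no compression */

	return (u32)__builtin_ctz(lock_id_comp_level) - 5;
}
#endif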
3891
3892 /**
3893  * dlb2_hw_create_ldb_queue() - create a load-balanced queue
3894  * @hw: dlb2_hw handle for a particular device.
3895  * @domain_id: domain ID.
3896  * @args: queue creation arguments.
3897  * @resp: response structure.
3898  * @vdev_req: indicates whether this request came from a vdev.
3899  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3900  *
3901  * This function creates a load-balanced queue.
3902  *
3903  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3904  * device.
3905  *
3906  * Return:
3907  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
3908  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
3909  * contains the queue ID.
3910  *
3911  * resp->id contains a virtual ID if vdev_req is true.
3912  *
3913  * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *          or the domain has already been started.
3917  * EFAULT - Internal error (resp->status not set).
3918  */
3919 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
3920                              u32 domain_id,
3921                              struct dlb2_create_ldb_queue_args *args,
3922                              struct dlb2_cmd_response *resp,
3923                              bool vdev_req,
3924                              unsigned int vdev_id)
3925 {
3926         struct dlb2_hw_domain *domain;
3927         struct dlb2_ldb_queue *queue;
3928         int ret;
3929
3930         dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
3931
3932         /*
3933          * Verify that hardware resources are available before attempting to
3934          * satisfy the request. This simplifies the error unwinding code.
3935          */
3936         ret = dlb2_verify_create_ldb_queue_args(hw,
3937                                                 domain_id,
3938                                                 args,
3939                                                 resp,
3940                                                 vdev_req,
3941                                                 vdev_id,
3942                                                 &domain,
3943                                                 &queue);
3944         if (ret)
3945                 return ret;
3946
3947         ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
3949         if (ret) {
3950                 DLB2_HW_ERR(hw,
3951                             "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
3952                             __func__, __LINE__);
3953                 return ret;
3954         }
3955
3956         dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
3957
3958         queue->num_mappings = 0;
3959
3960         queue->configured = true;
3961
3962         /*
3963          * Configuration succeeded, so move the resource from the 'avail' to
3964          * the 'used' list.
3965          */
3966         dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
3967
3968         dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
3969
3970         resp->status = 0;
3971         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
3972
3973         return 0;
3974 }
3975
3976 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
3977                                        struct dlb2_hw_domain *domain,
3978                                        struct dlb2_ldb_port *port,
3979                                        bool vdev_req,
3980                                        unsigned int vdev_id)
3981 {
3982         u32 reg = 0;
3983
3984         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
3985         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);
3986
3987         if (vdev_req) {
3988                 unsigned int offs;
3989                 u32 virt_id;
3990
3991                 /*
3992                  * DLB uses producer port address bits 17:12 to determine the
3993                  * producer port ID. In Scalable IOV mode, PP accesses come
3994                  * through the PF MMIO window for the physical producer port,
3995                  * so for translation purposes the virtual and physical port
3996                  * IDs are equal.
3997                  */
3998                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3999                         virt_id = port->id.virt_id;
4000                 else
4001                         virt_id = port->id.phys_id;
4002
4003                 reg = 0;
4004                 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
4005                 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4006                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);
4007
4008                 reg = 0;
4009                 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
4010                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);
4011
4012                 reg = 0;
4013                 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
4014                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
4015         }
4016
4017         reg = 0;
4018         DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
4019         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
4020 }
4021
4022 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4023                                       struct dlb2_hw_domain *domain,
4024                                       struct dlb2_ldb_port *port,
4025                                       uintptr_t cq_dma_base,
4026                                       struct dlb2_create_ldb_port_args *args,
4027                                       bool vdev_req,
4028                                       unsigned int vdev_id)
4029 {
4030         u32 hl_base = 0;
4031         u32 reg = 0;
4032         u32 ds = 0;
4033
4034         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4035         DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
4036         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
4037
4038         reg = cq_dma_base >> 32;
4039         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
4040
4041         /*
4042          * 'ro' == relaxed ordering. This setting allows DLB2 to write
4043          * cache lines out-of-order (but QEs within a cache line are always
4044          * updated in-order).
4045          */
4046         reg = 0;
4047         DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
	DLB2_BITS_SET(reg,
		      !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
		      DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
4051         DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);
4052
4053         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);
4054
4055         port->cq_depth = args->cq_depth;
4056
4057         if (args->cq_depth <= 8) {
4058                 ds = 1;
4059         } else if (args->cq_depth == 16) {
4060                 ds = 2;
4061         } else if (args->cq_depth == 32) {
4062                 ds = 3;
4063         } else if (args->cq_depth == 64) {
4064                 ds = 4;
4065         } else if (args->cq_depth == 128) {
4066                 ds = 5;
4067         } else if (args->cq_depth == 256) {
4068                 ds = 6;
4069         } else if (args->cq_depth == 512) {
4070                 ds = 7;
4071         } else if (args->cq_depth == 1024) {
4072                 ds = 8;
4073         } else {
4074                 DLB2_HW_ERR(hw,
4075                             "[%s():%d] Internal error: invalid CQ depth\n",
4076                             __func__, __LINE__);
4077                 return -EFAULT;
4078         }
4079
4080         reg = 0;
4081         DLB2_BITS_SET(reg, ds,
4082                       DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4083         DLB2_CSR_WR(hw,
4084                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4085                     reg);
4086
4087         /*
4088          * To support CQs with depth less than 8, program the token count
4089          * register with a non-zero initial value. Operations such as domain
4090          * reset must take this initial value into account when quiescing the
4091          * CQ.
4092          */
4093         port->init_tkn_cnt = 0;
4094
4095         if (args->cq_depth < 8) {
4096                 reg = 0;
4097                 port->init_tkn_cnt = 8 - args->cq_depth;
4098
4099                 DLB2_BITS_SET(reg,
4100                               port->init_tkn_cnt,
4101                               DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
4102                 DLB2_CSR_WR(hw,
4103                             DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4104                             reg);
4105         } else {
4106                 DLB2_CSR_WR(hw,
4107                             DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4108                             DLB2_LSP_CQ_LDB_TKN_CNT_RST);
4109         }
4110
4111         reg = 0;
4112         DLB2_BITS_SET(reg, ds,
4113                       DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
4114         DLB2_CSR_WR(hw,
4115                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4116                     reg);
4117
4118         /* Reset the CQ write pointer */
4119         DLB2_CSR_WR(hw,
4120                     DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
4121                     DLB2_CHP_LDB_CQ_WPTR_RST);
4122
4123         reg = 0;
4124         DLB2_BITS_SET(reg,
4125                       port->hist_list_entry_limit - 1,
4126                       DLB2_CHP_HIST_LIST_LIM_LIMIT);
4127         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);
4128
4129         DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
4130                       DLB2_CHP_HIST_LIST_BASE_BASE);
4131         DLB2_CSR_WR(hw,
4132                     DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
4133                     hl_base);
4134
4135         /*
4136          * The inflight limit sets a cap on the number of QEs for which this CQ
4137          * can owe completions at one time.
4138          */
4139         reg = 0;
4140         DLB2_BITS_SET(reg, args->cq_history_list_size,
4141                       DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
4142         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
4143                     reg);
4144
4145         reg = 0;
4146         DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4147                       DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
4148         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
4149                     reg);
4150
4151         reg = 0;
4152         DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4153                       DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
4154         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
4155                     reg);
4156
4157         /*
4158          * Address translation (AT) settings: 0: untranslated, 2: translated
4159          * (see ATS spec regarding Address Type field for more details)
4160          */
4161
4162         if (hw->ver == DLB2_HW_V2) {
4163                 reg = 0;
4164                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
4165         }
4166
	/* Clear reg so a stale value is never written to the PASID CSR */
	reg = 0;
	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
		DLB2_BITS_SET(reg, hw->pasid[vdev_id],
			      DLB2_SYS_LDB_CQ_PASID_PASID);
		DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
	}

	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);
4175
4176         reg = 0;
4177         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
4178         DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);
4179
4180         /* Disable the port's QID mappings */
4181         reg = 0;
4182         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
4183
4184         return 0;
4185 }
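
#if 0	/* Illustrative sketch, compiled out; not part of the driver. */
/*
 * The if/else ladder above encodes the CQ depth as a token depth select
 * of log2(depth) - 2 for depths 8..1024, with depths below 8 sharing the
 * depth-8 encoding (the missing tokens are pre-loaded via init_tkn_cnt).
 * A hypothetical closed-form equivalent:
 */
static u32 example_token_depth_select(u32 cq_depth)
{
	if (cq_depth <= 8)
		return 1;

	return (u32)__builtin_ctz(cq_depth) - 2;
}
#endif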
4186
4187 static bool
4188 dlb2_cq_depth_is_valid(u32 depth)
4189 {
4190         /* Valid CQ depths are powers of 2 between 1 and 1024, inclusive */
4191         if (depth == 0 || depth > 1024)
4192                 return false;
4193 
4194         return (depth & (depth - 1)) == 0;
4199 }
4200
4201 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4202                                    struct dlb2_hw_domain *domain,
4203                                    struct dlb2_ldb_port *port,
4204                                    uintptr_t cq_dma_base,
4205                                    struct dlb2_create_ldb_port_args *args,
4206                                    bool vdev_req,
4207                                    unsigned int vdev_id)
4208 {
4209         int ret, i;
4210
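        /*
         * Carve this port's slice out of the domain's history list
         * allocation. hist_list_entry_limit is exclusive: the port owns
         * entries [base, limit - 1], which is why the HIST_LIST_LIM register
         * is programmed with limit - 1 in dlb2_ldb_port_configure_cq().
         */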
4211         port->hist_list_entry_base = domain->hist_list_entry_base +
4212                                      domain->hist_list_entry_offset;
4213         port->hist_list_entry_limit = port->hist_list_entry_base +
4214                                       args->cq_history_list_size;
4215
4216         domain->hist_list_entry_offset += args->cq_history_list_size;
4217         domain->avail_hist_list_entries -= args->cq_history_list_size;
4218
4219         ret = dlb2_ldb_port_configure_cq(hw,
4220                                          domain,
4221                                          port,
4222                                          cq_dma_base,
4223                                          args,
4224                                          vdev_req,
4225                                          vdev_id);
4226         if (ret)
4227                 return ret;
4228
4229         dlb2_ldb_port_configure_pp(hw,
4230                                    domain,
4231                                    port,
4232                                    vdev_req,
4233                                    vdev_id);
4234
4235         dlb2_ldb_port_cq_enable(hw, port);
4236
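        /*
         * Start with every QID slot unmapped; queue-to-port mappings are
         * established later by separate map requests.
         */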
4237         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4238                 port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4239         port->num_mappings = 0;
4240
4241         port->enabled = true;
4242
4243         port->configured = true;
4244
4245         return 0;
4246 }
4247
4248 static void
4249 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4250                               u32 domain_id,
4251                               uintptr_t cq_dma_base,
4252                               struct dlb2_create_ldb_port_args *args,
4253                               bool vdev_req,
4254                               unsigned int vdev_id)
4255 {
4256         DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4257         if (vdev_req)
4258                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4259         DLB2_HW_DBG(hw, "\tDomain ID:                 %u\n",
4260                     domain_id);
4261         DLB2_HW_DBG(hw, "\tCQ depth:                  %u\n",
4262                     args->cq_depth);
4263         DLB2_HW_DBG(hw, "\tCQ hist list size:         %u\n",
4264                     args->cq_history_list_size);
4265         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4266                     (unsigned long)cq_dma_base);
4267         DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
4268         DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
4269                     args->cos_strict);
4270 }
4271
4272 static int
4273 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4274                                  u32 domain_id,
4275                                  uintptr_t cq_dma_base,
4276                                  struct dlb2_create_ldb_port_args *args,
4277                                  struct dlb2_cmd_response *resp,
4278                                  bool vdev_req,
4279                                  unsigned int vdev_id,
4280                                  struct dlb2_hw_domain **out_domain,
4281                                  struct dlb2_ldb_port **out_port,
4282                                  int *out_cos_id)
4283 {
4284         struct dlb2_hw_domain *domain;
4285         struct dlb2_ldb_port *port;
4286         int i, id;
4287
4288         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4289
4290         if (!domain) {
4291                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4292                 return -EINVAL;
4293         }
4294
4295         if (!domain->configured) {
4296                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4297                 return -EINVAL;
4298         }
4299
4300         if (domain->started) {
4301                 resp->status = DLB2_ST_DOMAIN_STARTED;
4302                 return -EINVAL;
4303         }
4304
4305         if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
4306                 resp->status = DLB2_ST_INVALID_COS_ID;
4307                 return -EINVAL;
4308         }
4309
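        /*
         * Select an available port from the requested class of service. In
         * strict mode only the requested CoS is considered; otherwise the
         * search proceeds round-robin starting at cos_id (e.g. cos_id == 2
         * is searched in the order 2, 3, 0, 1 when DLB2_NUM_COS_DOMAINS is 4).
         */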
4310         if (args->cos_strict) {
4311                 id = args->cos_id;
4312                 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4313                                           typeof(*port));
4314         } else {
4315                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4316                         id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
4317
4318                         port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4319                                                   typeof(*port));
4320                         if (port)
4321                                 break;
4322                 }
4323         }
4324
4325         if (!port) {
4326                 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4327                 return -EINVAL;
4328         }
4329
4330         /* The CQ base address must be aligned to a 64B cache line */
4331         if ((cq_dma_base & 0x3F) != 0) {
4332                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4333                 return -EINVAL;
4334         }
4335
4336         if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4337                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4338                 return -EINVAL;
4339         }
4340
4341         /* The history list size must be >= 1 */
4342         if (!args->cq_history_list_size) {
4343                 resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4344                 return -EINVAL;
4345         }
4346
4347         if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4348                 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4349                 return -EINVAL;
4350         }
4351
4352         *out_domain = domain;
4353         *out_port = port;
4354         *out_cos_id = id;
4355
4356         return 0;
4357 }
4358
4359 /**
4360  * dlb2_hw_create_ldb_port() - create a load-balanced port
4361  * @hw: dlb2_hw handle for a particular device.
4362  * @domain_id: domain ID.
4363  * @args: port creation arguments.
4364  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4365  * @resp: response structure.
4366  * @vdev_req: indicates whether this request came from a vdev.
4367  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4368  *
4369  * This function creates a load-balanced port.
4370  *
4371  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4372  * device.
4373  *
4374  * Return:
4375  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4376  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4377  * contains the port ID.
4378  *
4379  * resp->id contains a virtual ID if vdev_req is true.
4380  *
4381  * Errors:
4382  * EINVAL - A requested resource is unavailable, the CQ depth or history list
4383  *          size is invalid, the CQ base address is not properly aligned, the
4384  *          domain is not configured, or the domain has already been started.
4385  * EFAULT - Internal error (resp->status not set).
4386  */
4387 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4388                             u32 domain_id,
4389                             struct dlb2_create_ldb_port_args *args,
4390                             uintptr_t cq_dma_base,
4391                             struct dlb2_cmd_response *resp,
4392                             bool vdev_req,
4393                             unsigned int vdev_id)
4394 {
4395         struct dlb2_hw_domain *domain;
4396         struct dlb2_ldb_port *port;
4397         int ret, cos_id;
4398
4399         dlb2_log_create_ldb_port_args(hw,
4400                                       domain_id,
4401                                       cq_dma_base,
4402                                       args,
4403                                       vdev_req,
4404                                       vdev_id);
4405
4406         /*
4407          * Verify that hardware resources are available before attempting to
4408          * satisfy the request. This simplifies the error unwinding code.
4409          */
4410         ret = dlb2_verify_create_ldb_port_args(hw,
4411                                                domain_id,
4412                                                cq_dma_base,
4413                                                args,
4414                                                resp,
4415                                                vdev_req,
4416                                                vdev_id,
4417                                                &domain,
4418                                                &port,
4419                                                &cos_id);
4420         if (ret)
4421                 return ret;
4422
4423         ret = dlb2_configure_ldb_port(hw,
4424                                       domain,
4425                                       port,
4426                                       cq_dma_base,
4427                                       args,
4428                                       vdev_req,
4429                                       vdev_id);
4430         if (ret)
4431                 return ret;
4432
4433         /*
4434          * Configuration succeeded, so move the resource from the 'avail' to
4435          * the 'used' list.
4436          */
4437         dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4438
4439         dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4440
4441         resp->status = 0;
4442         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4443
4444         return 0;
4445 }
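
/*
 * Usage sketch for dlb2_hw_create_ldb_port(), illustrative only: the hw,
 * domain_id and cq_dma_base values are assumed to come from the caller's
 * existing PF-driver context, and any ioctl/mmap glue is omitted.
 *
 *	struct dlb2_create_ldb_port_args args = {
 *		.cq_depth = 64,
 *		.cq_history_list_size = 64,
 *		.cos_id = 0,
 *		.cos_strict = 0,
 *	};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	ret = dlb2_hw_create_ldb_port(hw, domain_id, &args, cq_dma_base,
 *				      &resp, false, 0);
 *	if (ret)
 *		return ret;	// resp.status holds a dlb2_error code
 *
 *	// On success, resp.id holds the new port ID (physical, since
 *	// vdev_req is false in this sketch).
 */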