event/dlb2: add infos get and configure
drivers/event/dlb2/pf/base/dlb2_resource.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb2_user.h"

#include "dlb2_hw_types.h"
#include "dlb2_mbox.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"
#include "dlb2_resource.h"

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"

#define DLB2_DOM_LIST_HEAD(head, type) \
        DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
        DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
        DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
        DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
        DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
        DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)

static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
        int i;

        dlb2_list_init_head(&domain->used_ldb_queues);
        dlb2_list_init_head(&domain->used_dir_pq_pairs);
        dlb2_list_init_head(&domain->avail_ldb_queues);
        dlb2_list_init_head(&domain->avail_dir_pq_pairs);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&domain->used_ldb_ports[i]);
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}

static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
{
        int i;

        dlb2_list_init_head(&rsrc->avail_domains);
        dlb2_list_init_head(&rsrc->used_domains);
        dlb2_list_init_head(&rsrc->avail_ldb_queues);
        dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
}

void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
{
        union dlb2_chp_cfg_chp_csr_ctrl r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

        r0.field.cfg_64bytes_qe_dir_cq_mode = 1;

        DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}

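/**
 * dlb2_hw_get_num_resources() - query the function's available resources
 * @hw: Contains the current state of the DLB2 hardware.
 * @arg: Filled in with the counts of available resources.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: 0 on success, -EINVAL if vdev_req is true and vdev_id is invalid.
 */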
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
                              struct dlb2_get_num_resources_args *arg,
                              bool vdev_req,
                              unsigned int vdev_id)
{
        struct dlb2_function_resources *rsrcs;
        struct dlb2_bitmap *map;
        int i;

        if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
                return -EINVAL;

        if (vdev_req)
                rsrcs = &hw->vdev[vdev_id];
        else
                rsrcs = &hw->pf;

        arg->num_sched_domains = rsrcs->num_avail_domains;

        arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;

        arg->num_ldb_ports = 0;
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];

        arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
        arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
        arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
        arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];

        arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;

        arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;

        map = rsrcs->avail_hist_list_entries;

        arg->num_hist_list_entries = dlb2_bitmap_count(map);

        arg->max_contiguous_hist_list_entries =
                dlb2_bitmap_longest_set_range(map);

        arg->num_ldb_credits = rsrcs->num_avail_qed_entries;

        arg->num_dir_credits = rsrcs->num_avail_dqed_entries;

        return 0;
}

void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
{
        union dlb2_chp_cfg_chp_csr_ctrl r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

        r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;

        DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}

void dlb2_resource_free(struct dlb2_hw *hw)
{
        int i;

        if (hw->pf.avail_hist_list_entries)
                dlb2_bitmap_free(hw->pf.avail_hist_list_entries);

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                if (hw->vdev[i].avail_hist_list_entries)
                        dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
        }
}

int dlb2_resource_init(struct dlb2_hw *hw)
{
        struct dlb2_list_entry *list;
        unsigned int i;
        int ret;

        /*
         * For optimal load-balancing, ports that map to one or more QIDs in
         * common should not be in numerical sequence. This is application
         * dependent, but the driver interleaves port IDs as much as possible
         * to reduce the likelihood of sequential IDs. This initial allocation
         * maximizes the average distance between an ID and its immediate
         * neighbors (i.e. the distance from 1 to 0 and to 2, the distance
         * from 2 to 1 and to 3, etc.).
         */
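        /*
         * Each group of 16 IDs below is generated by stepping through the
         * group with stride 7 (which is coprime to 16), i.e. entry n of a
         * group is base + ((7 * n) % 16).
         */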
        u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
                0,  7,  14,  5, 12,  3, 10,  1,  8, 15,  6, 13,  4, 11,  2,  9,
                16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
                32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
                48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
        };

        /* Zero-out resource tracking data structures */
        memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
        memset(&hw->pf, 0, sizeof(hw->pf));

        dlb2_init_fn_rsrc_lists(&hw->pf);

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                memset(&hw->vdev[i], 0, sizeof(hw->vdev[i]));
                dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
        }

        for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
                memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
                dlb2_init_domain_rsrc_lists(&hw->domains[i]);
                hw->domains[i].parent_func = &hw->pf;
        }

        /* Give all resources to the PF driver */
        hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
        for (i = 0; i < hw->pf.num_avail_domains; i++) {
                list = &hw->domains[i].func_list;

                dlb2_list_add(&hw->pf.avail_domains, list);
        }

        hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
        for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
                list = &hw->rsrcs.ldb_queues[i].func_list;

                dlb2_list_add(&hw->pf.avail_ldb_queues, list);
        }

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                hw->pf.num_avail_ldb_ports[i] =
                        DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;

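        /*
         * Hand the ports out in the interleaved order above. Note that the
         * shift below relies on a numerical coincidence: with 64 ports and 4
         * classes of service there are 16 = 2^4 ports per class, so shifting
         * by DLB2_NUM_COS_DOMAINS (4) happens to equal dividing by the number
         * of ports per class.
         */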
        for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
                int cos_id = i >> DLB2_NUM_COS_DOMAINS;
                struct dlb2_ldb_port *port;

                port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

                dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
                              &port->func_list);
        }

        hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS;
        for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
                list = &hw->rsrcs.dir_pq_pairs[i].func_list;

                dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
        }

        hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
        hw->pf.num_avail_dqed_entries = DLB2_MAX_NUM_DIR_CREDITS;
        hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;

        ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
                                DLB2_MAX_NUM_HIST_LIST_ENTRIES);
        if (ret)
                goto unwind;

        ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
        if (ret)
                goto unwind;

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
                                        DLB2_MAX_NUM_HIST_LIST_ENTRIES);
                if (ret)
                        goto unwind;

                ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
                if (ret)
                        goto unwind;
        }

        /* Initialize the hardware resource IDs */
        for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
                hw->domains[i].id.phys_id = i;
                hw->domains[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
                hw->rsrcs.ldb_queues[i].id.phys_id = i;
                hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
                hw->rsrcs.ldb_ports[i].id.phys_id = i;
                hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS; i++) {
                hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
                hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
                hw->rsrcs.sn_groups[i].id = i;
                /* Default mode (0) is 64 sequence numbers per queue */
                hw->rsrcs.sn_groups[i].mode = 0;
                hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
                hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
        }

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;

        return 0;

unwind:
        dlb2_resource_free(hw);

        return ret;
}

void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
{
        union dlb2_cfg_mstr_cfg_pm_pmcsr_disable r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE);

        r0.field.disable = 0;

        DLB2_CSR_WR(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE, r0.val);
}

static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
                                          struct dlb2_hw_domain *domain)
{
        union dlb2_chp_cfg_ldb_vas_crd r0 = { {0} };
        union dlb2_chp_cfg_dir_vas_crd r1 = { {0} };

        r0.field.count = domain->num_ldb_credits;

        DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), r0.val);

        r1.field.count = domain->num_dir_credits;

        DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), r1.val);
}

static struct dlb2_ldb_port *
dlb2_get_next_ldb_port(struct dlb2_hw *hw,
                       struct dlb2_function_resources *rsrcs,
                       u32 domain_id,
                       u32 cos_id)
{
        struct dlb2_list_entry *iter;
        struct dlb2_ldb_port *port;
        RTE_SET_USED(iter);
        /*
         * To reduce the odds of consecutive load-balanced ports mapping to the
         * same queue(s), the driver attempts to allocate ports whose neighbors
         * are owned by a different domain.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[next].owned ||
                    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
                        continue;

                if (!hw->rsrcs.ldb_ports[prev].owned ||
                    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
                        continue;

                return port;
        }

        /*
         * Failing that, the driver looks for a port with one neighbor owned by
         * a different domain and the other unallocated.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[prev].owned &&
                    hw->rsrcs.ldb_ports[next].owned &&
                    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
                        return port;

                if (!hw->rsrcs.ldb_ports[next].owned &&
                    hw->rsrcs.ldb_ports[prev].owned &&
                    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
                        return port;
        }

        /*
         * Failing that, the driver looks for a port with both neighbors
         * unallocated.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[prev].owned &&
                    !hw->rsrcs.ldb_ports[next].owned)
                        return port;
        }

        /* If all else fails, the driver returns the next available port. */
        return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
                                   typeof(*port));
}

static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
                                   struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_ports,
                                   u32 cos_id,
                                   struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
                resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_ports; i++) {
                struct dlb2_ldb_port *port;

                port = dlb2_get_next_ldb_port(hw, rsrcs,
                                              domain->id.phys_id, cos_id);
                if (port == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
                              &port->func_list);

                port->domain_id = domain->id;
                port->owned = true;

                dlb2_list_add(&domain->avail_ldb_ports[cos_id],
                              &port->domain_list);
        }

        rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;

        return 0;
}

static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
                                 struct dlb2_function_resources *rsrcs,
                                 struct dlb2_hw_domain *domain,
                                 struct dlb2_create_sched_domain_args *args,
                                 struct dlb2_cmd_response *resp)
{
        unsigned int i, j;
        int ret;

        if (args->cos_strict) {
                for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                        u32 num = args->num_cos_ldb_ports[i];

                        /* Allocate ports from specific classes-of-service */
                        ret = __dlb2_attach_ldb_ports(hw,
                                                      rsrcs,
                                                      domain,
                                                      num,
                                                      i,
                                                      resp);
                        if (ret)
                                return ret;
                }
        } else {
                unsigned int k;
                u32 cos_id;

                /*
                 * Attempt to allocate from the requested class-of-service,
                 * but fall back to the other classes if that fails.
                 */
                for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                        for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
                                for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
                                        cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;

                                        ret = __dlb2_attach_ldb_ports(hw,
                                                                      rsrcs,
                                                                      domain,
                                                                      1,
                                                                      cos_id,
                                                                      resp);
                                        if (ret == 0)
                                                break;
                                }

                                if (ret < 0)
                                        return ret;
                        }
                }
        }

        /* Allocate num_ldb_ports from any class-of-service */
        for (i = 0; i < args->num_ldb_ports; i++) {
                for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
                        ret = __dlb2_attach_ldb_ports(hw,
                                                      rsrcs,
                                                      domain,
                                                      1,
                                                      j,
                                                      resp);
                        if (ret == 0)
                                break;
                }

                if (ret < 0)
                        return ret;
        }

        return 0;
}

static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
                                 struct dlb2_function_resources *rsrcs,
                                 struct dlb2_hw_domain *domain,
                                 u32 num_ports,
                                 struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
                resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_ports; i++) {
                struct dlb2_dir_pq_pair *port;

                port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
                                           typeof(*port));
                if (port == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);

                port->domain_id = domain->id;
                port->owned = true;

                dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
        }

        rsrcs->num_avail_dir_pq_pairs -= num_ports;

        return 0;
}

static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_credits,
                                   struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_qed_entries < num_credits) {
                resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_qed_entries -= num_credits;
        domain->num_ldb_credits += num_credits;
        return 0;
}

static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_credits,
                                   struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_dqed_entries < num_credits) {
                resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_dqed_entries -= num_credits;
        domain->num_dir_credits += num_credits;
        return 0;
}

static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
                                        struct dlb2_hw_domain *domain,
                                        u32 num_atomic_inflights,
                                        struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
                resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
        domain->num_avail_aqed_entries += num_atomic_inflights;
        return 0;
}

static int
dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
                                     struct dlb2_hw_domain *domain,
                                     u32 num_hist_list_entries,
                                     struct dlb2_cmd_response *resp)
{
        struct dlb2_bitmap *bitmap;
        int base;

        if (num_hist_list_entries) {
                bitmap = rsrcs->avail_hist_list_entries;

                base = dlb2_bitmap_find_set_bit_range(bitmap,
                                                      num_hist_list_entries);
                if (base < 0)
                        goto error;

                domain->total_hist_list_entries = num_hist_list_entries;
                domain->avail_hist_list_entries = num_hist_list_entries;
                domain->hist_list_entry_base = base;
                domain->hist_list_entry_offset = 0;

                dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
        }
        return 0;

error:
        resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
        return -EINVAL;
}

static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
                                  struct dlb2_function_resources *rsrcs,
                                  struct dlb2_hw_domain *domain,
                                  u32 num_queues,
                                  struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_ldb_queues < num_queues) {
                resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_queues; i++) {
                struct dlb2_ldb_queue *queue;

                queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
                                            typeof(*queue));
                if (queue == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);

                queue->domain_id = domain->id;
                queue->owned = true;

                dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
        }

        rsrcs->num_avail_ldb_queues -= num_queues;

        return 0;
}

static int
dlb2_domain_attach_resources(struct dlb2_hw *hw,
                             struct dlb2_function_resources *rsrcs,
                             struct dlb2_hw_domain *domain,
                             struct dlb2_create_sched_domain_args *args,
                             struct dlb2_cmd_response *resp)
{
        int ret;

        ret = dlb2_attach_ldb_queues(hw,
                                     rsrcs,
                                     domain,
                                     args->num_ldb_queues,
                                     resp);
        if (ret < 0)
                return ret;

        ret = dlb2_attach_ldb_ports(hw,
                                    rsrcs,
                                    domain,
                                    args,
                                    resp);
        if (ret < 0)
                return ret;

        ret = dlb2_attach_dir_ports(hw,
                                    rsrcs,
                                    domain,
                                    args->num_dir_ports,
                                    resp);
        if (ret < 0)
                return ret;

        ret = dlb2_attach_ldb_credits(rsrcs,
                                      domain,
                                      args->num_ldb_credits,
                                      resp);
        if (ret < 0)
                return ret;

        ret = dlb2_attach_dir_credits(rsrcs,
                                      domain,
                                      args->num_dir_credits,
                                      resp);
        if (ret < 0)
                return ret;

        ret = dlb2_attach_domain_hist_list_entries(rsrcs,
                                                   domain,
                                                   args->num_hist_list_entries,
                                                   resp);
        if (ret < 0)
                return ret;

        ret = dlb2_attach_atomic_inflights(rsrcs,
                                           domain,
                                           args->num_atomic_inflights,
                                           resp);
        if (ret < 0)
                return ret;

        dlb2_configure_domain_credits(hw, domain);

        domain->configured = true;

        domain->started = false;

        rsrcs->num_avail_domains--;

        return 0;
}

static int
dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
                                  struct dlb2_create_sched_domain_args *args,
                                  struct dlb2_cmd_response *resp)
{
        u32 num_avail_ldb_ports, req_ldb_ports;
        struct dlb2_bitmap *avail_hl_entries;
        unsigned int max_contig_hl_range;
        int i;

        avail_hl_entries = rsrcs->avail_hist_list_entries;

        max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);

        num_avail_ldb_ports = 0;
        req_ldb_ports = 0;
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];

                req_ldb_ports += args->num_cos_ldb_ports[i];
        }

        req_ldb_ports += args->num_ldb_ports;

        if (rsrcs->num_avail_domains < 1) {
                resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
                return -EINVAL;
        }

        if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
                resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
                return -EINVAL;
        }

        if (req_ldb_ports > num_avail_ldb_ports) {
                resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
                if (args->num_cos_ldb_ports[i] >
                    rsrcs->num_avail_ldb_ports[i]) {
                        resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                        return -EINVAL;
                }
        }

        if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
                resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
                return -EINVAL;
        }

        if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
                resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
                resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
                resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
                resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
                return -EINVAL;
        }

        if (max_contig_hl_range < args->num_hist_list_entries) {
                resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
                return -EINVAL;
        }

        return 0;
}

static void
dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
                                  struct dlb2_create_sched_domain_args *args,
                                  bool vdev_req,
                                  unsigned int vdev_id)
{
        DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
        if (vdev_req)
                DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
        DLB2_HW_DBG(hw, "\tNumber of LDB queues:          %d\n",
                    args->num_ldb_queues);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
                    args->num_ldb_ports);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0):   %d\n",
                    args->num_cos_ldb_ports[0]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1):   %d\n",
                    args->num_cos_ldb_ports[1]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2):   %d\n",
                    args->num_cos_ldb_ports[2]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3):   %d\n",
                    args->num_cos_ldb_ports[3]);
        DLB2_HW_DBG(hw, "\tStrict CoS allocation:         %d\n",
                    args->cos_strict);
        DLB2_HW_DBG(hw, "\tNumber of DIR ports:           %d\n",
                    args->num_dir_ports);
        DLB2_HW_DBG(hw, "\tNumber of ATM inflights:       %d\n",
                    args->num_atomic_inflights);
        DLB2_HW_DBG(hw, "\tNumber of hist list entries:   %d\n",
                    args->num_hist_list_entries);
        DLB2_HW_DBG(hw, "\tNumber of LDB credits:         %d\n",
                    args->num_ldb_credits);
        DLB2_HW_DBG(hw, "\tNumber of DIR credits:         %d\n",
                    args->num_dir_credits);
}

/**
 * dlb2_hw_create_sched_domain() - Allocate and initialize a DLB2 scheduling
 *      domain and its resources.
 * @hw: Contains the current state of the DLB2 hardware.
 * @args: User-provided arguments.
 * @resp: Response to user.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: 0 on success, < 0 on error. If the driver is unable to satisfy a
 * request, resp->status will be set accordingly.
 */
int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
                                struct dlb2_create_sched_domain_args *args,
                                struct dlb2_cmd_response *resp,
                                bool vdev_req,
                                unsigned int vdev_id)
{
        struct dlb2_function_resources *rsrcs;
        struct dlb2_hw_domain *domain;
        int ret;

        rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

        dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);

        /*
         * Verify that hardware resources are available before attempting to
         * satisfy the request. This simplifies the error unwinding code.
         */
        ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp);
        if (ret)
                return ret;

        domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
        if (domain == NULL) {
                DLB2_HW_ERR(hw,
                            "[%s():%d] Internal error: no available domains\n",
                            __func__, __LINE__);
                return -EFAULT;
        }

        if (domain->configured) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: avail_domains contains configured domains.\n",
                            __func__);
                return -EFAULT;
        }

        dlb2_init_domain_rsrc_lists(domain);

        ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
        if (ret < 0) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to attach the domain's resources.\n",
                            __func__);

                return ret;
        }

        dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);

        dlb2_list_add(&rsrcs->used_domains, &domain->func_list);

        resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
        resp->status = 0;

        return 0;
}

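/*
 * Illustrative call sequence for the function above (a sketch only; see
 * dlb2_user.h for the authoritative argument layout):
 *
 *      struct dlb2_create_sched_domain_args args = {0};
 *      struct dlb2_cmd_response resp = {0};
 *      int ret;
 *
 *      args.num_ldb_queues = 2;
 *      args.num_ldb_ports = 4;     // drawn from any class-of-service
 *      args.num_dir_ports = 1;
 *      args.num_atomic_inflights = 64;
 *      args.num_hist_list_entries = 128;
 *      args.num_ldb_credits = 1024;
 *      args.num_dir_credits = 128;
 *
 *      ret = dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0);
 *      if (ret)
 *              // resp.status holds the DLB2_ST_* failure reason
 */
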
/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function need only be called for configuration that can occur after the
 * domain has started; prior to starting, applications can't send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
        DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
}

static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
                                     struct dlb2_dir_pq_pair *port)
{
        union dlb2_lsp_cq_dir_dsbl reg;

        reg.field.disabled = 1;

        DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);

        dlb2_flush_csr(hw);
}

static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
                                   struct dlb2_dir_pq_pair *port)
{
        union dlb2_lsp_cq_dir_tkn_cnt r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id));

        /*
         * Account for the initial token count, which is used to provide a CQ
         * with a depth of less than 8.
         */

        return r0.field.count - port->init_tkn_cnt;
}

static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
                             struct dlb2_dir_pq_pair *port)
{
        unsigned int port_id = port->id.phys_id;
        u32 cnt;

        /* Return any outstanding tokens */
        cnt = dlb2_dir_cq_token_count(hw, port);

        if (cnt != 0) {
                struct dlb2_hcw hcw_mem[8], *hcw;
                void  *pp_addr;

                pp_addr = os_map_producer_port(hw, port_id, false);

                /* Point hcw to a 64B-aligned location */
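                /*
                 * (hcw_mem holds eight 16B HCWs, i.e. 128B, so rounding
                 * &hcw_mem[4] down to a 64B boundary always leaves four
                 * in-bounds HCWs -- one full 64B store.)
                 */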
                hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

                /*
                 * Program the first HCW for a batch token return and
                 * the rest as NOOPS
                 */
                memset(hcw, 0, 4 * sizeof(*hcw));
                hcw->cq_token = 1;
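                /* lock_id carries the number of tokens to return, minus one */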
                hcw->lock_id = cnt - 1;

                dlb2_movdir64b(pp_addr, hcw);

                os_fence_hcw(hw, pp_addr);

                os_unmap_producer_port(hw, pp_addr);
        }

        return 0;
}

static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
                                    struct dlb2_dir_pq_pair *port)
{
        union dlb2_lsp_cq_dir_dsbl reg;

        reg.field.disabled = 0;

        DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);

        dlb2_flush_csr(hw);
}

static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
                                     struct dlb2_hw_domain *domain,
                                     bool toggle_port)
{
        struct dlb2_list_entry *iter;
        struct dlb2_dir_pq_pair *port;
        int ret;
        RTE_SET_USED(iter);

        DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
                /*
                 * Can't drain a port if it's not configured, and there's
                 * nothing to drain if its queue is unconfigured.
                 */
                if (!port->port_configured || !port->queue_configured)
                        continue;

                if (toggle_port)
                        dlb2_dir_port_cq_disable(hw, port);

                ret = dlb2_drain_dir_cq(hw, port);
                if (ret < 0)
                        return ret;

                if (toggle_port)
                        dlb2_dir_port_cq_enable(hw, port);
        }

        return 0;
}

static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
                                struct dlb2_dir_pq_pair *queue)
{
        union dlb2_lsp_qid_dir_enqueue_cnt r0;

        r0.val = DLB2_CSR_RD(hw,
                             DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));

        return r0.field.count;
}

static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
                                    struct dlb2_dir_pq_pair *queue)
{
        return dlb2_dir_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
                                         struct dlb2_hw_domain *domain)
{
        struct dlb2_list_entry *iter;
        struct dlb2_dir_pq_pair *queue;
        RTE_SET_USED(iter);

        DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
                if (!dlb2_dir_queue_is_empty(hw, queue))
                        return false;
        }

        return true;
}

static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
                                        struct dlb2_hw_domain *domain)
{
        int i, ret;

        /* If the domain hasn't been started, there's no traffic to drain */
        if (!domain->started)
                return 0;

        for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
                ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
                if (ret < 0)
                        return ret;

                if (dlb2_domain_dir_queues_empty(hw, domain))
                        break;
        }

        if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to empty queues\n",
                            __func__);
                return -EFAULT;
        }

        /*
         * Drain the CQs one more time. For the queues to have gone empty, the
         * device must have scheduled one or more QEs to the CQs, and those
         * QEs must be drained as well.
         */
        ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
        if (ret < 0)
                return ret;

        return 0;
}

static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
                                    struct dlb2_ldb_port *port)
{
        union dlb2_lsp_cq_ldb_dsbl reg;

        /*
         * Don't re-enable the port if a removal is pending. The caller should
         * mark this port as enabled (if it isn't already), and when the
         * removal completes the port will be enabled.
         */
        if (port->num_pending_removals)
                return;

        reg.field.disabled = 0;

        DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);

        dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
                                     struct dlb2_ldb_port *port)
{
        union dlb2_lsp_cq_ldb_dsbl reg;

        reg.field.disabled = 1;

        DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);

        dlb2_flush_csr(hw);
}

static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
                                      struct dlb2_ldb_port *port)
{
        union dlb2_lsp_cq_ldb_infl_cnt r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));

        return r0.field.count;
}

static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
                                   struct dlb2_ldb_port *port)
{
        union dlb2_lsp_cq_ldb_tkn_cnt r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id));

        /*
         * Account for the initial token count, which is used to provide a CQ
         * with a depth of less than 8.
         */

        return r0.field.token_count - port->init_tkn_cnt;
}

static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
        u32 infl_cnt, tkn_cnt;
        unsigned int i;

        infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
        tkn_cnt = dlb2_ldb_cq_token_count(hw, port);

        if (infl_cnt || tkn_cnt) {
                struct dlb2_hcw hcw_mem[8], *hcw;
                void  *pp_addr;

                pp_addr = os_map_producer_port(hw, port->id.phys_id, true);

                /* Point hcw to a 64B-aligned location */
                hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

                /*
                 * Program the first HCW for a completion and token return and
                 * the other HCWs as NOOPS
                 */

                memset(hcw, 0, 4 * sizeof(*hcw));
                hcw->qe_comp = (infl_cnt > 0);
                hcw->cq_token = (tkn_cnt > 0);
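                /* As in the directed drain, lock_id carries tkn_cnt - 1 */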
                hcw->lock_id = tkn_cnt - 1;

                /* Return tokens in the first HCW */
                dlb2_movdir64b(pp_addr, hcw);

                hcw->cq_token = 0;

                /* Issue remaining completions (if any) */
                for (i = 1; i < infl_cnt; i++)
                        dlb2_movdir64b(pp_addr, hcw);

                os_fence_hcw(hw, pp_addr);

                os_unmap_producer_port(hw, pp_addr);
        }

        return 0;
}

static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
                                     struct dlb2_hw_domain *domain,
                                     bool toggle_port)
{
        struct dlb2_list_entry *iter;
        struct dlb2_ldb_port *port;
        int ret, i;
        RTE_SET_USED(iter);

        /* If the domain hasn't been started, there's no traffic to drain */
        if (!domain->started)
                return 0;

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
                        if (toggle_port)
                                dlb2_ldb_port_cq_disable(hw, port);

                        ret = dlb2_drain_ldb_cq(hw, port);
                        if (ret < 0)
                                return ret;

                        if (toggle_port)
                                dlb2_ldb_port_cq_enable(hw, port);
                }
        }

        return 0;
}

static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
                                struct dlb2_ldb_queue *queue)
{
        union dlb2_lsp_qid_aqed_active_cnt r0;
        union dlb2_lsp_qid_atm_active r1;
        union dlb2_lsp_qid_ldb_enqueue_cnt r2;

        r0.val = DLB2_CSR_RD(hw,
                             DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
        r1.val = DLB2_CSR_RD(hw,
                             DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));

        r2.val = DLB2_CSR_RD(hw,
                             DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));

        return r0.field.count + r1.field.count + r2.field.count;
}

static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
                                    struct dlb2_ldb_queue *queue)
{
        return dlb2_ldb_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
                                            struct dlb2_hw_domain *domain)
{
        struct dlb2_list_entry *iter;
        struct dlb2_ldb_queue *queue;
        RTE_SET_USED(iter);

        DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
                if (queue->num_mappings == 0)
                        continue;

                if (!dlb2_ldb_queue_is_empty(hw, queue))
                        return false;
        }

        return true;
}

static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
                                           struct dlb2_hw_domain *domain)
{
        int i, ret;

        /* If the domain hasn't been started, there's no traffic to drain */
        if (!domain->started)
                return 0;

        if (domain->num_pending_removals > 0) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to unmap domain queues\n",
                            __func__);
                return -EFAULT;
        }

        for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
                ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
                if (ret < 0)
                        return ret;

                if (dlb2_domain_mapped_queues_empty(hw, domain))
                        break;
        }

        if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to empty queues\n",
                            __func__);
                return -EFAULT;
        }

        /*
         * Drain the CQs one more time. For the queues to have gone empty, the
         * device must have scheduled one or more QEs to the CQs, and those
         * QEs must be drained as well.
         */
        ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
        if (ret < 0)
                return ret;

        return 0;
}

static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
                                       struct dlb2_hw_domain *domain)
{
        struct dlb2_list_entry *iter;
        struct dlb2_ldb_port *port;
        int i;
        RTE_SET_USED(iter);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
                        port->enabled = true;

                        dlb2_ldb_port_cq_enable(hw, port);
                }
        }
}

static struct dlb2_ldb_queue *
dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
                           u32 id,
                           bool vdev_req,
                           unsigned int vdev_id)
{
        struct dlb2_list_entry *iter1;
        struct dlb2_list_entry *iter2;
        struct dlb2_function_resources *rsrcs;
        struct dlb2_hw_domain *domain;
        struct dlb2_ldb_queue *queue;
        RTE_SET_USED(iter1);
        RTE_SET_USED(iter2);

        if (id >= DLB2_MAX_NUM_LDB_QUEUES)
                return NULL;

        rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

        if (!vdev_req)
                return &hw->rsrcs.ldb_queues[id];

        DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
                DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
                        if (queue->id.virt_id == id)
                                return queue;
        }

        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
                if (queue->id.virt_id == id)
                        return queue;

        return NULL;
}

static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
                                                      u32 id,
                                                      bool vdev_req,
                                                      unsigned int vdev_id)
{
        struct dlb2_list_entry *iteration;
        struct dlb2_function_resources *rsrcs;
        struct dlb2_hw_domain *domain;
        RTE_SET_USED(iteration);

        if (id >= DLB2_MAX_NUM_DOMAINS)
                return NULL;

        if (!vdev_req)
                return &hw->domains[id];

        rsrcs = &hw->vdev[vdev_id];

        DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
                if (domain->id.virt_id == id)
                        return domain;

        return NULL;
}

static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
                                           struct dlb2_ldb_port *port,
                                           struct dlb2_ldb_queue *queue,
                                           int slot,
                                           enum dlb2_qid_map_state new_state)
{
        enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
        struct dlb2_hw_domain *domain;
        int domain_id;

        domain_id = port->domain_id.phys_id;

        domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
        if (domain == NULL) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: unable to find domain %d\n",
                            __func__, domain_id);
                return -EINVAL;
        }

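        /*
         * Update the mapping and pending-operation counts for this
         * (curr_state, new_state) pair; any pair not listed in the nested
         * switches below is rejected as an internal error.
         */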
1425         switch (curr_state) {
1426         case DLB2_QUEUE_UNMAPPED:
1427                 switch (new_state) {
1428                 case DLB2_QUEUE_MAPPED:
1429                         queue->num_mappings++;
1430                         port->num_mappings++;
1431                         break;
1432                 case DLB2_QUEUE_MAP_IN_PROG:
1433                         queue->num_pending_additions++;
1434                         domain->num_pending_additions++;
1435                         break;
1436                 default:
1437                         goto error;
1438                 }
1439                 break;
1440         case DLB2_QUEUE_MAPPED:
1441                 switch (new_state) {
1442                 case DLB2_QUEUE_UNMAPPED:
1443                         queue->num_mappings--;
1444                         port->num_mappings--;
1445                         break;
1446                 case DLB2_QUEUE_UNMAP_IN_PROG:
1447                         port->num_pending_removals++;
1448                         domain->num_pending_removals++;
1449                         break;
1450                 case DLB2_QUEUE_MAPPED:
1451                         /* Priority change, nothing to update */
1452                         break;
1453                 default:
1454                         goto error;
1455                 }
1456                 break;
1457         case DLB2_QUEUE_MAP_IN_PROG:
1458                 switch (new_state) {
1459                 case DLB2_QUEUE_UNMAPPED:
1460                         queue->num_pending_additions--;
1461                         domain->num_pending_additions--;
1462                         break;
1463                 case DLB2_QUEUE_MAPPED:
1464                         queue->num_mappings++;
1465                         port->num_mappings++;
1466                         queue->num_pending_additions--;
1467                         domain->num_pending_additions--;
1468                         break;
1469                 default:
1470                         goto error;
1471                 }
1472                 break;
1473         case DLB2_QUEUE_UNMAP_IN_PROG:
1474                 switch (new_state) {
1475                 case DLB2_QUEUE_UNMAPPED:
1476                         port->num_pending_removals--;
1477                         domain->num_pending_removals--;
1478                         queue->num_mappings--;
1479                         port->num_mappings--;
1480                         break;
1481                 case DLB2_QUEUE_MAPPED:
1482                         port->num_pending_removals--;
1483                         domain->num_pending_removals--;
1484                         break;
1485                 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1486                         /* Nothing to update */
1487                         break;
1488                 default:
1489                         goto error;
1490                 }
1491                 break;
1492         case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1493                 switch (new_state) {
1494                 case DLB2_QUEUE_UNMAP_IN_PROG:
1495                         /* Nothing to update */
1496                         break;
1497                 case DLB2_QUEUE_UNMAPPED:
1498                         /*
1499                          * An UNMAP_IN_PROG_PENDING_MAP slot briefly
1500                          * becomes UNMAPPED before it transitions to
1501                          * MAP_IN_PROG.
1502                          */
1503                         queue->num_mappings--;
1504                         port->num_mappings--;
1505                         port->num_pending_removals--;
1506                         domain->num_pending_removals--;
1507                         break;
1508                 default:
1509                         goto error;
1510                 }
1511                 break;
1512         default:
1513                 goto error;
1514         }
1515
1516         port->qid_map[slot].state = new_state;
1517
1518         DLB2_HW_DBG(hw,
1519                     "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
1520                     __func__, queue->id.phys_id, port->id.phys_id,
1521                     curr_state, new_state);
1522         return 0;
1523
1524 error:
1525         DLB2_HW_ERR(hw,
1526                     "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
1527                     __func__, queue->id.phys_id, port->id.phys_id,
1528                     curr_state, new_state);
1529         return -EFAULT;
1530 }
1531
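/*
 * Find the first slot in the port's QID-map table that is in the given
 * state. Returns true and writes the index through *slot on a hit; on a
 * miss it returns false with *slot set to DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
 * so callers must range-check the index before using it.
 */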
1532 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
1533                                 enum dlb2_qid_map_state state,
1534                                 int *slot)
1535 {
1536         int i;
1537
1538         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1539                 if (port->qid_map[i].state == state)
1540                         break;
1541         }
1542
1543         *slot = i;
1544
1545         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1546 }
1547
1548 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
1549                                       enum dlb2_qid_map_state state,
1550                                       struct dlb2_ldb_queue *queue,
1551                                       int *slot)
1552 {
1553         int i;
1554
1555         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1556                 if (port->qid_map[i].state == state &&
1557                     port->qid_map[i].qid == queue->id.phys_id)
1558                         break;
1559         }
1560
1561         *slot = i;
1562
1563         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1564 }
1565
1566 /*
1567  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
1568  * their function names imply, and should only be called by the dynamic CQ
1569  * mapping code.
1570  */
1571 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
1572                                               struct dlb2_hw_domain *domain,
1573                                               struct dlb2_ldb_queue *queue)
1574 {
1575         struct dlb2_list_entry *iter;
1576         struct dlb2_ldb_port *port;
1577         int slot, i;
1578         RTE_SET_USED(iter);
1579
1580         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1581                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1582                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1583
1584                         if (!dlb2_port_find_slot_queue(port, state,
1585                                                        queue, &slot))
1586                                 continue;
1587
1588                         if (port->enabled)
1589                                 dlb2_ldb_port_cq_disable(hw, port);
1590                 }
1591         }
1592 }
1593
1594 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
1595                                              struct dlb2_hw_domain *domain,
1596                                              struct dlb2_ldb_queue *queue)
1597 {
1598         struct dlb2_list_entry *iter;
1599         struct dlb2_ldb_port *port;
1600         int slot, i;
1601         RTE_SET_USED(iter);
1602
1603         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1604                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1605                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1606
1607                         if (!dlb2_port_find_slot_queue(port, state,
1608                                                        queue, &slot))
1609                                 continue;
1610
1611                         if (port->enabled)
1612                                 dlb2_ldb_port_cq_enable(hw, port);
1613                 }
1614         }
1615 }
1616
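/*
 * The queue_if_status and has_work helpers below all drive the
 * LSP_LDB_SCHED_CTRL command register: "value" carries the data bit and
 * each *_v field (inflight_ok_v, rlist_haswork_v, nalb_haswork_v) selects
 * which per-{CQ, slot} flag that bit is written to.
 */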
1617 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
1618                                                 struct dlb2_ldb_port *port,
1619                                                 int slot)
1620 {
1621         union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
1622
1623         r0.field.cq = port->id.phys_id;
1624         r0.field.qidix = slot;
1625         r0.field.value = 0;
1626         r0.field.inflight_ok_v = 1;
1627
1628         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
1629
1630         dlb2_flush_csr(hw);
1631 }
1632
1633 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
1634                                               struct dlb2_ldb_port *port,
1635                                               int slot)
1636 {
1637         union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
1638
1639         r0.field.cq = port->id.phys_id;
1640         r0.field.qidix = slot;
1641         r0.field.value = 1;
1642         r0.field.inflight_ok_v = 1;
1643
1644         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
1645
1646         dlb2_flush_csr(hw);
1647 }
1648
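/*
 * Establish a queue->port mapping directly in hardware: set the slot's
 * valid bit and 3-bit priority in CQ2PRIOV, write the queue ID into the
 * slot's entry in CQ2QID (slots 0-3 live in CQ2QID0, slots 4-7 in
 * CQ2QID1), and set the port's bit in the queue's ATM/LSP QID2CQIDX
 * tables.
 */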
1649 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
1650                                         struct dlb2_ldb_port *p,
1651                                         struct dlb2_ldb_queue *q,
1652                                         u8 priority)
1653 {
1654         union dlb2_lsp_cq2priov r0;
1655         union dlb2_lsp_cq2qid0 r1;
1656         union dlb2_atm_qid2cqidix_00 r2;
1657         union dlb2_lsp_qid2cqidix_00 r3;
1658         union dlb2_lsp_qid2cqidix2_00 r4;
1659         enum dlb2_qid_map_state state;
1660         int i;
1661
1662         /* Look for a pending or already mapped slot, else an unused slot */
1663         if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
1664             !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
1665             !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
1666                 DLB2_HW_ERR(hw,
1667                             "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
1668                             __func__, __LINE__);
1669                 return -EFAULT;
1670         }
1671
1672         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1673                 DLB2_HW_ERR(hw,
1674                             "[%s():%d] Internal error: port slot tracking failed\n",
1675                             __func__, __LINE__);
1676                 return -EFAULT;
1677         }
1678
1679         /* Read-modify-write the priority and valid bit register */
1680         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));
1681
1682         r0.field.v |= 1 << i;
1683         r0.field.prio |= (priority & 0x7) << (i * 3);
1684
1685         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);
1686
1687         /* Read-modify-write the QID map register */
1688         if (i < 4)
1689                 r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
1690         else
1691                 r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));
1692
1693         if (i == 0 || i == 4)
1694                 r1.field.qid_p0 = q->id.phys_id;
1695         if (i == 1 || i == 5)
1696                 r1.field.qid_p1 = q->id.phys_id;
1697         if (i == 2 || i == 6)
1698                 r1.field.qid_p2 = q->id.phys_id;
1699         if (i == 3 || i == 7)
1700                 r1.field.qid_p3 = q->id.phys_id;
1701
1702         if (i < 4)
1703                 DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
1704         else
1705                 DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);
1706
1707         r2.val = DLB2_CSR_RD(hw,
1708                              DLB2_ATM_QID2CQIDIX(q->id.phys_id,
1709                                                  p->id.phys_id / 4));
1710
1711         r3.val = DLB2_CSR_RD(hw,
1712                              DLB2_LSP_QID2CQIDIX(q->id.phys_id,
1713                                                  p->id.phys_id / 4));
1714
1715         r4.val = DLB2_CSR_RD(hw,
1716                              DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
1717                                                   p->id.phys_id / 4));
1718
1719         switch (p->id.phys_id % 4) {
1720         case 0:
1721                 r2.field.cq_p0 |= 1 << i;
1722                 r3.field.cq_p0 |= 1 << i;
1723                 r4.field.cq_p0 |= 1 << i;
1724                 break;
1725
1726         case 1:
1727                 r2.field.cq_p1 |= 1 << i;
1728                 r3.field.cq_p1 |= 1 << i;
1729                 r4.field.cq_p1 |= 1 << i;
1730                 break;
1731
1732         case 2:
1733                 r2.field.cq_p2 |= 1 << i;
1734                 r3.field.cq_p2 |= 1 << i;
1735                 r4.field.cq_p2 |= 1 << i;
1736                 break;
1737
1738         case 3:
1739                 r2.field.cq_p3 |= 1 << i;
1740                 r3.field.cq_p3 |= 1 << i;
1741                 r4.field.cq_p3 |= 1 << i;
1742                 break;
1743         }
1744
1745         DLB2_CSR_WR(hw,
1746                     DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1747                     r2.val);
1748
1749         DLB2_CSR_WR(hw,
1750                     DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1751                     r3.val);
1752
1753         DLB2_CSR_WR(hw,
1754                     DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
1755                     r4.val);
1756
1757         dlb2_flush_csr(hw);
1758
1759         p->qid_map[i].qid = q->id.phys_id;
1760         p->qid_map[i].priority = priority;
1761
1762         state = DLB2_QUEUE_MAPPED;
1763
1764         return dlb2_port_slot_state_transition(hw, p, q, i, state);
1765 }
1766
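/*
 * Seed the new slot's "has work" indications from the queue's current
 * occupancy: rlist_haswork_v is set if the queue's AQED active count is
 * non-zero, nalb_haswork_v if its enqueue count is non-zero.
 */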
1767 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
1768                                            struct dlb2_ldb_port *port,
1769                                            struct dlb2_ldb_queue *queue,
1770                                            int slot)
1771 {
1772         union dlb2_lsp_qid_aqed_active_cnt r0;
1773         union dlb2_lsp_qid_ldb_enqueue_cnt r1;
1774         union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
1775
1776         /* Set the atomic scheduling haswork bit */
1777         r0.val = DLB2_CSR_RD(hw,
1778                              DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
1779
1780         r2.field.cq = port->id.phys_id;
1781         r2.field.qidix = slot;
1782         r2.field.value = 1;
1783         r2.field.rlist_haswork_v = r0.field.count > 0;
1784
1785         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1786
1787         /* Set the non-atomic scheduling haswork bit */
1788         r1.val = DLB2_CSR_RD(hw,
1789                              DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
1790
1791         memset(&r2, 0, sizeof(r2));
1792
1793         r2.field.cq = port->id.phys_id;
1794         r2.field.qidix = slot;
1795         r2.field.value = 1;
1796         r2.field.nalb_haswork_v = (r1.field.count > 0);
1797
1798         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1799
1800         dlb2_flush_csr(hw);
1801
1802         return 0;
1803 }
1804
1805 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
1806                                               struct dlb2_ldb_port *port,
1807                                               u8 slot)
1808 {
1809         union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
1810
1811         r2.field.cq = port->id.phys_id;
1812         r2.field.qidix = slot;
1813         r2.field.value = 0;
1814         r2.field.rlist_haswork_v = 1;
1815
1816         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1817
1818         memset(&r2, 0, sizeof(r2));
1819
1820         r2.field.cq = port->id.phys_id;
1821         r2.field.qidix = slot;
1822         r2.field.value = 0;
1823         r2.field.nalb_haswork_v = 1;
1824
1825         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1826
1827         dlb2_flush_csr(hw);
1828 }
1829
1830 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
1831                                               struct dlb2_ldb_queue *queue)
1832 {
1833         union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };
1834
1835         r0.field.limit = queue->num_qid_inflights;
1836
1837         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
1838 }
1839
1840 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
1841                                                 struct dlb2_ldb_queue *queue)
1842 {
1843         DLB2_CSR_WR(hw,
1844                     DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
1845                     DLB2_LSP_QID_LDB_INFL_LIM_RST);
1846 }
1847
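/*
 * Second half of a dynamic map, run once the queue's inflight count has
 * drained to zero: perform the static register mapping, set the slot's
 * has-work bits, restore the IF_status of every CQ already mapped to the
 * queue, and re-enable those CQs. The queue's inflight limit is restored
 * unless more map operations are still pending against it.
 */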
1848 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
1849                                                 struct dlb2_hw_domain *domain,
1850                                                 struct dlb2_ldb_port *port,
1851                                                 struct dlb2_ldb_queue *queue)
1852 {
1853         struct dlb2_list_entry *iter;
1854         union dlb2_lsp_qid_ldb_infl_cnt r0;
1855         enum dlb2_qid_map_state state;
1856         int slot, ret, i;
1857         u8 prio;
1858         RTE_SET_USED(iter);
1859
1860         r0.val = DLB2_CSR_RD(hw,
1861                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
1862
1863         if (r0.field.count) {
1864                 DLB2_HW_ERR(hw,
1865                             "[%s()] Internal error: non-zero QID inflight count\n",
1866                             __func__);
1867                 return -EINVAL;
1868         }
1869
1870         /*
1871          * Statically map the port and set its corresponding has_work bits.
1872          */
1873         state = DLB2_QUEUE_MAP_IN_PROG;
1874         if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
1875                 return -EINVAL;
1876
1877         if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1878                 DLB2_HW_ERR(hw,
1879                             "[%s():%d] Internal error: port slot tracking failed\n",
1880                             __func__, __LINE__);
1881                 return -EFAULT;
1882         }
1883
1884         prio = port->qid_map[slot].priority;
1885
1886         /*
1887          * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
1888          * the port's qid_map state.
1889          */
1890         ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
1891         if (ret)
1892                 return ret;
1893
1894         ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
1895         if (ret)
1896                 return ret;
1897
1898         /*
1899          * Ensure IF_status(cq,qid) is 0 before enabling the port, to
1900          * prevent spurious schedules from increasing the queue's inflight
1901          * count.
1902          */
1903         dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
1904
1905         /* Reset the queue's inflight status */
1906         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1907                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1908                         state = DLB2_QUEUE_MAPPED;
1909                         if (!dlb2_port_find_slot_queue(port, state,
1910                                                        queue, &slot))
1911                                 continue;
1912
1913                         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
1914                 }
1915         }
1916
1917         dlb2_ldb_queue_set_inflight_limit(hw, queue);
1918
1919         /* Re-enable CQs mapped to this queue */
1920         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
1921
1922         /* If this queue has other mappings pending, clear its inflight limit */
1923         if (queue->num_pending_additions > 0)
1924                 dlb2_ldb_queue_clear_inflight_limit(hw, queue);
1925
1926         return 0;
1927 }
1928
1929 /**
1930  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
1931  * @hw: dlb2_hw handle for a particular device.
1932  * @port: load-balanced port
1933  * @queue: load-balanced queue
1934  * @priority: queue servicing priority
1935  *
1936  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
1937  * at a later point, and <0 if an error occurred.
1938  */
1939 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
1940                                          struct dlb2_ldb_port *port,
1941                                          struct dlb2_ldb_queue *queue,
1942                                          u8 priority)
1943 {
1944         union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
1945         enum dlb2_qid_map_state state;
1946         struct dlb2_hw_domain *domain;
1947         int domain_id, slot, ret;
1948
1949         domain_id = port->domain_id.phys_id;
1950
1951         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1952         if (domain == NULL) {
1953                 DLB2_HW_ERR(hw,
1954                             "[%s()] Internal error: unable to find domain %d\n",
1955                             __func__, port->domain_id.phys_id);
1956                 return -EINVAL;
1957         }
1958
1959         /*
1960          * Set the QID inflight limit to 0 to prevent further scheduling of the
1961          * queue.
1962          */
1963         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);
1964
1965         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
1966                 DLB2_HW_ERR(hw,
1967                             "Internal error: No available unmapped slots\n");
1968                 return -EFAULT;
1969         }
1970
1971         if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1972                 DLB2_HW_ERR(hw,
1973                             "[%s():%d] Internal error: port slot tracking failed\n",
1974                             __func__, __LINE__);
1975                 return -EFAULT;
1976         }
1977
1978         port->qid_map[slot].qid = queue->id.phys_id;
1979         port->qid_map[slot].priority = priority;
1980
1981         state = DLB2_QUEUE_MAP_IN_PROG;
1982         ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
1983         if (ret)
1984                 return ret;
1985
1986         r0.val = DLB2_CSR_RD(hw,
1987                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
1988
1989         if (r0.field.count) {
1990                 /*
1991                  * The queue is owed completions so it's not safe to map it
1992                  * yet. Schedule a kernel thread to complete the mapping later,
1993                  * once software has completed all the queue's inflight events.
1994                  */
1995                 if (!os_worker_active(hw))
1996                         os_schedule_work(hw);
1997
1998                 return 1;
1999         }
2000
2001         /*
2002          * Disable the affected CQ, and the CQs already mapped to the QID,
2003          * before reading the QID's inflight count a second time. There is an
2004          * unlikely race in which the QID may schedule one more QE after we
2005          * read an inflight count of 0, and disabling the CQs guarantees that
2006          * the race will not occur after a re-read of the inflight count
2007          * register.
2008          */
2009         if (port->enabled)
2010                 dlb2_ldb_port_cq_disable(hw, port);
2011
2012         dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2013
2014         r0.val = DLB2_CSR_RD(hw,
2015                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
2016
2017         if (r0.field.count) {
2018                 if (port->enabled)
2019                         dlb2_ldb_port_cq_enable(hw, port);
2020
2021                 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2022
2023                 /*
2024                  * The queue is owed completions so it's not safe to map it
2025                  * yet. Schedule a kernel thread to complete the mapping later,
2026                  * once software has completed all the queue's inflight events.
2027                  */
2028                 if (!os_worker_active(hw))
2029                         os_schedule_work(hw);
2030
2031                 return 1;
2032         }
2033
2034         return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2035 }
2036
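/*
 * Try to complete each of this port's in-progress map operations. A slot
 * can be finished only if its queue's inflight count is zero both before
 * and after the relevant CQs are disabled; otherwise it stays in
 * DLB2_QUEUE_MAP_IN_PROG and is retried on a later call.
 */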
2037 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
2038                                         struct dlb2_hw_domain *domain,
2039                                         struct dlb2_ldb_port *port)
2040 {
2041         int i;
2042
2043         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2044                 union dlb2_lsp_qid_ldb_infl_cnt r0;
2045                 struct dlb2_ldb_queue *queue;
2046                 int qid;
2047
2048                 if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
2049                         continue;
2050
2051                 qid = port->qid_map[i].qid;
2052
2053                 queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
2054
2055                 if (queue == NULL) {
2056                         DLB2_HW_ERR(hw,
2057                                     "[%s()] Internal error: unable to find queue %d\n",
2058                                     __func__, qid);
2059                         continue;
2060                 }
2061
2062                 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
2063
2064                 if (r0.field.count)
2065                         continue;
2066
2067                 /*
2068                  * Disable the affected CQ, and the CQs already mapped to the
2069                  * QID, before reading the QID's inflight count a second time.
2070                  * There is an unlikely race in which the QID may schedule one
2071                  * more QE after we read an inflight count of 0, and disabling
2072                  * the CQs guarantees that the race will not occur after a
2073                  * re-read of the inflight count register.
2074                  */
2075                 if (port->enabled)
2076                         dlb2_ldb_port_cq_disable(hw, port);
2077
2078                 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2079
2080                 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
2081
2082                 if (r0.field.count) {
2083                         if (port->enabled)
2084                                 dlb2_ldb_port_cq_enable(hw, port);
2085
2086                         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2087
2088                         continue;
2089                 }
2090
2091                 dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2092         }
2093 }
2094
2095 static unsigned int
2096 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
2097                                       struct dlb2_hw_domain *domain)
2098 {
2099         struct dlb2_list_entry *iter;
2100         struct dlb2_ldb_port *port;
2101         int i;
2102         RTE_SET_USED(iter);
2103
2104         if (!domain->configured || domain->num_pending_additions == 0)
2105                 return 0;
2106
2107         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2108                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2109                         dlb2_domain_finish_map_port(hw, domain, port);
2110         }
2111
2112         return domain->num_pending_additions;
2113 }
2114
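/*
 * Tear down a queue->port mapping in hardware: clear the slot's valid bit
 * in CQ2PRIOV and the port's bit in the queue's ATM/LSP QID2CQIDX tables,
 * then transition the slot to DLB2_QUEUE_UNMAPPED.
 */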
2115 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
2116                                    struct dlb2_ldb_port *port,
2117                                    struct dlb2_ldb_queue *queue)
2118 {
2119         enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
2120         union dlb2_lsp_cq2priov r0;
2121         union dlb2_atm_qid2cqidix_00 r1;
2122         union dlb2_lsp_qid2cqidix_00 r2;
2123         union dlb2_lsp_qid2cqidix2_00 r3;
2124         u32 queue_id;
2125         u32 port_id;
2126         int i;
2127
2128         /* Find the queue's slot */
2129         mapped = DLB2_QUEUE_MAPPED;
2130         in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
2131         pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
2132
2133         if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
2134             !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
2135             !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
2136                 DLB2_HW_ERR(hw,
2137                             "[%s():%d] Internal error: QID %d isn't mapped\n",
2138                             __func__, __LINE__, queue->id.phys_id);
2139                 return -EFAULT;
2140         }
2141
2142         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2143                 DLB2_HW_ERR(hw,
2144                             "[%s():%d] Internal error: port slot tracking failed\n",
2145                             __func__, __LINE__);
2146                 return -EFAULT;
2147         }
2148
2149         port_id = port->id.phys_id;
2150         queue_id = queue->id.phys_id;
2151
2152         /* Read-modify-write the priority and valid bit register */
2153         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));
2154
2155         r0.field.v &= ~(1 << i);
2156
2157         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);
2158
2159         r1.val = DLB2_CSR_RD(hw,
2160                              DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));
2161
2162         r2.val = DLB2_CSR_RD(hw,
2163                              DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));
2164
2165         r3.val = DLB2_CSR_RD(hw,
2166                              DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));
2167
2168         switch (port_id % 4) {
2169         case 0:
2170                 r1.field.cq_p0 &= ~(1 << i);
2171                 r2.field.cq_p0 &= ~(1 << i);
2172                 r3.field.cq_p0 &= ~(1 << i);
2173                 break;
2174
2175         case 1:
2176                 r1.field.cq_p1 &= ~(1 << i);
2177                 r2.field.cq_p1 &= ~(1 << i);
2178                 r3.field.cq_p1 &= ~(1 << i);
2179                 break;
2180
2181         case 2:
2182                 r1.field.cq_p2 &= ~(1 << i);
2183                 r2.field.cq_p2 &= ~(1 << i);
2184                 r3.field.cq_p2 &= ~(1 << i);
2185                 break;
2186
2187         case 3:
2188                 r1.field.cq_p3 &= ~(1 << i);
2189                 r2.field.cq_p3 &= ~(1 << i);
2190                 r3.field.cq_p3 &= ~(1 << i);
2191                 break;
2192         }
2193
2194         DLB2_CSR_WR(hw,
2195                     DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
2196                     r1.val);
2197
2198         DLB2_CSR_WR(hw,
2199                     DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
2200                     r2.val);
2201
2202         DLB2_CSR_WR(hw,
2203                     DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
2204                     r3.val);
2205
2206         dlb2_flush_csr(hw);
2207
2208         unmapped = DLB2_QUEUE_UNMAPPED;
2209
2210         return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
2211 }
2212
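/*
 * Map a queue to a port: a direct register write suffices while the domain
 * has not yet been started; once it has, the dynamic procedure above is
 * required so the queue can be quiesced first.
 */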
2213 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
2214                                  struct dlb2_hw_domain *domain,
2215                                  struct dlb2_ldb_port *port,
2216                                  struct dlb2_ldb_queue *queue,
2217                                  u8 prio)
2218 {
2219         if (domain->started)
2220                 return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
2221         else
2222                 return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2223 }
2224
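/*
 * Complete one slot's unmap: update the mapping registers, clear the
 * slot's has-work bits, reset its IF_status, re-enable the CQ unless the
 * user disabled it, and, if a map request was queued behind this unmap,
 * start that map immediately.
 */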
2225 static void
2226 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
2227                                    struct dlb2_hw_domain *domain,
2228                                    struct dlb2_ldb_port *port,
2229                                    int slot)
2230 {
2231         enum dlb2_qid_map_state state;
2232         struct dlb2_ldb_queue *queue;
2233
2234         queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
2235
2236         state = port->qid_map[slot].state;
2237
2238         /* Update the QID2CQIDX and CQ2QID vectors */
2239         dlb2_ldb_port_unmap_qid(hw, port, queue);
2240
2241         /*
2242          * Ensure the QID will not be serviced by this {CQ, slot} by clearing
2243          * the has_work bits
2244          */
2245         dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
2246
2247         /* Reset the {CQ, slot} to its default state */
2248         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2249
2250         /* Re-enable the CQ if it wasn't manually disabled by the user */
2251         if (port->enabled)
2252                 dlb2_ldb_port_cq_enable(hw, port);
2253
2254         /*
2255          * If there is a mapping that is pending this slot's removal, perform
2256          * the mapping now.
2257          */
2258         if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
2259                 struct dlb2_ldb_port_qid_map *map;
2260                 struct dlb2_ldb_queue *map_queue;
2261                 u8 prio;
2262
2263                 map = &port->qid_map[slot];
2264
2265                 map->qid = map->pending_qid;
2266                 map->priority = map->pending_priority;
2267
2268                 map_queue = &hw->rsrcs.ldb_queues[map->qid];
2269                 prio = map->priority;
2270
2271                 dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
2272         }
2273 }
2274
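/*
 * A port's pending unmaps can be finalized only once its CQ has no
 * outstanding inflight events. If the CQ has drained, complete every slot
 * in an unmap-in-progress state and return true; otherwise return false.
 */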
2275 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
2276                                           struct dlb2_hw_domain *domain,
2277                                           struct dlb2_ldb_port *port)
2278 {
2279         union dlb2_lsp_cq_ldb_infl_cnt r0;
2280         int i;
2281
2282         if (port->num_pending_removals == 0)
2283                 return false;
2284
2285         /*
2286          * The unmap requires all the CQ's outstanding inflights to be
2287          * completed.
2288          */
2289         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
2290         if (r0.field.count > 0)
2291                 return false;
2292
2293         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2294                 struct dlb2_ldb_port_qid_map *map;
2295
2296                 map = &port->qid_map[i];
2297
2298                 if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
2299                     map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
2300                         continue;
2301
2302                 dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
2303         }
2304
2305         return true;
2306 }
2307
2308 static unsigned int
2309 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
2310                                         struct dlb2_hw_domain *domain)
2311 {
2312         struct dlb2_list_entry *iter;
2313         struct dlb2_ldb_port *port;
2314         int i;
2315         RTE_SET_USED(iter);
2316
2317         if (!domain->configured || domain->num_pending_removals == 0)
2318                 return 0;
2319
2320         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2321                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2322                         dlb2_domain_finish_unmap_port(hw, domain, port);
2323         }
2324
2325         return domain->num_pending_removals;
2326 }
2327
2328 static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
2329                                         struct dlb2_hw_domain *domain)
2330 {
2331         struct dlb2_list_entry *iter;
2332         struct dlb2_ldb_port *port;
2333         int i;
2334         RTE_SET_USED(iter);
2335
2336         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2337                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2338                         port->enabled = false;
2339
2340                         dlb2_ldb_port_cq_disable(hw, port);
2341                 }
2342         }
2343 }
2344
2345 static void dlb2_log_reset_domain(struct dlb2_hw *hw,
2346                                   u32 domain_id,
2347                                   bool vdev_req,
2348                                   unsigned int vdev_id)
2349 {
2350         DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
2351         if (vdev_req)
2352                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2353         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2354 }
2355
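/*
 * Clear the valid bit of each directed port's virtual producer port (VPP),
 * blocking further enqueues from the vdev. Under SR-IOV the VPP is indexed
 * by the port's virtual ID; under Scalable IOV the physical ID is used,
 * since PP accesses arrive through the PF MMIO window.
 */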
2356 static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
2357                                          struct dlb2_hw_domain *domain,
2358                                          unsigned int vdev_id)
2359 {
2360         struct dlb2_list_entry *iter;
2361         union dlb2_sys_vf_dir_vpp_v r1;
2362         struct dlb2_dir_pq_pair *port;
2363         RTE_SET_USED(iter);
2364
2365         r1.field.vpp_v = 0;
2366
2367         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2368                 unsigned int offs;
2369                 u32 virt_id;
2370
2371                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2372                         virt_id = port->id.virt_id;
2373                 else
2374                         virt_id = port->id.phys_id;
2375
2376                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
2377
2378                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);
2379         }
2380 }
2381
2382 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2383                                          struct dlb2_hw_domain *domain,
2384                                          unsigned int vdev_id)
2385 {
2386         struct dlb2_list_entry *iter;
2387         union dlb2_sys_vf_ldb_vpp_v r1;
2388         struct dlb2_ldb_port *port;
2389         int i;
2390         RTE_SET_USED(iter);
2391
2392         r1.field.vpp_v = 0;
2393
2394         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2395                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2396                         unsigned int offs;
2397                         u32 virt_id;
2398
2399                         if (hw->virt_mode == DLB2_VIRT_SRIOV)
2400                                 virt_id = port->id.virt_id;
2401                         else
2402                                 virt_id = port->id.phys_id;
2403
2404                         offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2405
2406                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r1.val);
2407                 }
2408         }
2409 }
2410
2411 static void
2412 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2413                                         struct dlb2_hw_domain *domain)
2414 {
2415         struct dlb2_list_entry *iter;
2416         union dlb2_chp_ldb_cq_int_enb r0 = { {0} };
2417         union dlb2_chp_ldb_cq_wd_enb r1 = { {0} };
2418         struct dlb2_ldb_port *port;
2419         int i;
2420         RTE_SET_USED(iter);
2421
2422         r0.field.en_tim = 0;
2423         r0.field.en_depth = 0;
2424
2425         r1.field.wd_enable = 0;
2426
2427         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2428                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2429                         DLB2_CSR_WR(hw,
2430                                     DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
2431                                     r0.val);
2432
2433                         DLB2_CSR_WR(hw,
2434                                     DLB2_CHP_LDB_CQ_WD_ENB(port->id.phys_id),
2435                                     r1.val);
2436                 }
2437         }
2438 }
2439
2440 static void
2441 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2442                                         struct dlb2_hw_domain *domain)
2443 {
2444         struct dlb2_list_entry *iter;
2445         union dlb2_chp_dir_cq_int_enb r0 = { {0} };
2446         union dlb2_chp_dir_cq_wd_enb r1 = { {0} };
2447         struct dlb2_dir_pq_pair *port;
2448         RTE_SET_USED(iter);
2449
2450         r0.field.en_tim = 0;
2451         r0.field.en_depth = 0;
2452
2453         r1.field.wd_enable = 0;
2454
2455         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2456                 DLB2_CSR_WR(hw,
2457                             DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
2458                             r0.val);
2459
2460                 DLB2_CSR_WR(hw,
2461                             DLB2_CHP_DIR_CQ_WD_ENB(port->id.phys_id),
2462                             r1.val);
2463         }
2464 }
2465
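/*
 * Revoke write access to the domain's load-balanced queues by clearing
 * each queue's VAS-to-QID valid bit; for vdev-owned queues the virtual
 * queue-ID translation entries are torn down as well.
 */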
2466 static void
2467 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2468                                           struct dlb2_hw_domain *domain)
2469 {
2470         int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2471         struct dlb2_list_entry *iter;
2472         struct dlb2_ldb_queue *queue;
2473         RTE_SET_USED(iter);
2474
2475         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2476                 union dlb2_sys_ldb_vasqid_v r0 = { {0} };
2477                 union dlb2_sys_ldb_qid2vqid r1 = { {0} };
2478                 union dlb2_sys_vf_ldb_vqid_v r2 = { {0} };
2479                 union dlb2_sys_vf_ldb_vqid2qid r3 = { {0} };
2480                 int idx;
2481
2482                 idx = domain_offset + queue->id.phys_id;
2483
2484                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), r0.val);
2485
2486                 if (queue->id.vdev_owned) {
2487                         DLB2_CSR_WR(hw,
2488                                     DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
2489                                     r1.val);
2490
2491                         idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2492                                 queue->id.virt_id;
2493
2494                         DLB2_CSR_WR(hw,
2495                                     DLB2_SYS_VF_LDB_VQID_V(idx),
2496                                     r2.val);
2497
2498                         DLB2_CSR_WR(hw,
2499                                     DLB2_SYS_VF_LDB_VQID2QID(idx),
2500                                     r3.val);
2501                 }
2502         }
2503 }
2504
2505 static void
2506 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2507                                           struct dlb2_hw_domain *domain)
2508 {
2509         int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS;
2510         struct dlb2_list_entry *iter;
2511         struct dlb2_dir_pq_pair *queue;
2512         RTE_SET_USED(iter);
2513
2514         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2515                 union dlb2_sys_dir_vasqid_v r0 = { {0} };
2516                 union dlb2_sys_vf_dir_vqid_v r1 = { {0} };
2517                 union dlb2_sys_vf_dir_vqid2qid r2 = { {0} };
2518                 int idx;
2519
2520                 idx = domain_offset + queue->id.phys_id;
2521
2522                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);
2523
2524                 if (queue->id.vdev_owned) {
2525                         idx = queue->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS +
2526                                 queue->id.virt_id;
2527
2528                         DLB2_CSR_WR(hw,
2529                                     DLB2_SYS_VF_DIR_VQID_V(idx),
2530                                     r1.val);
2531
2532                         DLB2_CSR_WR(hw,
2533                                     DLB2_SYS_VF_DIR_VQID2QID(idx),
2534                                     r2.val);
2535                 }
2536         }
2537 }
2538
2539 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2540                                                struct dlb2_hw_domain *domain)
2541 {
2542         struct dlb2_list_entry *iter;
2543         union dlb2_chp_sn_chk_enbl r1;
2544         struct dlb2_ldb_port *port;
2545         int i;
2546         RTE_SET_USED(iter);
2547
2548         r1.field.en = 0;
2549
2550         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2551                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2552                         DLB2_CSR_WR(hw,
2553                                     DLB2_CHP_SN_CHK_ENBL(port->id.phys_id),
2554                                     r1.val);
2555         }
2556 }
2557
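/*
 * Busy-poll each load-balanced CQ until its inflight count reaches zero,
 * giving up on a port after DLB2_MAX_CQ_COMP_CHECK_LOOPS reads.
 */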
2558 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2559                                                  struct dlb2_hw_domain *domain)
2560 {
2561         struct dlb2_list_entry *iter;
2562         struct dlb2_ldb_port *port;
2563         int i;
2564         RTE_SET_USED(iter);
2565
2566         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2567                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2568                         int j; /* avoid shadowing the cos index i */
2569
2570                         for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
2571                                 if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2572                                         break;
2573                         }
2574
2575                         if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2576                                 DLB2_HW_ERR(hw,
2577                                             "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2578                                             __func__, port->id.phys_id);
2579                                 return -EFAULT;
2580                         }
2581                 }
2582         }
2583
2584         return 0;
2585 }
2586
2587 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2588                                         struct dlb2_hw_domain *domain)
2589 {
2590         struct dlb2_list_entry *iter;
2591         struct dlb2_dir_pq_pair *port;
2592         RTE_SET_USED(iter);
2593
2594         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2595                 port->enabled = false;
2596
2597                 dlb2_dir_port_cq_disable(hw, port);
2598         }
2599 }
2600
2601 static void
2602 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2603                                        struct dlb2_hw_domain *domain)
2604 {
2605         struct dlb2_list_entry *iter;
2606         struct dlb2_dir_pq_pair *port;
2607         union dlb2_sys_dir_pp_v r1;
2608         RTE_SET_USED(iter);
2609
2610         r1.field.pp_v = 0;
2611
2612         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2613                 DLB2_CSR_WR(hw,
2614                             DLB2_SYS_DIR_PP_V(port->id.phys_id),
2615                             r1.val);
2616 }
2617
2618 static void
2619 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2620                                        struct dlb2_hw_domain *domain)
2621 {
2622         struct dlb2_list_entry *iter;
2623         union dlb2_sys_ldb_pp_v r1;
2624         struct dlb2_ldb_port *port;
2625         int i;
2626         RTE_SET_USED(iter);
2627
2628         r1.field.pp_v = 0;
2629
2630         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2631                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2632                         DLB2_CSR_WR(hw,
2633                                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2634                                     r1.val);
2635         }
2636 }
2637
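/*
 * Post-reset sanity check: every load-balanced queue must be empty, every
 * load-balanced CQ must hold zero inflights and tokens, and each directed
 * queue/CQ pair must be drained as well. Any residue is an internal error.
 */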
2638 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2639                                             struct dlb2_hw_domain *domain)
2640 {
2641         struct dlb2_list_entry *iter;
2642         struct dlb2_dir_pq_pair *dir_port;
2643         struct dlb2_ldb_port *ldb_port;
2644         struct dlb2_ldb_queue *queue;
2645         int i;
2646         RTE_SET_USED(iter);
2647
2648         /*
2649          * Confirm that all the domain's queues' inflight counts and AQED
2650          * active counts are 0.
2651          */
2652         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2653                 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2654                         DLB2_HW_ERR(hw,
2655                                     "[%s()] Internal error: failed to empty ldb queue %d\n",
2656                                     __func__, queue->id.phys_id);
2657                         return -EFAULT;
2658                 }
2659         }
2660
2661         /* Confirm that all the domain's CQs' inflight and token counts are 0. */
2662         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2663                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2664                         if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2665                             dlb2_ldb_cq_token_count(hw, ldb_port)) {
2666                                 DLB2_HW_ERR(hw,
2667                                             "[%s()] Internal error: failed to empty ldb port %d\n",
2668                                             __func__, ldb_port->id.phys_id);
2669                                 return -EFAULT;
2670                         }
2671                 }
2672         }
2673
2674         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2675                 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2676                         DLB2_HW_ERR(hw,
2677                                     "[%s()] Internal error: failed to empty dir queue %d\n",
2678                                     __func__, dir_port->id.phys_id);
2679                         return -EFAULT;
2680                 }
2681
2682                 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2683                         DLB2_HW_ERR(hw,
2684                                     "[%s()] Internal error: failed to empty dir port %d\n",
2685                                     __func__, dir_port->id.phys_id);
2686                         return -EFAULT;
2687                 }
2688         }
2689
2690         return 0;
2691 }
2692
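/*
 * Return each of this port's CSRs (producer-port translations, CQ address
 * and depth state, history-list pointers and limits, interrupt thresholds,
 * token and scheduling counters, and the CQ2QID/CQ2PRIOV mappings) to its
 * hardware reset value.
 */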
2693 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2694                                                    struct dlb2_ldb_port *port)
2695 {
2696         DLB2_CSR_WR(hw,
2697                     DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2698                     DLB2_SYS_LDB_PP2VAS_RST);
2699
2700         DLB2_CSR_WR(hw,
2701                     DLB2_CHP_LDB_CQ2VAS(port->id.phys_id),
2702                     DLB2_CHP_LDB_CQ2VAS_RST);
2703
2704         DLB2_CSR_WR(hw,
2705                     DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2706                     DLB2_SYS_LDB_PP2VDEV_RST);
2707
2708         if (port->id.vdev_owned) {
2709                 unsigned int offs;
2710                 u32 virt_id;
2711
2712                 /*
2713                  * DLB uses producer port address bits 17:12 to determine the
2714                  * producer port ID. In Scalable IOV mode, PP accesses come
2715                  * through the PF MMIO window for the physical producer port,
2716                  * so for translation purposes the virtual and physical port
2717                  * IDs are equal.
2718                  */
2719                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2720                         virt_id = port->id.virt_id;
2721                 else
2722                         virt_id = port->id.phys_id;
2723
2724                 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2725
2726                 DLB2_CSR_WR(hw,
2727                             DLB2_SYS_VF_LDB_VPP2PP(offs),
2728                             DLB2_SYS_VF_LDB_VPP2PP_RST);
2729
2730                 DLB2_CSR_WR(hw,
2731                             DLB2_SYS_VF_LDB_VPP_V(offs),
2732                             DLB2_SYS_VF_LDB_VPP_V_RST);
2733         }
2734
2735         DLB2_CSR_WR(hw,
2736                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2737                     DLB2_SYS_LDB_PP_V_RST);
2738
2739         DLB2_CSR_WR(hw,
2740                     DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id),
2741                     DLB2_LSP_CQ_LDB_DSBL_RST);
2742
2743         DLB2_CSR_WR(hw,
2744                     DLB2_CHP_LDB_CQ_DEPTH(port->id.phys_id),
2745                     DLB2_CHP_LDB_CQ_DEPTH_RST);
2746
2747         DLB2_CSR_WR(hw,
2748                     DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id),
2749                     DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2750
2751         DLB2_CSR_WR(hw,
2752                     DLB2_CHP_HIST_LIST_LIM(port->id.phys_id),
2753                     DLB2_CHP_HIST_LIST_LIM_RST);
2754
2755         DLB2_CSR_WR(hw,
2756                     DLB2_CHP_HIST_LIST_BASE(port->id.phys_id),
2757                     DLB2_CHP_HIST_LIST_BASE_RST);
2758
2759         DLB2_CSR_WR(hw,
2760                     DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id),
2761                     DLB2_CHP_HIST_LIST_POP_PTR_RST);
2762
2763         DLB2_CSR_WR(hw,
2764                     DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
2765                     DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2766
2767         DLB2_CSR_WR(hw,
2768                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id),
2769                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2770
2771         DLB2_CSR_WR(hw,
2772                     DLB2_CHP_LDB_CQ_TMR_THRSH(port->id.phys_id),
2773                     DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2774
2775         DLB2_CSR_WR(hw,
2776                     DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
2777                     DLB2_CHP_LDB_CQ_INT_ENB_RST);
2778
2779         DLB2_CSR_WR(hw,
2780                     DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2781                     DLB2_SYS_LDB_CQ_ISR_RST);
2782
2783         DLB2_CSR_WR(hw,
2784                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
2785                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2786
2787         DLB2_CSR_WR(hw,
2788                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2789                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2790
2791         DLB2_CSR_WR(hw,
2792                     DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
2793                     DLB2_CHP_LDB_CQ_WPTR_RST);
2794
2795         DLB2_CSR_WR(hw,
2796                     DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
2797                     DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2798
2799         DLB2_CSR_WR(hw,
2800                     DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2801                     DLB2_SYS_LDB_CQ_ADDR_L_RST);
2802
2803         DLB2_CSR_WR(hw,
2804                     DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2805                     DLB2_SYS_LDB_CQ_ADDR_U_RST);
2806
2807         DLB2_CSR_WR(hw,
2808                     DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2809                     DLB2_SYS_LDB_CQ_AT_RST);
2810
2811         DLB2_CSR_WR(hw,
2812                     DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
2813                     DLB2_SYS_LDB_CQ_PASID_RST);
2814
2815         DLB2_CSR_WR(hw,
2816                     DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2817                     DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2818
2819         DLB2_CSR_WR(hw,
2820                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(port->id.phys_id),
2821                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2822
2823         DLB2_CSR_WR(hw,
2824                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(port->id.phys_id),
2825                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2826
2827         DLB2_CSR_WR(hw,
2828                     DLB2_LSP_CQ2QID0(port->id.phys_id),
2829                     DLB2_LSP_CQ2QID0_RST);
2830
2831         DLB2_CSR_WR(hw,
2832                     DLB2_LSP_CQ2QID1(port->id.phys_id),
2833                     DLB2_LSP_CQ2QID1_RST);
2834
2835         DLB2_CSR_WR(hw,
2836                     DLB2_LSP_CQ2PRIOV(port->id.phys_id),
2837                     DLB2_LSP_CQ2PRIOV_RST);
2838 }
2839
2840 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2841                                                  struct dlb2_hw_domain *domain)
2842 {
2843         struct dlb2_list_entry *iter;
2844         struct dlb2_ldb_port *port;
2845         int i;
2846         RTE_SET_USED(iter);
2847
2848         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2849                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2850                         __dlb2_domain_reset_ldb_port_registers(hw, port);
2851         }
2852 }
2853
2854 static void
2855 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2856                                        struct dlb2_dir_pq_pair *port)
2857 {
2858         DLB2_CSR_WR(hw,
2859                     DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
2860                     DLB2_CHP_DIR_CQ2VAS_RST);
2861
2862         DLB2_CSR_WR(hw,
2863                     DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id),
2864                     DLB2_LSP_CQ_DIR_DSBL_RST);
2865
2866         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2867
2868         DLB2_CSR_WR(hw,
2869                     DLB2_CHP_DIR_CQ_DEPTH(port->id.phys_id),
2870                     DLB2_CHP_DIR_CQ_DEPTH_RST);
2871
2872         DLB2_CSR_WR(hw,
2873                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id),
2874                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2875
2876         DLB2_CSR_WR(hw,
2877                     DLB2_CHP_DIR_CQ_TMR_THRSH(port->id.phys_id),
2878                     DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2879
2880         DLB2_CSR_WR(hw,
2881                     DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
2882                     DLB2_CHP_DIR_CQ_INT_ENB_RST);
2883
2884         DLB2_CSR_WR(hw,
2885                     DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2886                     DLB2_SYS_DIR_CQ_ISR_RST);
2887
2888         DLB2_CSR_WR(hw,
2889                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
2890                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2891
2892         DLB2_CSR_WR(hw,
2893                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2894                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2895
2896         DLB2_CSR_WR(hw,
2897                     DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
2898                     DLB2_CHP_DIR_CQ_WPTR_RST);
2899
2900         DLB2_CSR_WR(hw,
2901                     DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
2902                     DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2903
2904         DLB2_CSR_WR(hw,
2905                     DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2906                     DLB2_SYS_DIR_CQ_ADDR_L_RST);
2907
2908         DLB2_CSR_WR(hw,
2909                     DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2910                     DLB2_SYS_DIR_CQ_ADDR_U_RST);
2911
2912         DLB2_CSR_WR(hw,
2913                     DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2914                     DLB2_SYS_DIR_CQ_AT_RST);
2915
2916         DLB2_CSR_WR(hw,
2917                     DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
2918                     DLB2_SYS_DIR_CQ_PASID_RST);
2919
2920         DLB2_CSR_WR(hw,
2921                     DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2922                     DLB2_SYS_DIR_CQ_FMT_RST);
2923
2924         DLB2_CSR_WR(hw,
2925                     DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2926                     DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
2927
2928         DLB2_CSR_WR(hw,
2929                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(port->id.phys_id),
2930                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
2931
2932         DLB2_CSR_WR(hw,
2933                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(port->id.phys_id),
2934                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
2935
2936         DLB2_CSR_WR(hw,
2937                     DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
2938                     DLB2_SYS_DIR_PP2VAS_RST);
2939
2940         DLB2_CSR_WR(hw,
2941                     DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
2942                     DLB2_CHP_DIR_CQ2VAS_RST);
2943
2944         DLB2_CSR_WR(hw,
2945                     DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
2946                     DLB2_SYS_DIR_PP2VDEV_RST);
2947
2948         if (port->id.vdev_owned) {
2949                 unsigned int offs;
2950                 u32 virt_id;
2951
2952                 /*
2953                  * DLB uses producer port address bits 17:12 to determine the
2954                  * producer port ID. In Scalable IOV mode, PP accesses come
2955                  * through the PF MMIO window for the physical producer port,
2956                  * so for translation purposes the virtual and physical port
2957                  * IDs are equal.
2958                  */
2959                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2960                         virt_id = port->id.virt_id;
2961                 else
2962                         virt_id = port->id.phys_id;
2963
2964                 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
2965
2966                 DLB2_CSR_WR(hw,
2967                             DLB2_SYS_VF_DIR_VPP2PP(offs),
2968                             DLB2_SYS_VF_DIR_VPP2PP_RST);
2969
2970                 DLB2_CSR_WR(hw,
2971                             DLB2_SYS_VF_DIR_VPP_V(offs),
2972                             DLB2_SYS_VF_DIR_VPP_V_RST);
2973         }
2974
2975         DLB2_CSR_WR(hw,
2976                     DLB2_SYS_DIR_PP_V(port->id.phys_id),
2977                     DLB2_SYS_DIR_PP_V_RST);
2978 }
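     /*
      * Illustrative sketch (hypothetical helper, not part of this driver):
      * since producer port address bits 17:12 encode the PP ID, as noted in
      * the comment above, a helper deriving a directed port's MMIO offset
      * from its ID could be written as:
      *
      *     static inline u64 dlb2_pp_mmio_offset(u32 pp_id)
      *     {
      *             return (u64)pp_id << 12; // bits 17:12 select the PP
      *     }
      */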
2979
2980 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2981                                                  struct dlb2_hw_domain *domain)
2982 {
2983         struct dlb2_list_entry *iter;
2984         struct dlb2_dir_pq_pair *port;
2985         RTE_SET_USED(iter);
2986
2987         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2988                 __dlb2_domain_reset_dir_port_registers(hw, port);
2989 }
2990
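     /*
      * Restore every load-balanced queue owned by the domain to its reset
      * state: enqueue counters, depth limits and thresholds, sequence-number
      * slot state, and the per-CQ QID index mappings.
      */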
2991 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
2992                                                   struct dlb2_hw_domain *domain)
2993 {
2994         struct dlb2_list_entry *iter;
2995         struct dlb2_ldb_queue *queue;
2996         RTE_SET_USED(iter);
2997
2998         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2999                 unsigned int queue_id = queue->id.phys_id;
3000                 int i;
3001
3002                 DLB2_CSR_WR(hw,
3003                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(queue_id),
3004                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3005
3006                 DLB2_CSR_WR(hw,
3007                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(queue_id),
3008                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3009
3010                 DLB2_CSR_WR(hw,
3011                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(queue_id),
3012                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3013
3014                 DLB2_CSR_WR(hw,
3015                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(queue_id),
3016                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3017
3018                 DLB2_CSR_WR(hw,
3019                             DLB2_LSP_QID_NALDB_MAX_DEPTH(queue_id),
3020                             DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3021
3022                 DLB2_CSR_WR(hw,
3023                             DLB2_LSP_QID_LDB_INFL_LIM(queue_id),
3024                             DLB2_LSP_QID_LDB_INFL_LIM_RST);
3025
3026                 DLB2_CSR_WR(hw,
3027                             DLB2_LSP_QID_AQED_ACTIVE_LIM(queue_id),
3028                             DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3029
3030                 DLB2_CSR_WR(hw,
3031                             DLB2_LSP_QID_ATM_DEPTH_THRSH(queue_id),
3032                             DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3033
3034                 DLB2_CSR_WR(hw,
3035                             DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue_id),
3036                             DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3037
3038                 DLB2_CSR_WR(hw,
3039                             DLB2_SYS_LDB_QID_ITS(queue_id),
3040                             DLB2_SYS_LDB_QID_ITS_RST);
3041
3042                 DLB2_CSR_WR(hw,
3043                             DLB2_CHP_ORD_QID_SN(queue_id),
3044                             DLB2_CHP_ORD_QID_SN_RST);
3045
3046                 DLB2_CSR_WR(hw,
3047                             DLB2_CHP_ORD_QID_SN_MAP(queue_id),
3048                             DLB2_CHP_ORD_QID_SN_MAP_RST);
3049
3050                 DLB2_CSR_WR(hw,
3051                             DLB2_SYS_LDB_QID_V(queue_id),
3052                             DLB2_SYS_LDB_QID_V_RST);
3053
3054                 DLB2_CSR_WR(hw,
3055                             DLB2_SYS_LDB_QID_CFG_V(queue_id),
3056                             DLB2_SYS_LDB_QID_CFG_V_RST);
3057
3058                 if (queue->sn_cfg_valid) {
3059                         u32 offs[2];
3060
3061                         offs[0] = DLB2_RO_PIPE_GRP_0_SLT_SHFT(queue->sn_slot);
3062                         offs[1] = DLB2_RO_PIPE_GRP_1_SLT_SHFT(queue->sn_slot);
3063
3064                         DLB2_CSR_WR(hw,
3065                                     offs[queue->sn_group],
3066                                     DLB2_RO_PIPE_GRP_0_SLT_SHFT_RST);
3067                 }
3068
3069                 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3070                         DLB2_CSR_WR(hw,
3071                                     DLB2_LSP_QID2CQIDIX(queue_id, i),
3072                                     DLB2_LSP_QID2CQIDIX_00_RST);
3073
3074                         DLB2_CSR_WR(hw,
3075                                     DLB2_LSP_QID2CQIDIX2(queue_id, i),
3076                                     DLB2_LSP_QID2CQIDIX2_00_RST);
3077
3078                         DLB2_CSR_WR(hw,
3079                                     DLB2_ATM_QID2CQIDIX(queue_id, i),
3080                                     DLB2_ATM_QID2CQIDIX_00_RST);
3081                 }
3082         }
3083 }
3084
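     /*
      * Restore every directed queue owned by the domain (maximum depth,
      * enqueue counters, depth threshold, and validity bits) to its reset
      * state.
      */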
3085 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3086                                                   struct dlb2_hw_domain *domain)
3087 {
3088         struct dlb2_list_entry *iter;
3089         struct dlb2_dir_pq_pair *queue;
3090         RTE_SET_USED(iter);
3091
3092         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3093                 DLB2_CSR_WR(hw,
3094                             DLB2_LSP_QID_DIR_MAX_DEPTH(queue->id.phys_id),
3095                             DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3096
3097                 DLB2_CSR_WR(hw,
3098                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(queue->id.phys_id),
3099                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3100
3101                 DLB2_CSR_WR(hw,
3102                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(queue->id.phys_id),
3103                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3104
3105                 DLB2_CSR_WR(hw,
3106                             DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
3107                             DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3108
3109                 DLB2_CSR_WR(hw,
3110                             DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3111                             DLB2_SYS_DIR_QID_ITS_RST);
3112
3113                 DLB2_CSR_WR(hw,
3114                             DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3115                             DLB2_SYS_DIR_QID_V_RST);
3116         }
3117 }
3118
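     /*
      * Reset all port and queue registers owned by the domain, then restore
      * the domain's load-balanced and directed credit pool registers to
      * their reset values.
      */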
3119 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3120                                         struct dlb2_hw_domain *domain)
3121 {
3122         dlb2_domain_reset_ldb_port_registers(hw, domain);
3123
3124         dlb2_domain_reset_dir_port_registers(hw, domain);
3125
3126         dlb2_domain_reset_ldb_queue_registers(hw, domain);
3127
3128         dlb2_domain_reset_dir_queue_registers(hw, domain);
3129
3130         DLB2_CSR_WR(hw,
3131                     DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3132                     DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3133
3134         DLB2_CSR_WR(hw,
3135                     DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3136                     DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3137 }
3138
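     /*
      * Return all of the domain's resources (queues, ports, history list
      * entries, and credits) to its parent function's avail lists and pools,
      * and mark the domain unconfigured so it can be allocated again.
      */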
3139 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3140                                             struct dlb2_hw_domain *domain)
3141 {
3142         struct dlb2_dir_pq_pair *tmp_dir_port;
3143         struct dlb2_ldb_queue *tmp_ldb_queue;
3144         struct dlb2_ldb_port *tmp_ldb_port;
3145         struct dlb2_list_entry *iter1;
3146         struct dlb2_list_entry *iter2;
3147         struct dlb2_function_resources *rsrcs;
3148         struct dlb2_dir_pq_pair *dir_port;
3149         struct dlb2_ldb_queue *ldb_queue;
3150         struct dlb2_ldb_port *ldb_port;
3151         struct dlb2_list_head *list;
3152         int ret, i;
3153         RTE_SET_USED(tmp_dir_port);
3154         RTE_SET_USED(tmp_ldb_queue);
3155         RTE_SET_USED(tmp_ldb_port);
3156         RTE_SET_USED(iter1);
3157         RTE_SET_USED(iter2);
3158
3159         rsrcs = domain->parent_func;
3160
3161         /* Move the domain's ldb queues to the function's avail list */
3162         list = &domain->used_ldb_queues;
3163         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3164                 if (ldb_queue->sn_cfg_valid) {
3165                         struct dlb2_sn_group *grp;
3166
3167                         grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3168
3169                         dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3170                         ldb_queue->sn_cfg_valid = false;
3171                 }
3172
3173                 ldb_queue->owned = false;
3174                 ldb_queue->num_mappings = 0;
3175                 ldb_queue->num_pending_additions = 0;
3176
3177                 dlb2_list_del(&domain->used_ldb_queues,
3178                               &ldb_queue->domain_list);
3179                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3180                               &ldb_queue->func_list);
3181                 rsrcs->num_avail_ldb_queues++;
3182         }
3183
3184         list = &domain->avail_ldb_queues;
3185         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3186                 ldb_queue->owned = false;
3187
3188                 dlb2_list_del(&domain->avail_ldb_queues,
3189                               &ldb_queue->domain_list);
3190                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3191                               &ldb_queue->func_list);
3192                 rsrcs->num_avail_ldb_queues++;
3193         }
3194
3195         /* Move the domain's ldb ports to the function's avail list */
3196         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3197                 list = &domain->used_ldb_ports[i];
3198                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3199                                        iter1, iter2) {
3200                         int j;
3201
3202                         ldb_port->owned = false;
3203                         ldb_port->configured = false;
3204                         ldb_port->num_pending_removals = 0;
3205                         ldb_port->num_mappings = 0;
3206                         ldb_port->init_tkn_cnt = 0;
3207                         for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3208                                 ldb_port->qid_map[j].state =
3209                                         DLB2_QUEUE_UNMAPPED;
3210
3211                         dlb2_list_del(&domain->used_ldb_ports[i],
3212                                       &ldb_port->domain_list);
3213                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3214                                       &ldb_port->func_list);
3215                         rsrcs->num_avail_ldb_ports[i]++;
3216                 }
3217
3218                 list = &domain->avail_ldb_ports[i];
3219                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3220                                        iter1, iter2) {
3221                         ldb_port->owned = false;
3222
3223                         dlb2_list_del(&domain->avail_ldb_ports[i],
3224                                       &ldb_port->domain_list);
3225                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3226                                       &ldb_port->func_list);
3227                         rsrcs->num_avail_ldb_ports[i]++;
3228                 }
3229         }
3230
3231         /* Move the domain's dir ports to the function's avail list */
3232         list = &domain->used_dir_pq_pairs;
3233         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3234                 dir_port->owned = false;
3235                 dir_port->port_configured = false;
3236                 dir_port->init_tkn_cnt = 0;
3237
3238                 dlb2_list_del(&domain->used_dir_pq_pairs,
3239                               &dir_port->domain_list);
3240
3241                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3242                               &dir_port->func_list);
3243                 rsrcs->num_avail_dir_pq_pairs++;
3244         }
3245
3246         list = &domain->avail_dir_pq_pairs;
3247         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3248                 dir_port->owned = false;
3249
3250                 dlb2_list_del(&domain->avail_dir_pq_pairs,
3251                               &dir_port->domain_list);
3252
3253                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3254                               &dir_port->func_list);
3255                 rsrcs->num_avail_dir_pq_pairs++;
3256         }
3257
3258         /* Return hist list entries to the function */
3259         ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3260                                     domain->hist_list_entry_base,
3261                                     domain->total_hist_list_entries);
3262         if (ret) {
3263                 DLB2_HW_ERR(hw,
3264                             "[%s()] Internal error: domain hist list base doesn't match the function's bitmap.\n",
3265                             __func__);
3266                 return ret;
3267         }
3268
3269         domain->total_hist_list_entries = 0;
3270         domain->avail_hist_list_entries = 0;
3271         domain->hist_list_entry_base = 0;
3272         domain->hist_list_entry_offset = 0;
3273
3274         rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3275         domain->num_ldb_credits = 0;
3276
3277         rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3278         domain->num_dir_credits = 0;
3279
3280         rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3281         rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3282         domain->num_avail_aqed_entries = 0;
3283         domain->num_used_aqed_entries = 0;
3284
3285         domain->num_pending_removals = 0;
3286         domain->num_pending_additions = 0;
3287         domain->configured = false;
3288         domain->started = false;
3289
3290         /*
3291          * Move the domain out of the used_domains list and back to the
3292          * function's avail_domains list.
3293          */
3294         dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3295         dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3296         rsrcs->num_avail_domains++;
3297
3298         return 0;
3299 }
3300
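     /*
      * Drain a non-empty LDB queue that has no CQ mapped to it by dynamically
      * mapping it to one of the domain's LDB ports (first unmapping a QID if
      * the chosen CQ has no free slots) and then draining the mapped queues.
      */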
3301 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3302                                             struct dlb2_hw_domain *domain,
3303                                             struct dlb2_ldb_queue *queue)
3304 {
3305         struct dlb2_ldb_port *port;
3306         int ret, i;
3307
3308         /* If a domain has LDB queues, it must have LDB ports */
3309         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3310                 if (!dlb2_list_empty(&domain->used_ldb_ports[i]))
3311                         break;
3312         }
3313
3314         if (i == DLB2_NUM_COS_DOMAINS) {
3315                 DLB2_HW_ERR(hw,
3316                             "[%s()] Internal error: No configured LDB ports\n",
3317                             __func__);
3318                 return -EFAULT;
3319         }
3320
3321         port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i], typeof(*port));
3322
3323         /* If necessary, free up a QID slot in this CQ */
3324         if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3325                 struct dlb2_ldb_queue *mapped_queue;
3326
3327                 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3328
3329                 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3330                 if (ret)
3331                         return ret;
3332         }
3333
3334         ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3335         if (ret)
3336                 return ret;
3337
3338         return dlb2_domain_drain_mapped_queues(hw, domain);
3339 }
3340
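     /*
      * Drain any of the domain's LDB queues that are non-empty yet have no
      * CQ mapped to them. This is a no-op if the domain was never started.
      */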
3341 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3342                                              struct dlb2_hw_domain *domain)
3343 {
3344         struct dlb2_list_entry *iter;
3345         struct dlb2_ldb_queue *queue;
3346         int ret;
3347         RTE_SET_USED(iter);
3348
3349         /* If the domain hasn't been started, there's no traffic to drain */
3350         if (!domain->started)
3351                 return 0;
3352
3353         /*
3354          * Pre-condition: the unattached queue must not have any outstanding
3355          * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3356          * prior to this in dlb2_domain_drain_mapped_queues().
3357          */
3358         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3359                 if (queue->num_mappings != 0 ||
3360                     dlb2_ldb_queue_is_empty(hw, queue))
3361                         continue;
3362
3363                 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3364                 if (ret)
3365                         return ret;
3366         }
3367
3368         return 0;
3369 }
3370
3371 /**
3372  * dlb2_reset_domain() - Reset a DLB scheduling domain and its associated
3373  *      hardware resources.
3374  * @hw: Contains the current state of the DLB2 hardware.
3375  * @domain_id: Domain ID
3376  * @vdev_req: Request came from a virtual device.
3377  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
3378  *
3379  * Note: User software *must* stop sending to this domain's producer ports
3380  * before invoking this function; otherwise, undefined behavior will result.
3381  *
3382  * Return: 0 upon success, < 0 otherwise.
3383  */
3384 int dlb2_reset_domain(struct dlb2_hw *hw,
3385                       u32 domain_id,
3386                       bool vdev_req,
3387                       unsigned int vdev_id)
3388 {
3389         struct dlb2_hw_domain *domain;
3390         int ret;
3391
3392         dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3393
3394         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3395
3396         if (domain == NULL || !domain->configured)
3397                 return -EINVAL;
3398
3399         /* Disable VPPs */
3400         if (vdev_req) {
3401                 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3402
3403                 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3404         }
3405
3406         /* Disable CQ interrupts */
3407         dlb2_domain_disable_dir_port_interrupts(hw, domain);
3408
3409         dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3410
3411         /*
3412          * For each queue owned by this domain, disable its write permissions to
3413          * cause any traffic sent to it to be dropped. Well-behaved software
3414          * should not be sending QEs at this point.
3415          */
3416         dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3417
3418         dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3419
3420         /* Turn off completion tracking on all the domain's PPs. */
3421         dlb2_domain_disable_ldb_seq_checks(hw, domain);
3422
3423         /*
3424          * Disable the LDB CQs and drain them in order to complete the map and
3425          * unmap procedures, which require zero CQ inflights and zero QID
3426          * inflights respectively.
3427          */
3428         dlb2_domain_disable_ldb_cqs(hw, domain);
3429
3430         ret = dlb2_domain_drain_ldb_cqs(hw, domain, false);
3431         if (ret < 0)
3432                 return ret;
3433
3434         ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3435         if (ret < 0)
3436                 return ret;
3437
3438         ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3439         if (ret < 0)
3440                 return ret;
3441
3442         ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3443         if (ret < 0)
3444                 return ret;
3445
3446         /* Re-enable the CQs in order to drain the mapped queues. */
3447         dlb2_domain_enable_ldb_cqs(hw, domain);
3448
3449         ret = dlb2_domain_drain_mapped_queues(hw, domain);
3450         if (ret < 0)
3451                 return ret;
3452
3453         ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3454         if (ret < 0)
3455                 return ret;
3456
3457         /* Done draining LDB QEs, so disable the CQs. */
3458         dlb2_domain_disable_ldb_cqs(hw, domain);
3459
3460         dlb2_domain_drain_dir_queues(hw, domain);
3461
3462         /* Done draining DIR QEs, so disable the CQs. */
3463         dlb2_domain_disable_dir_cqs(hw, domain);
3464
3465         /* Disable PPs */
3466         dlb2_domain_disable_dir_producer_ports(hw, domain);
3467
3468         dlb2_domain_disable_ldb_producer_ports(hw, domain);
3469
3470         ret = dlb2_domain_verify_reset_success(hw, domain);
3471         if (ret)
3472                 return ret;
3473
3474         /* Reset the QID and port state. */
3475         dlb2_domain_reset_registers(hw, domain);
3476
3477         /* Hardware reset complete. Reset the domain's software state. */
3478         ret = dlb2_domain_reset_software_state(hw, domain);
3479         if (ret)
3480                 return ret;
3481
3482         return 0;
3483 }
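     /*
      * Illustrative usage sketch (hypothetical caller, for example only):
      * once user software has stopped enqueueing to the domain's producer
      * ports, a PF-owned domain with ID 0 could be reset as follows:
      *
      *     int ret = dlb2_reset_domain(hw, 0, false, 0);
      *
      *     if (ret < 0)
      *             DLB2_HW_ERR(hw, "[%s()] domain reset failed: %d\n",
      *                         __func__, ret);
      */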
3484
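     /**
      * dlb2_finish_unmap_qid_procedures() - finish pending QID unmap procedures
      * @hw: Contains the current state of the DLB2 hardware.
      *
      * Walks every domain and attempts to finish its outstanding queue unmap
      * jobs, accumulating the count reported for each domain.
      */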
3485 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
3486 {
3487         int i, num = 0;
3488
3489         /* Finish queue unmap jobs for any domain that needs it */
3490         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
3491                 struct dlb2_hw_domain *domain = &hw->domains[i];
3492
3493                 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3494         }
3495
3496         return num;
3497 }
3498
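     /**
      * dlb2_finish_map_qid_procedures() - finish pending QID map procedures
      * @hw: Contains the current state of the DLB2 hardware.
      *
      * Walks every domain and attempts to finish its outstanding queue map
      * jobs, accumulating the count reported for each domain.
      */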
3499 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
3500 {
3501         int i, num = 0;
3502
3503         /* Finish queue map jobs for any domain that needs it */
3504         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
3505                 struct dlb2_hw_domain *domain = &hw->domains[i];
3506
3507                 num += dlb2_domain_finish_map_qid_procedures(hw, domain);
3508         }
3509
3510         return num;
3511 }