event/dlb2: support ldb port specific COS
dpdk.git: drivers/event/dlb2/pf/base/dlb2_resource.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include "dlb2_user.h"
6
7 #include "dlb2_hw_types.h"
8 #include "dlb2_osdep.h"
9 #include "dlb2_osdep_bitmap.h"
10 #include "dlb2_osdep_types.h"
11 #include "dlb2_regs.h"
12 #include "dlb2_resource.h"
13
14 #include "../../dlb2_priv.h"
15 #include "../../dlb2_inline_fns.h"
16
17 #define DLB2_DOM_LIST_HEAD(head, type) \
18         DLB2_LIST_HEAD((head), type, domain_list)
19
20 #define DLB2_FUNC_LIST_HEAD(head, type) \
21         DLB2_LIST_HEAD((head), type, func_list)
22
23 #define DLB2_DOM_LIST_FOR(head, ptr, iter) \
24         DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
25
26 #define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
27         DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
28
29 #define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
30         DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
31
32 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
33         DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
34
35 /*
36  * The PF driver cannot assume that a register write takes effect before any
37  * subsequent HCW writes; to guarantee completion, the driver must read back a
38  * CSR. This function only needs to be called for configuration that can occur
39  * after the domain starts; prior to starting, applications can't send HCWs.
40  */
41 static inline void dlb2_flush_csr(struct dlb2_hw *hw)
42 {
43         DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
44 }
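
/*
 * Illustrative usage (the same pattern appears in the CQ enable/disable
 * helpers later in this file): write the configuration CSR, then flush so the
 * write is guaranteed to have completed before any subsequent HCWs:
 *
 *	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
 *	dlb2_flush_csr(hw);
 */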
45
46 static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
47 {
48         int i;
49
50         dlb2_list_init_head(&domain->used_ldb_queues);
51         dlb2_list_init_head(&domain->used_dir_pq_pairs);
52         dlb2_list_init_head(&domain->avail_ldb_queues);
53         dlb2_list_init_head(&domain->avail_dir_pq_pairs);
54
55         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
56                 dlb2_list_init_head(&domain->used_ldb_ports[i]);
57         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
58                 dlb2_list_init_head(&domain->avail_ldb_ports[i]);
59 }
60
61 static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
62 {
63         int i;
64         dlb2_list_init_head(&rsrc->avail_domains);
65         dlb2_list_init_head(&rsrc->used_domains);
66         dlb2_list_init_head(&rsrc->avail_ldb_queues);
67         dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);
68
69         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
70                 dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
71 }
72
73 /**
74  * dlb2_resource_free() - free device state memory
75  * @hw: dlb2_hw handle for a particular device.
76  *
77  * This function frees software state pointed to by dlb2_hw. This function
78  * should be called when resetting the device or unloading the driver.
79  */
80 void dlb2_resource_free(struct dlb2_hw *hw)
81 {
82         int i;
83
84         if (hw->pf.avail_hist_list_entries)
85                 dlb2_bitmap_free(hw->pf.avail_hist_list_entries);
86
87         for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
88                 if (hw->vdev[i].avail_hist_list_entries)
89                         dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
90         }
91 }
92
93 /**
94  * dlb2_resource_init() - initialize the device
95  * @hw: pointer to struct dlb2_hw.
96  * @ver: device version.
97  *
98  * This function initializes the device's software state (pointed to by the hw
99  * argument) and programs global scheduling QoS registers. This function should
100  * be called during driver initialization, and the dlb2_hw structure should
101  * be zero-initialized before calling the function.
102  *
103  * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
104  * device is reset.
105  *
106  * Return:
107  * Returns 0 upon success, <0 otherwise.
108  */
109 int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
110 {
111         struct dlb2_list_entry *list;
112         unsigned int i;
113         int ret;
114
115         /*
116          * For optimal load-balancing, ports that map to one or more QIDs in
117          * common should not be in numerical sequence. The port->QID mapping is
118          * application dependent, but the driver interleaves port IDs as much
119          * as possible to reduce the likelihood of sequential ports mapping to
120          * the same QID(s). This initial allocation of port IDs maximizes the
121          * average distance between an ID and its immediate neighbors (i.e.
122          * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
123          * 3, etc.).
124          */
125         const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
126                 0,  7,  14,  5, 12,  3, 10,  1,  8, 15,  6, 13,  4, 11,  2,  9,
127                 16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
128                 32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
129                 48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
130         };
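
        /*
         * Note: within each group of 16 IDs, the table above walks the IDs
         * with a stride of seven (entry n of a group is (7 * n) % 16 plus the
         * group offset), so ports allocated back-to-back receive physical IDs
         * that are numerically far apart.
         */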
131
132         hw->ver = ver;
133
134         dlb2_init_fn_rsrc_lists(&hw->pf);
135
136         for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
137                 dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
138
139         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
140                 dlb2_init_domain_rsrc_lists(&hw->domains[i]);
141                 hw->domains[i].parent_func = &hw->pf;
142         }
143
144         /* Give all resources to the PF driver */
145         hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
146         for (i = 0; i < hw->pf.num_avail_domains; i++) {
147                 list = &hw->domains[i].func_list;
148
149                 dlb2_list_add(&hw->pf.avail_domains, list);
150         }
151
152         hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
153         for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
154                 list = &hw->rsrcs.ldb_queues[i].func_list;
155
156                 dlb2_list_add(&hw->pf.avail_ldb_queues, list);
157         }
158
159         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
160                 hw->pf.num_avail_ldb_ports[i] =
161                         DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;
162
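        /*
         * Spread the interleaved port IDs evenly across the classes of
         * service: the first 16 allocation-order slots land in CoS 0, the
         * next 16 in CoS 1, and so on. The shift below equals i / 16 because
         * DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS happens to equal
         * 1 << DLB2_NUM_COS_DOMAINS.
         */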
163         for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
164                 int cos_id = i >> DLB2_NUM_COS_DOMAINS;
165                 struct dlb2_ldb_port *port;
166
167                 port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
168
169                 dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
170                               &port->func_list);
171         }
172
173         hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
174         for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
175                 list = &hw->rsrcs.dir_pq_pairs[i].func_list;
176
177                 dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
178         }
179
180         if (hw->ver == DLB2_HW_V2) {
181                 hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
182                 hw->pf.num_avail_dqed_entries =
183                         DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
184         } else {
185                 hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
186         }
187
188         hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
189
190         ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
191                                 DLB2_MAX_NUM_HIST_LIST_ENTRIES);
192         if (ret)
193                 goto unwind;
194
195         ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
196         if (ret)
197                 goto unwind;
198
199         for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
200                 ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
201                                         DLB2_MAX_NUM_HIST_LIST_ENTRIES);
202                 if (ret)
203                         goto unwind;
204
205                 ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
206                 if (ret)
207                         goto unwind;
208         }
209
210         /* Initialize the hardware resource IDs */
211         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
212                 hw->domains[i].id.phys_id = i;
213                 hw->domains[i].id.vdev_owned = false;
214         }
215
216         for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
217                 hw->rsrcs.ldb_queues[i].id.phys_id = i;
218                 hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
219         }
220
221         for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
222                 hw->rsrcs.ldb_ports[i].id.phys_id = i;
223                 hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
224         }
225
226         for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
227                 hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
228                 hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
229         }
230
231         for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
232                 hw->rsrcs.sn_groups[i].id = i;
233                 /* Default mode (0) is 64 sequence numbers per queue */
234                 hw->rsrcs.sn_groups[i].mode = 0;
235                 hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
236                 hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
237         }
238
239         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
240                 hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;
241
242         return 0;
243
244 unwind:
245         dlb2_resource_free(hw);
246
247         return ret;
248 }
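
/*
 * Illustrative call sequence (sketch only; "dlb2_dev" is a hypothetical
 * embedding structure): the dlb2_hw state is zero-initialized, set up once at
 * probe time, and freed again on device reset or driver unload:
 *
 *	memset(&dlb2_dev->hw, 0, sizeof(dlb2_dev->hw));
 *	ret = dlb2_resource_init(&dlb2_dev->hw, DLB2_HW_V2_5);
 *	if (ret)
 *		return ret;
 *	...
 *	dlb2_resource_free(&dlb2_dev->hw);
 */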
249
250 /**
251  * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
252  * @hw: dlb2_hw handle for a particular device.
253  * @ver: device version.
254  *
255  * Clearing the PMCSR must be done at initialization to make the device fully
256  * operational.
257  */
258 void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
259 {
260         u32 pmcsr_dis;
261
262         pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));
263
264         DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);
265
266         DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
267 }
268
269 /**
270  * dlb2_hw_get_num_resources() - query the PCI function's available resources
271  * @hw: dlb2_hw handle for a particular device.
272  * @arg: pointer to resource counts.
273  * @vdev_req: indicates whether this request came from a vdev.
274  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
275  *
276  * This function returns the number of available resources for the PF or for a
277  * vdev.
278  *
279  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
280  * device.
281  *
282  * Return:
283  * Returns 0 upon success, -EINVAL if vdev_req is true and vdev_id is
284  * invalid.
285  */
286 int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
287                               struct dlb2_get_num_resources_args *arg,
288                               bool vdev_req,
289                               unsigned int vdev_id)
290 {
291         struct dlb2_function_resources *rsrcs;
292         struct dlb2_bitmap *map;
293         int i;
294
295         if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
296                 return -EINVAL;
297
298         if (vdev_req)
299                 rsrcs = &hw->vdev[vdev_id];
300         else
301                 rsrcs = &hw->pf;
302
303         arg->num_sched_domains = rsrcs->num_avail_domains;
304
305         arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
306
307         arg->num_ldb_ports = 0;
308         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
309                 arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];
310
311         arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
312         arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
313         arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
314         arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];
315
316         arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
317
318         arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;
319
320         map = rsrcs->avail_hist_list_entries;
321
322         arg->num_hist_list_entries = dlb2_bitmap_count(map);
323
324         arg->max_contiguous_hist_list_entries =
325                 dlb2_bitmap_longest_set_range(map);
326
327         if (hw->ver == DLB2_HW_V2) {
328                 arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
329                 arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
330         } else {
331                 arg->num_credits = rsrcs->num_avail_entries;
332         }
333         return 0;
334 }
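
/*
 * Illustrative query (sketch only): a caller can size a subsequent domain
 * creation request from the returned counts, for example:
 *
 *	struct dlb2_get_num_resources_args rsrc;
 *
 *	if (dlb2_hw_get_num_resources(hw, &rsrc, false, 0))
 *		return -EINVAL;
 *
 * rsrc.num_cos_ldb_ports[i] then holds the number of free LDB ports in CoS i.
 */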
335
336 static void dlb2_configure_domain_credits_v2_5(struct dlb2_hw *hw,
337                                                struct dlb2_hw_domain *domain)
338 {
339         u32 reg = 0;
340
341         DLB2_BITS_SET(reg, domain->num_credits, DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
342         DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id), reg);
343 }
344
345 static void dlb2_configure_domain_credits_v2(struct dlb2_hw *hw,
346                                              struct dlb2_hw_domain *domain)
347 {
348         u32 reg = 0;
349
350         DLB2_BITS_SET(reg, domain->num_ldb_credits,
351                       DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
352         DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), reg);
353
354         reg = 0;
355         DLB2_BITS_SET(reg, domain->num_dir_credits,
356                       DLB2_CHP_CFG_DIR_VAS_CRD_COUNT);
357         DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), reg);
358 }
359
360 static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
361                                           struct dlb2_hw_domain *domain)
362 {
363         if (hw->ver == DLB2_HW_V2)
364                 dlb2_configure_domain_credits_v2(hw, domain);
365         else
366                 dlb2_configure_domain_credits_v2_5(hw, domain);
367 }
368
369 static int dlb2_attach_credits(struct dlb2_function_resources *rsrcs,
370                                struct dlb2_hw_domain *domain,
371                                u32 num_credits,
372                                struct dlb2_cmd_response *resp)
373 {
374         if (rsrcs->num_avail_entries < num_credits) {
375                 resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
376                 return -EINVAL;
377         }
378
379         rsrcs->num_avail_entries -= num_credits;
380         domain->num_credits += num_credits;
381         return 0;
382 }
383
384 static struct dlb2_ldb_port *
385 dlb2_get_next_ldb_port(struct dlb2_hw *hw,
386                        struct dlb2_function_resources *rsrcs,
387                        u32 domain_id,
388                        u32 cos_id)
389 {
390         struct dlb2_list_entry *iter;
391         struct dlb2_ldb_port *port;
392         RTE_SET_USED(iter);
393
394         /*
395          * To reduce the odds of consecutive load-balanced ports mapping to the
396          * same queue(s), the driver attempts to allocate ports whose neighbors
397          * are owned by a different domain.
398          */
399         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
400                 u32 next, prev;
401                 u32 phys_id;
402
403                 phys_id = port->id.phys_id;
404                 next = phys_id + 1;
405                 prev = phys_id - 1;
406
407                 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
408                         next = 0;
409                 if (phys_id == 0)
410                         prev = DLB2_MAX_NUM_LDB_PORTS - 1;
411
412                 if (!hw->rsrcs.ldb_ports[next].owned ||
413                     hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
414                         continue;
415
416                 if (!hw->rsrcs.ldb_ports[prev].owned ||
417                     hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
418                         continue;
419
420                 return port;
421         }
422
423         /*
424          * Failing that, the driver looks for a port with one neighbor owned by
425          * a different domain and the other unallocated.
426          */
427         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
428                 u32 next, prev;
429                 u32 phys_id;
430
431                 phys_id = port->id.phys_id;
432                 next = phys_id + 1;
433                 prev = phys_id - 1;
434
435                 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
436                         next = 0;
437                 if (phys_id == 0)
438                         prev = DLB2_MAX_NUM_LDB_PORTS - 1;
439
440                 if (!hw->rsrcs.ldb_ports[prev].owned &&
441                     hw->rsrcs.ldb_ports[next].owned &&
442                     hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
443                         return port;
444
445                 if (!hw->rsrcs.ldb_ports[next].owned &&
446                     hw->rsrcs.ldb_ports[prev].owned &&
447                     hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
448                         return port;
449         }
450
451         /*
452          * Failing that, the driver looks for a port with both neighbors
453          * unallocated.
454          */
455         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
456                 u32 next, prev;
457                 u32 phys_id;
458
459                 phys_id = port->id.phys_id;
460                 next = phys_id + 1;
461                 prev = phys_id - 1;
462
463                 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
464                         next = 0;
465                 if (phys_id == 0)
466                         prev = DLB2_MAX_NUM_LDB_PORTS - 1;
467
468                 if (!hw->rsrcs.ldb_ports[prev].owned &&
469                     !hw->rsrcs.ldb_ports[next].owned)
470                         return port;
471         }
472
473         /* If all else fails, the driver returns the next available port. */
474         return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
475                                    typeof(*port));
476 }
477
478 static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
479                                    struct dlb2_function_resources *rsrcs,
480                                    struct dlb2_hw_domain *domain,
481                                    u32 num_ports,
482                                    u32 cos_id,
483                                    struct dlb2_cmd_response *resp)
484 {
485         unsigned int i;
486
487         if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
488                 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
489                 return -EINVAL;
490         }
491
492         for (i = 0; i < num_ports; i++) {
493                 struct dlb2_ldb_port *port;
494
495                 port = dlb2_get_next_ldb_port(hw, rsrcs,
496                                               domain->id.phys_id, cos_id);
497                 if (port == NULL) {
498                         DLB2_HW_ERR(hw,
499                                     "[%s()] Internal error: domain validation failed\n",
500                                     __func__);
501                         return -EFAULT;
502                 }
503
504                 dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
505                               &port->func_list);
506
507                 port->domain_id = domain->id;
508                 port->owned = true;
509
510                 dlb2_list_add(&domain->avail_ldb_ports[cos_id],
511                               &port->domain_list);
512         }
513
514         rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;
515
516         return 0;
517 }
518
519
520 static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
521                                  struct dlb2_function_resources *rsrcs,
522                                  struct dlb2_hw_domain *domain,
523                                  struct dlb2_create_sched_domain_args *args,
524                                  struct dlb2_cmd_response *resp)
525 {
526         unsigned int i, j;
527         int ret;
528
529         if (args->cos_strict) {
530                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
531                         u32 num = args->num_cos_ldb_ports[i];
532
533                         /* Allocate ports from specific classes-of-service */
534                         ret = __dlb2_attach_ldb_ports(hw,
535                                                       rsrcs,
536                                                       domain,
537                                                       num,
538                                                       i,
539                                                       resp);
540                         if (ret)
541                                 return ret;
542                 }
543         } else {
544                 unsigned int k;
545                 u32 cos_id;
546
547                 /*
548                  * Attempt to allocate from the specified class-of-service, but
549                  * fall back to the other classes if that fails.
550                  */
551                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
552                         for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
553                                 for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
554                                         cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;
555
556                                         ret = __dlb2_attach_ldb_ports(hw,
557                                                                       rsrcs,
558                                                                       domain,
559                                                                       1,
560                                                                       cos_id,
561                                                                       resp);
562                                         if (ret == 0)
563                                                 break;
564                                 }
565
566                                 if (ret)
567                                         return ret;
568                         }
569                 }
570         }
571
572         /* Allocate num_ldb_ports from any class-of-service */
573         for (i = 0; i < args->num_ldb_ports; i++) {
574                 for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
575                         ret = __dlb2_attach_ldb_ports(hw,
576                                                       rsrcs,
577                                                       domain,
578                                                       1,
579                                                       j,
580                                                       resp);
581                         if (ret == 0)
582                                 break;
583                 }
584
585                 if (ret)
586                         return ret;
587         }
588
589         return 0;
590 }
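
/*
 * Example of the non-strict fallback above: a request for one port in CoS 2
 * probes the classes in the order 2, 3, 0, 1 (cos_id = (i + k) %
 * DLB2_NUM_COS_DOMAINS) and takes the first class that still has a free port;
 * only if all four classes are exhausted does the request fail.
 */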
591
592 static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
593                                  struct dlb2_function_resources *rsrcs,
594                                  struct dlb2_hw_domain *domain,
595                                  u32 num_ports,
596                                  struct dlb2_cmd_response *resp)
597 {
598         unsigned int i;
599
600         if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
601                 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
602                 return -EINVAL;
603         }
604
605         for (i = 0; i < num_ports; i++) {
606                 struct dlb2_dir_pq_pair *port;
607
608                 port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
609                                            typeof(*port));
610                 if (port == NULL) {
611                         DLB2_HW_ERR(hw,
612                                     "[%s()] Internal error: domain validation failed\n",
613                                     __func__);
614                         return -EFAULT;
615                 }
616
617                 dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
618
619                 port->domain_id = domain->id;
620                 port->owned = true;
621
622                 dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
623         }
624
625         rsrcs->num_avail_dir_pq_pairs -= num_ports;
626
627         return 0;
628 }
629
630 static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
631                                    struct dlb2_hw_domain *domain,
632                                    u32 num_credits,
633                                    struct dlb2_cmd_response *resp)
634 {
635         if (rsrcs->num_avail_qed_entries < num_credits) {
636                 resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
637                 return -EINVAL;
638         }
639
640         rsrcs->num_avail_qed_entries -= num_credits;
641         domain->num_ldb_credits += num_credits;
642         return 0;
643 }
644
645 static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
646                                    struct dlb2_hw_domain *domain,
647                                    u32 num_credits,
648                                    struct dlb2_cmd_response *resp)
649 {
650         if (rsrcs->num_avail_dqed_entries < num_credits) {
651                 resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
652                 return -EINVAL;
653         }
654
655         rsrcs->num_avail_dqed_entries -= num_credits;
656         domain->num_dir_credits += num_credits;
657         return 0;
658 }
659
660
661 static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
662                                         struct dlb2_hw_domain *domain,
663                                         u32 num_atomic_inflights,
664                                         struct dlb2_cmd_response *resp)
665 {
666         if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
667                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
668                 return -EINVAL;
669         }
670
671         rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
672         domain->num_avail_aqed_entries += num_atomic_inflights;
673         return 0;
674 }
675
676 static int
677 dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
678                                      struct dlb2_hw_domain *domain,
679                                      u32 num_hist_list_entries,
680                                      struct dlb2_cmd_response *resp)
681 {
682         struct dlb2_bitmap *bitmap;
683         int base;
684
685         if (num_hist_list_entries) {
686                 bitmap = rsrcs->avail_hist_list_entries;
687
688                 base = dlb2_bitmap_find_set_bit_range(bitmap,
689                                                       num_hist_list_entries);
690                 if (base < 0)
691                         goto error;
692
693                 domain->total_hist_list_entries = num_hist_list_entries;
694                 domain->avail_hist_list_entries = num_hist_list_entries;
695                 domain->hist_list_entry_base = base;
696                 domain->hist_list_entry_offset = 0;
697
698                 dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
699         }
700         return 0;
701
702 error:
703         resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
704         return -EINVAL;
705 }
706
707 static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
708                                   struct dlb2_function_resources *rsrcs,
709                                   struct dlb2_hw_domain *domain,
710                                   u32 num_queues,
711                                   struct dlb2_cmd_response *resp)
712 {
713         unsigned int i;
714
715         if (rsrcs->num_avail_ldb_queues < num_queues) {
716                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
717                 return -EINVAL;
718         }
719
720         for (i = 0; i < num_queues; i++) {
721                 struct dlb2_ldb_queue *queue;
722
723                 queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
724                                             typeof(*queue));
725                 if (queue == NULL) {
726                         DLB2_HW_ERR(hw,
727                                     "[%s()] Internal error: domain validation failed\n",
728                                     __func__);
729                         return -EFAULT;
730                 }
731
732                 dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
733
734                 queue->domain_id = domain->id;
735                 queue->owned = true;
736
737                 dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
738         }
739
740         rsrcs->num_avail_ldb_queues -= num_queues;
741
742         return 0;
743 }
744
745 static int
746 dlb2_domain_attach_resources(struct dlb2_hw *hw,
747                              struct dlb2_function_resources *rsrcs,
748                              struct dlb2_hw_domain *domain,
749                              struct dlb2_create_sched_domain_args *args,
750                              struct dlb2_cmd_response *resp)
751 {
752         int ret;
753
754         ret = dlb2_attach_ldb_queues(hw,
755                                      rsrcs,
756                                      domain,
757                                      args->num_ldb_queues,
758                                      resp);
759         if (ret)
760                 return ret;
761
762         ret = dlb2_attach_ldb_ports(hw,
763                                     rsrcs,
764                                     domain,
765                                     args,
766                                     resp);
767         if (ret)
768                 return ret;
769
770         ret = dlb2_attach_dir_ports(hw,
771                                     rsrcs,
772                                     domain,
773                                     args->num_dir_ports,
774                                     resp);
775         if (ret)
776                 return ret;
777
778         if (hw->ver == DLB2_HW_V2) {
779                 ret = dlb2_attach_ldb_credits(rsrcs,
780                                               domain,
781                                               args->num_ldb_credits,
782                                               resp);
783                 if (ret)
784                         return ret;
785
786                 ret = dlb2_attach_dir_credits(rsrcs,
787                                               domain,
788                                               args->num_dir_credits,
789                                               resp);
790                 if (ret)
791                         return ret;
792         } else {  /* DLB 2.5 */
793                 ret = dlb2_attach_credits(rsrcs,
794                                           domain,
795                                           args->num_credits,
796                                           resp);
797                 if (ret)
798                         return ret;
799         }
800
801         ret = dlb2_attach_domain_hist_list_entries(rsrcs,
802                                                    domain,
803                                                    args->num_hist_list_entries,
804                                                    resp);
805         if (ret)
806                 return ret;
807
808         ret = dlb2_attach_atomic_inflights(rsrcs,
809                                            domain,
810                                            args->num_atomic_inflights,
811                                            resp);
812         if (ret)
813                 return ret;
814
815         dlb2_configure_domain_credits(hw, domain);
816
817         domain->configured = true;
818
819         domain->started = false;
820
821         rsrcs->num_avail_domains--;
822
823         return 0;
824 }
825
826 static int
827 dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
828                                   struct dlb2_create_sched_domain_args *args,
829                                   struct dlb2_cmd_response *resp,
830                                   struct dlb2_hw *hw,
831                                   struct dlb2_hw_domain **out_domain)
832 {
833         u32 num_avail_ldb_ports, req_ldb_ports;
834         struct dlb2_bitmap *avail_hl_entries;
835         unsigned int max_contig_hl_range;
836         struct dlb2_hw_domain *domain;
837         int i;
838
839         avail_hl_entries = rsrcs->avail_hist_list_entries;
840
841         max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);
842
843         num_avail_ldb_ports = 0;
844         req_ldb_ports = 0;
845         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
846                 num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];
847
848                 req_ldb_ports += args->num_cos_ldb_ports[i];
849         }
850
851         req_ldb_ports += args->num_ldb_ports;
852
853         if (rsrcs->num_avail_domains < 1) {
854                 resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
855                 return -EINVAL;
856         }
857
858         domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
859         if (domain == NULL) {
860                 resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
861                 return -EFAULT;
862         }
863
864         if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
865                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
866                 return -EINVAL;
867         }
868
869         if (req_ldb_ports > num_avail_ldb_ports) {
870                 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
871                 return -EINVAL;
872         }
873
874         for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
875                 if (args->num_cos_ldb_ports[i] >
876                     rsrcs->num_avail_ldb_ports[i]) {
877                         resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
878                         return -EINVAL;
879                 }
880         }
881
882         if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
883                 resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
884                 return -EINVAL;
885         }
886
887         if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
888                 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
889                 return -EINVAL;
890         }
891         if (hw->ver == DLB2_HW_V2_5) {
892                 if (rsrcs->num_avail_entries < args->num_credits) {
893                         resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
894                         return -EINVAL;
895                 }
896         } else {
897                 if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
898                         resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
899                         return -EINVAL;
900                 }
901                 if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
902                         resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
903                         return -EINVAL;
904                 }
905         }
906
907         if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
908                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
909                 return -EINVAL;
910         }
911
912         if (max_contig_hl_range < args->num_hist_list_entries) {
913                 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
914                 return -EINVAL;
915         }
916
917         *out_domain = domain;
918
919         return 0;
920 }
921
922 static void
923 dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
924                                   struct dlb2_create_sched_domain_args *args,
925                                   bool vdev_req,
926                                   unsigned int vdev_id)
927 {
928         DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
929         if (vdev_req)
930                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
931         DLB2_HW_DBG(hw, "\tNumber of LDB queues:          %d\n",
932                     args->num_ldb_queues);
933         DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
934                     args->num_ldb_ports);
935         DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0):   %d\n",
936                     args->num_cos_ldb_ports[0]);
937         DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1):   %d\n",
938                     args->num_cos_ldb_ports[1]);
939         DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2):   %d\n",
940                     args->num_cos_ldb_ports[2]);
941         DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3):   %d\n",
942                     args->num_cos_ldb_ports[3]);
943         DLB2_HW_DBG(hw, "\tStrict CoS allocation:         %d\n",
944                     args->cos_strict);
945         DLB2_HW_DBG(hw, "\tNumber of DIR ports:           %d\n",
946                     args->num_dir_ports);
947         DLB2_HW_DBG(hw, "\tNumber of ATM inflights:       %d\n",
948                     args->num_atomic_inflights);
949         DLB2_HW_DBG(hw, "\tNumber of hist list entries:   %d\n",
950                     args->num_hist_list_entries);
951         if (hw->ver == DLB2_HW_V2) {
952                 DLB2_HW_DBG(hw, "\tNumber of LDB credits:         %d\n",
953                             args->num_ldb_credits);
954                 DLB2_HW_DBG(hw, "\tNumber of DIR credits:         %d\n",
955                             args->num_dir_credits);
956         } else {
957                 DLB2_HW_DBG(hw, "\tNumber of credits:         %d\n",
958                             args->num_credits);
959         }
960 }
961
962 /**
963  * dlb2_hw_create_sched_domain() - create a scheduling domain
964  * @hw: dlb2_hw handle for a particular device.
965  * @args: scheduling domain creation arguments.
966  * @resp: response structure.
967  * @vdev_req: indicates whether this request came from a vdev.
968  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
969  *
970  * This function creates a scheduling domain containing the resources specified
971  * in args. The individual resources (queues, ports, credits) can be configured
972  * after creating a scheduling domain.
973  *
974  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
975  * device.
976  *
977  * Return:
978  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
979  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
980  * contains the domain ID.
981  *
982  * resp->id contains a virtual ID if vdev_req is true.
983  *
984  * Errors:
985  * EINVAL - A requested resource is unavailable, or the requested domain name
986  *          is already in use.
987  * EFAULT - Internal error (resp->status not set).
988  */
989 int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
990                                 struct dlb2_create_sched_domain_args *args,
991                                 struct dlb2_cmd_response *resp,
992                                 bool vdev_req,
993                                 unsigned int vdev_id)
994 {
995         struct dlb2_function_resources *rsrcs;
996         struct dlb2_hw_domain *domain;
997         int ret;
998
999         rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1000
1001         dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);
1002
1003         /*
1004          * Verify that hardware resources are available before attempting to
1005          * satisfy the request. This simplifies the error unwinding code.
1006          */
1007         ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp, hw, &domain);
1008         if (ret)
1009                 return ret;
1010
1011         dlb2_init_domain_rsrc_lists(domain);
1012
1013         ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
1014         if (ret) {
1015                 DLB2_HW_ERR(hw,
1016                             "[%s()] Internal error: failed to attach domain resources\n",
1017                             __func__);
1018
1019                 return ret;
1020         }
1021
1022         dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);
1023
1024         dlb2_list_add(&rsrcs->used_domains, &domain->func_list);
1025
1026         resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
1027         resp->status = 0;
1028
1029         return 0;
1030 }
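
/*
 * Illustrative request (sketch only; field values are arbitrary): to create a
 * domain whose load-balanced ports are drawn from specific classes of
 * service, a caller fills num_cos_ldb_ports[] instead of (or in addition to)
 * num_ldb_ports, for example:
 *
 *	struct dlb2_create_sched_domain_args args = {0};
 *
 *	args.num_ldb_queues = 2;
 *	args.num_cos_ldb_ports[0] = 2;	(two ports taken only from CoS 0)
 *	args.num_cos_ldb_ports[2] = 1;	(one port taken only from CoS 2)
 *	args.cos_strict = 1;		(fail rather than fall back)
 *	args.num_dir_ports = 1;
 *	args.num_atomic_inflights = ...;
 *	args.num_hist_list_entries = ...;
 *	ret = dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0);
 */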
1031
1032 static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
1033                                      struct dlb2_dir_pq_pair *port)
1034 {
1035         u32 reg = 0;
1036
1037         DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
1038         DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
1039
1040         dlb2_flush_csr(hw);
1041 }
1042
1043 static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
1044                                    struct dlb2_dir_pq_pair *port)
1045 {
1046         u32 cnt;
1047
1048         cnt = DLB2_CSR_RD(hw,
1049                           DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));
1050
1051         /*
1052          * Account for the initial token count, which is used in order to
1053          * provide a CQ with depth less than 8.
1054          */
1055
1056         return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
1057                port->init_tkn_cnt;
1058 }
1059
1060 static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
1061                               struct dlb2_dir_pq_pair *port)
1062 {
1063         unsigned int port_id = port->id.phys_id;
1064         u32 cnt;
1065
1066         /* Return any outstanding tokens */
1067         cnt = dlb2_dir_cq_token_count(hw, port);
1068
1069         if (cnt != 0) {
1070                 struct dlb2_hcw hcw_mem[8], *hcw;
1071                 void __iomem *pp_addr;
1072
1073                 pp_addr = os_map_producer_port(hw, port_id, false);
1074
1075                 /* Point hcw to a 64B-aligned location */
1076                 hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1077
1078                 /*
1079                  * Program the first HCW for a batch token return and
1080                  * the rest as NOOPS
1081                  */
1082                 memset(hcw, 0, 4 * sizeof(*hcw));
1083                 hcw->cq_token = 1;
1084                 hcw->lock_id = cnt - 1;
1085
1086                 dlb2_movdir64b(pp_addr, hcw);
1087
1088                 os_fence_hcw(hw, pp_addr);
1089
1090                 os_unmap_producer_port(hw, pp_addr);
1091         }
1092
1093         return cnt;
1094 }
1095
1096 static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
1097                                     struct dlb2_dir_pq_pair *port)
1098 {
1099         u32 reg = 0;
1100
1101         DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
1102
1103         dlb2_flush_csr(hw);
1104 }
1105
1106 static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
1107                                      struct dlb2_hw_domain *domain,
1108                                      bool toggle_port)
1109 {
1110         struct dlb2_list_entry *iter;
1111         struct dlb2_dir_pq_pair *port;
1112         int drain_cnt = 0;
1113         RTE_SET_USED(iter);
1114
1115         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
1116                 /*
1117                  * Can't drain a port if it's not configured, and there's
1118                  * nothing to drain if its queue is unconfigured.
1119                  */
1120                 if (!port->port_configured || !port->queue_configured)
1121                         continue;
1122
1123                 if (toggle_port)
1124                         dlb2_dir_port_cq_disable(hw, port);
1125
1126                 drain_cnt = dlb2_drain_dir_cq(hw, port);
1127
1128                 if (toggle_port)
1129                         dlb2_dir_port_cq_enable(hw, port);
1130         }
1131
1132         return drain_cnt;
1133 }
1134
1135 static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
1136                                 struct dlb2_dir_pq_pair *queue)
1137 {
1138         u32 cnt;
1139
1140         cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
1141                                                       queue->id.phys_id));
1142
1143         return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
1144 }
1145
1146 static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
1147                                     struct dlb2_dir_pq_pair *queue)
1148 {
1149         return dlb2_dir_queue_depth(hw, queue) == 0;
1150 }
1151
1152 static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
1153                                          struct dlb2_hw_domain *domain)
1154 {
1155         struct dlb2_list_entry *iter;
1156         struct dlb2_dir_pq_pair *queue;
1157         RTE_SET_USED(iter);
1158
1159         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
1160                 if (!dlb2_dir_queue_is_empty(hw, queue))
1161                         return false;
1162         }
1163
1164         return true;
1165 }
1166 static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
1167                                         struct dlb2_hw_domain *domain)
1168 {
1169         int i;
1170
1171         /* If the domain hasn't been started, there's no traffic to drain */
1172         if (!domain->started)
1173                 return 0;
1174
1175         for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1176                 int drain_cnt;
1177
1178                 drain_cnt = dlb2_domain_drain_dir_cqs(hw, domain, false);
1179
1180                 if (dlb2_domain_dir_queues_empty(hw, domain))
1181                         break;
1182
1183                 /*
1184                  * Allow time for DLB to schedule QEs before draining
1185                  * the CQs again.
1186                  */
1187                 if (!drain_cnt)
1188                         rte_delay_us(1);
1189
1190         }
1191
1192         if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1193                 DLB2_HW_ERR(hw,
1194                             "[%s()] Internal error: failed to empty queues\n",
1195                             __func__);
1196                 return -EFAULT;
1197         }
1198
1199         /*
1200          * Drain the CQs one more time. For the queues to have gone empty, the
1201          * device must have scheduled one or more QEs into the CQs.
1202          */
1203         dlb2_domain_drain_dir_cqs(hw, domain, true);
1204
1205         return 0;
1206 }
1207
1208 static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
1209                                     struct dlb2_ldb_port *port)
1210 {
1211         u32 reg = 0;
1212
1213         /*
1214          * Don't re-enable the port if a removal is pending. The caller should
1215          * mark this port as enabled (if it isn't already), and when the
1216          * removal completes the port will be enabled.
1217          */
1218         if (port->num_pending_removals)
1219                 return;
1220
1221         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
1222
1223         dlb2_flush_csr(hw);
1224 }
1225
1226 static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
1227                                      struct dlb2_ldb_port *port)
1228 {
1229         u32 reg = 0;
1230
1231         DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
1232         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
1233
1234         dlb2_flush_csr(hw);
1235 }
1236
1237 static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
1238                                       struct dlb2_ldb_port *port)
1239 {
1240         u32 cnt;
1241
1242         cnt = DLB2_CSR_RD(hw,
1243                           DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));
1244
1245         return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
1246 }
1247
1248 static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
1249                                    struct dlb2_ldb_port *port)
1250 {
1251         u32 cnt;
1252
1253         cnt = DLB2_CSR_RD(hw,
1254                           DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));
1255
1256         /*
1257          * Account for the initial token count, which is used in order to
1258          * provide a CQ with depth less than 8.
1259          */
1260
1261         return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
1262                 port->init_tkn_cnt;
1263 }
1264
1265 static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
1266 {
1267         u32 infl_cnt, tkn_cnt;
1268         unsigned int i;
1269
1270         infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
1271         tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
1272
1273         if (infl_cnt || tkn_cnt) {
1274                 struct dlb2_hcw hcw_mem[8], *hcw;
1275                 void __iomem *pp_addr;
1276
1277                 pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
1278
1279                 /* Point hcw to a 64B-aligned location */
1280                 hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1281
1282                 /*
1283                  * Program the first HCW for a completion and token return and
1284                  * the other HCWs as NOOPS
1285                  */
1286
1287                 memset(hcw, 0, 4 * sizeof(*hcw));
1288                 hcw->qe_comp = (infl_cnt > 0);
1289                 hcw->cq_token = (tkn_cnt > 0);
1290                 hcw->lock_id = tkn_cnt - 1;
1291
1292                 /* Return tokens in the first HCW */
1293                 dlb2_movdir64b(pp_addr, hcw);
1294
1295                 hcw->cq_token = 0;
1296
1297                 /* Issue remaining completions (if any) */
1298                 for (i = 1; i < infl_cnt; i++)
1299                         dlb2_movdir64b(pp_addr, hcw);
1300
1301                 os_fence_hcw(hw, pp_addr);
1302
1303                 os_unmap_producer_port(hw, pp_addr);
1304         }
1305
1306         return tkn_cnt;
1307 }
1308
1309 static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
1310                                       struct dlb2_hw_domain *domain,
1311                                       bool toggle_port)
1312 {
1313         struct dlb2_list_entry *iter;
1314         struct dlb2_ldb_port *port;
1315         int drain_cnt = 0;
1316         int i;
1317         RTE_SET_USED(iter);
1318
1319         /* If the domain hasn't been started, there's no traffic to drain */
1320         if (!domain->started)
1321                 return 0;
1322
1323         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1324                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1325                         if (toggle_port)
1326                                 dlb2_ldb_port_cq_disable(hw, port);
1327
1328                         drain_cnt = dlb2_drain_ldb_cq(hw, port);
1329
1330                         if (toggle_port)
1331                                 dlb2_ldb_port_cq_enable(hw, port);
1332                 }
1333         }
1334
1335         return drain_cnt;
1336 }
1337
1338 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
1339                                 struct dlb2_ldb_queue *queue)
1340 {
1341         u32 aqed, ldb, atm;
1342
1343         aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
1344                                                        queue->id.phys_id));
1345         ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
1346                                                       queue->id.phys_id));
1347         atm = DLB2_CSR_RD(hw,
1348                           DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));
1349
1350         return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
1351                + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
1352                + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
1353 }
1354
1355 static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
1356                                     struct dlb2_ldb_queue *queue)
1357 {
1358         return dlb2_ldb_queue_depth(hw, queue) == 0;
1359 }
1360
1361 static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
1362                                             struct dlb2_hw_domain *domain)
1363 {
1364         struct dlb2_list_entry *iter;
1365         struct dlb2_ldb_queue *queue;
1366         RTE_SET_USED(iter);
1367
1368         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
1369                 if (queue->num_mappings == 0)
1370                         continue;
1371
1372                 if (!dlb2_ldb_queue_is_empty(hw, queue))
1373                         return false;
1374         }
1375
1376         return true;
1377 }
1378
1379 static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
1380                                            struct dlb2_hw_domain *domain)
1381 {
1382         int i;
1383
1384         /* If the domain hasn't been started, there's no traffic to drain */
1385         if (!domain->started)
1386                 return 0;
1387
1388         if (domain->num_pending_removals > 0) {
1389                 DLB2_HW_ERR(hw,
1390                             "[%s()] Internal error: failed to unmap domain queues\n",
1391                             __func__);
1392                 return -EFAULT;
1393         }
1394
1395         for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1396                 int drain_cnt;
1397
1398                 drain_cnt = dlb2_domain_drain_ldb_cqs(hw, domain, false);
1399
1400                 if (dlb2_domain_mapped_queues_empty(hw, domain))
1401                         break;
1402
1403                 /*
1404                  * Allow time for DLB to schedule QEs before draining
1405                  * the CQs again.
1406                  */
1407                 if (!drain_cnt)
1408                         rte_delay_us(1);
1409         }
1410
1411         if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1412                 DLB2_HW_ERR(hw,
1413                             "[%s()] Internal error: failed to empty queues\n",
1414                             __func__);
1415                 return -EFAULT;
1416         }
1417
1418         /*
1419          * Drain the CQs one more time. For the queues to have gone empty, the
1420          * device must have scheduled one or more QEs into the CQs.
1421          */
1422         dlb2_domain_drain_ldb_cqs(hw, domain, true);
1423
1424         return 0;
1425 }
1426
1427 static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
1428                                        struct dlb2_hw_domain *domain)
1429 {
1430         struct dlb2_list_entry *iter;
1431         struct dlb2_ldb_port *port;
1432         int i;
1433         RTE_SET_USED(iter);
1434
1435         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1436                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1437                         port->enabled = true;
1438
1439                         dlb2_ldb_port_cq_enable(hw, port);
1440                 }
1441         }
1442 }
1443
1444 static struct dlb2_ldb_queue *
1445 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
1446                            u32 id,
1447                            bool vdev_req,
1448                            unsigned int vdev_id)
1449 {
1450         struct dlb2_list_entry *iter1;
1451         struct dlb2_list_entry *iter2;
1452         struct dlb2_function_resources *rsrcs;
1453         struct dlb2_hw_domain *domain;
1454         struct dlb2_ldb_queue *queue;
1455         RTE_SET_USED(iter1);
1456         RTE_SET_USED(iter2);
1457
1458         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1459                 return NULL;
1460
1461         rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1462
1463         if (!vdev_req)
1464                 return &hw->rsrcs.ldb_queues[id];
1465
1466         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
1467                 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
1468                         if (queue->id.virt_id == id)
1469                                 return queue;
1470                 }
1471         }
1472
1473         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
1474                 if (queue->id.virt_id == id)
1475                         return queue;
1476         }
1477
1478         return NULL;
1479 }
1480
1481 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
1482                                                       u32 id,
1483                                                       bool vdev_req,
1484                                                       unsigned int vdev_id)
1485 {
1486         struct dlb2_list_entry *iteration;
1487         struct dlb2_function_resources *rsrcs;
1488         struct dlb2_hw_domain *domain;
1489         RTE_SET_USED(iteration);
1490
1491         if (id >= DLB2_MAX_NUM_DOMAINS)
1492                 return NULL;
1493
1494         if (!vdev_req)
1495                 return &hw->domains[id];
1496
1497         rsrcs = &hw->vdev[vdev_id];
1498
1499         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
1500                 if (domain->id.virt_id == id)
1501                         return domain;
1502         }
1503
1504         return NULL;
1505 }
1506
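/*
 * Record a {CQ, slot} QID-map state change: adjust the queue's, port's, and
 * domain's mapping/pending counters for the transition and store the new
 * state. An invalid transition is logged and rejected with -EFAULT.
 */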
1507 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
1508                                            struct dlb2_ldb_port *port,
1509                                            struct dlb2_ldb_queue *queue,
1510                                            int slot,
1511                                            enum dlb2_qid_map_state new_state)
1512 {
1513         enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
1514         struct dlb2_hw_domain *domain;
1515         int domain_id;
1516
1517         domain_id = port->domain_id.phys_id;
1518
1519         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1520         if (domain == NULL) {
1521                 DLB2_HW_ERR(hw,
1522                             "[%s()] Internal error: unable to find domain %d\n",
1523                             __func__, domain_id);
1524                 return -EINVAL;
1525         }
1526
1527         switch (curr_state) {
1528         case DLB2_QUEUE_UNMAPPED:
1529                 switch (new_state) {
1530                 case DLB2_QUEUE_MAPPED:
1531                         queue->num_mappings++;
1532                         port->num_mappings++;
1533                         break;
1534                 case DLB2_QUEUE_MAP_IN_PROG:
1535                         queue->num_pending_additions++;
1536                         domain->num_pending_additions++;
1537                         break;
1538                 default:
1539                         goto error;
1540                 }
1541                 break;
1542         case DLB2_QUEUE_MAPPED:
1543                 switch (new_state) {
1544                 case DLB2_QUEUE_UNMAPPED:
1545                         queue->num_mappings--;
1546                         port->num_mappings--;
1547                         break;
1548                 case DLB2_QUEUE_UNMAP_IN_PROG:
1549                         port->num_pending_removals++;
1550                         domain->num_pending_removals++;
1551                         break;
1552                 case DLB2_QUEUE_MAPPED:
1553                         /* Priority change, nothing to update */
1554                         break;
1555                 default:
1556                         goto error;
1557                 }
1558                 break;
1559         case DLB2_QUEUE_MAP_IN_PROG:
1560                 switch (new_state) {
1561                 case DLB2_QUEUE_UNMAPPED:
1562                         queue->num_pending_additions--;
1563                         domain->num_pending_additions--;
1564                         break;
1565                 case DLB2_QUEUE_MAPPED:
1566                         queue->num_mappings++;
1567                         port->num_mappings++;
1568                         queue->num_pending_additions--;
1569                         domain->num_pending_additions--;
1570                         break;
1571                 default:
1572                         goto error;
1573                 }
1574                 break;
1575         case DLB2_QUEUE_UNMAP_IN_PROG:
1576                 switch (new_state) {
1577                 case DLB2_QUEUE_UNMAPPED:
1578                         port->num_pending_removals--;
1579                         domain->num_pending_removals--;
1580                         queue->num_mappings--;
1581                         port->num_mappings--;
1582                         break;
1583                 case DLB2_QUEUE_MAPPED:
1584                         port->num_pending_removals--;
1585                         domain->num_pending_removals--;
1586                         break;
1587                 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1588                         /* Nothing to update */
1589                         break;
1590                 default:
1591                         goto error;
1592                 }
1593                 break;
1594         case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1595                 switch (new_state) {
1596                 case DLB2_QUEUE_UNMAP_IN_PROG:
1597                         /* Nothing to update */
1598                         break;
1599                 case DLB2_QUEUE_UNMAPPED:
1600                         /*
1601                          * An UNMAP_IN_PROG_PENDING_MAP slot briefly
1602                          * becomes UNMAPPED before it transitions to
1603                          * MAP_IN_PROG.
1604                          */
1605                         queue->num_mappings--;
1606                         port->num_mappings--;
1607                         port->num_pending_removals--;
1608                         domain->num_pending_removals--;
1609                         break;
1610                 default:
1611                         goto error;
1612                 }
1613                 break;
1614         default:
1615                 goto error;
1616         }
1617
1618         port->qid_map[slot].state = new_state;
1619
1620         DLB2_HW_DBG(hw,
1621                     "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
1622                     __func__, queue->id.phys_id, port->id.phys_id,
1623                     curr_state, new_state);
1624         return 0;
1625
1626 error:
1627         DLB2_HW_ERR(hw,
1628                     "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
1629                     __func__, queue->id.phys_id, port->id.phys_id,
1630                     curr_state, new_state);
1631         return -EFAULT;
1632 }
1633
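/*
 * dlb2_port_find_slot() and dlb2_port_find_slot_queue() scan the port's QID
 * map for the first slot in the given state (the latter also matching the
 * given queue), store the index in *slot, and return true if one was found.
 */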
1634 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
1635                                 enum dlb2_qid_map_state state,
1636                                 int *slot)
1637 {
1638         int i;
1639
1640         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1641                 if (port->qid_map[i].state == state)
1642                         break;
1643         }
1644
1645         *slot = i;
1646
1647         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1648 }
1649
1650 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
1651                                       enum dlb2_qid_map_state state,
1652                                       struct dlb2_ldb_queue *queue,
1653                                       int *slot)
1654 {
1655         int i;
1656
1657         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1658                 if (port->qid_map[i].state == state &&
1659                     port->qid_map[i].qid == queue->id.phys_id)
1660                         break;
1661         }
1662
1663         *slot = i;
1664
1665         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1666 }
1667
1668 /*
1669  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
1670  * their function names imply, and should only be called by the dynamic CQ
1671  * mapping code.
1672  */
1673 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
1674                                               struct dlb2_hw_domain *domain,
1675                                               struct dlb2_ldb_queue *queue)
1676 {
1677         struct dlb2_list_entry *iter;
1678         struct dlb2_ldb_port *port;
1679         int slot, i;
1680         RTE_SET_USED(iter);
1681
1682         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1683                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1684                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1685
1686                         if (!dlb2_port_find_slot_queue(port, state,
1687                                                        queue, &slot))
1688                                 continue;
1689
1690                         if (port->enabled)
1691                                 dlb2_ldb_port_cq_disable(hw, port);
1692                 }
1693         }
1694 }
1695
1696 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
1697                                              struct dlb2_hw_domain *domain,
1698                                              struct dlb2_ldb_queue *queue)
1699 {
1700         struct dlb2_list_entry *iter;
1701         struct dlb2_ldb_port *port;
1702         int slot, i;
1703         RTE_SET_USED(iter);
1704
1705         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1706                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1707                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1708
1709                         if (!dlb2_port_find_slot_queue(port, state,
1710                                                        queue, &slot))
1711                                 continue;
1712
1713                         if (port->enabled)
1714                                 dlb2_ldb_port_cq_enable(hw, port);
1715                 }
1716         }
1717 }
1718
1719 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
1720                                                 struct dlb2_ldb_port *port,
1721                                                 int slot)
1722 {
1723         u32 ctrl = 0;
1724
1725         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1726         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1727         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1728
1729         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1730
1731         dlb2_flush_csr(hw);
1732 }
1733
1734 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
1735                                               struct dlb2_ldb_port *port,
1736                                               int slot)
1737 {
1738         u32 ctrl = 0;
1739
1740         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1741         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1742         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1743         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1744
1745         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1746
1747         dlb2_flush_csr(hw);
1748 }
1749
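/*
 * Program a queue->CQ mapping immediately: pick a pending, already-mapped,
 * or unused slot, update the CQ2PRIOV, CQ2QID, and QID2CQIDIX registers, and
 * mark the slot MAPPED. Used before the domain is started and by the
 * dynamic-mapping completion path.
 */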
1750 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
1751                                         struct dlb2_ldb_port *p,
1752                                         struct dlb2_ldb_queue *q,
1753                                         u8 priority)
1754 {
1755         enum dlb2_qid_map_state state;
1756         u32 lsp_qid2cq2;
1757         u32 lsp_qid2cq;
1758         u32 atm_qid2cq;
1759         u32 cq2priov;
1760         u32 cq2qid;
1761         int i;
1762
1763         /* Look for a pending or already mapped slot, else an unused slot */
1764         if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
1765             !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
1766             !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
1767                 DLB2_HW_ERR(hw,
1768                             "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
1769                             __func__, __LINE__);
1770                 return -EFAULT;
1771         }
1772
1773         /* Read-modify-write the priority and valid bit register */
1774         cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));
1775
1776         cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
1777         cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
1778                     & DLB2_LSP_CQ2PRIOV_PRIO;
1779
1780         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);
1781
1782         /* Read-modify-write the QID map register */
1783         if (i < 4)
1784                 cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
1785                                                           p->id.phys_id));
1786         else
1787                 cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
1788                                                           p->id.phys_id));
1789
1790         if (i == 0 || i == 4)
1791                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
1792         if (i == 1 || i == 5)
1793                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
1794         if (i == 2 || i == 6)
1795                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
1796         if (i == 3 || i == 7)
1797                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);
1798
1799         if (i < 4)
1800                 DLB2_CSR_WR(hw,
1801                             DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
1802         else
1803                 DLB2_CSR_WR(hw,
1804                             DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);
1805
1806         atm_qid2cq = DLB2_CSR_RD(hw,
1807                                  DLB2_ATM_QID2CQIDIX(q->id.phys_id,
1808                                                 p->id.phys_id / 4));
1809
1810         lsp_qid2cq = DLB2_CSR_RD(hw,
1811                                  DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
1812                                                 p->id.phys_id / 4));
1813
1814         lsp_qid2cq2 = DLB2_CSR_RD(hw,
1815                                   DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
1816                                                   p->id.phys_id / 4));
1817
1818         switch (p->id.phys_id % 4) {
1819         case 0:
1820                 DLB2_BIT_SET(atm_qid2cq,
1821                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
1822                 DLB2_BIT_SET(lsp_qid2cq,
1823                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
1824                 DLB2_BIT_SET(lsp_qid2cq2,
1825                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
1826                 break;
1827
1828         case 1:
1829                 DLB2_BIT_SET(atm_qid2cq,
1830                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
1831                 DLB2_BIT_SET(lsp_qid2cq,
1832                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
1833                 DLB2_BIT_SET(lsp_qid2cq2,
1834                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
1835                 break;
1836
1837         case 2:
1838                 DLB2_BIT_SET(atm_qid2cq,
1839                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
1840                 DLB2_BIT_SET(lsp_qid2cq,
1841                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
1842                 DLB2_BIT_SET(lsp_qid2cq2,
1843                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
1844                 break;
1845
1846         case 3:
1847                 DLB2_BIT_SET(atm_qid2cq,
1848                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
1849                 DLB2_BIT_SET(lsp_qid2cq,
1850                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
1851                 DLB2_BIT_SET(lsp_qid2cq2,
1852                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
1853                 break;
1854         }
1855
1856         DLB2_CSR_WR(hw,
1857                     DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1858                     atm_qid2cq);
1859
1860         DLB2_CSR_WR(hw,
1861                     DLB2_LSP_QID2CQIDIX(hw->ver,
1862                                         q->id.phys_id, p->id.phys_id / 4),
1863                     lsp_qid2cq);
1864
1865         DLB2_CSR_WR(hw,
1866                     DLB2_LSP_QID2CQIDIX2(hw->ver,
1867                                          q->id.phys_id, p->id.phys_id / 4),
1868                     lsp_qid2cq2);
1869
1870         dlb2_flush_csr(hw);
1871
1872         p->qid_map[i].qid = q->id.phys_id;
1873         p->qid_map[i].priority = priority;
1874
1875         state = DLB2_QUEUE_MAPPED;
1876
1877         return dlb2_port_slot_state_transition(hw, p, q, i, state);
1878 }
1879
1880 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
1881                                            struct dlb2_ldb_port *port,
1882                                            struct dlb2_ldb_queue *queue,
1883                                            int slot)
1884 {
1885         u32 ctrl = 0;
1886         u32 active;
1887         u32 enq;
1888
1889         /* Set the atomic scheduling haswork bit */
1890         active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
1891                                                          queue->id.phys_id));
1892
1893         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1894         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1895         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1896         DLB2_BITS_SET(ctrl,
1897                       DLB2_BITS_GET(active,
1898                                     DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
1899                                     DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
1900
1901         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1902
1903         /* Set the non-atomic scheduling haswork bit */
1904         enq = DLB2_CSR_RD(hw,
1905                           DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
1906                                                        queue->id.phys_id));
1907
1908         memset(&ctrl, 0, sizeof(ctrl));
1909
1910         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1911         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1912         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1913         DLB2_BITS_SET(ctrl,
1914                       DLB2_BITS_GET(enq,
1915                                     DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
1916                       DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
1917
1918         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1919
1920         dlb2_flush_csr(hw);
1921
1922         return 0;
1923 }
1924
1925 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
1926                                               struct dlb2_ldb_port *port,
1927                                               u8 slot)
1928 {
1929         u32 ctrl = 0;
1930
1931         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1932         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1933         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
1934
1935         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1936
1937         memset(&ctrl, 0, sizeof(ctrl));
1938
1939         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1940         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1941         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
1942
1943         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1944
1945         dlb2_flush_csr(hw);
1946 }
1947
1948
1949 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
1950                                               struct dlb2_ldb_queue *queue)
1951 {
1952         u32 infl_lim = 0;
1953
1954         DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
1955                  DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
1956
1957         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
1958                     infl_lim);
1959 }
1960
1961 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
1962                                                 struct dlb2_ldb_queue *queue)
1963 {
1964         DLB2_CSR_WR(hw,
1965                     DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
1966                     DLB2_LSP_QID_LDB_INFL_LIM_RST);
1967 }
1968
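/*
 * Complete a deferred (dynamic) mapping once the queue's inflight count has
 * reached zero: statically map the slot, set its has_work bits, refresh the
 * inflight-OK status of every CQ mapped to the queue, restore the queue's
 * inflight limit, and re-enable the CQs that were disabled while the map
 * was pending.
 */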
1969 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
1970                                                 struct dlb2_hw_domain *domain,
1971                                                 struct dlb2_ldb_port *port,
1972                                                 struct dlb2_ldb_queue *queue)
1973 {
1974         struct dlb2_list_entry *iter;
1975         enum dlb2_qid_map_state state;
1976         int slot, ret, i;
1977         u32 infl_cnt;
1978         u8 prio;
1979         RTE_SET_USED(iter);
1980
1981         infl_cnt = DLB2_CSR_RD(hw,
1982                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
1983                                                     queue->id.phys_id));
1984
1985         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
1986                 DLB2_HW_ERR(hw,
1987                             "[%s()] Internal error: non-zero QID inflight count\n",
1988                             __func__);
1989                 return -EINVAL;
1990         }
1991
1992         /*
1993          * Statically map the port and set its corresponding has_work bits.
1994          */
1995         state = DLB2_QUEUE_MAP_IN_PROG;
1996         if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
1997                 return -EINVAL;
1998
1999         prio = port->qid_map[slot].priority;
2000
2001         /*
2002          * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
2003          * the port's qid_map state.
2004          */
2005         ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2006         if (ret)
2007                 return ret;
2008
2009         ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
2010         if (ret)
2011                 return ret;
2012
2013         /*
2014          * Ensure IF_status(cq,qid) is 0 before enabling the port, to
2015          * prevent spurious schedules from causing the queue's inflight
2016          * count to increase.
2017          */
2018         dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
2019
2020         /* Set the inflight-OK status for each CQ this queue is mapped to */
2021         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2022                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2023                         state = DLB2_QUEUE_MAPPED;
2024                         if (!dlb2_port_find_slot_queue(port, state,
2025                                                        queue, &slot))
2026                                 continue;
2027
2028                         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2029                 }
2030         }
2031
2032         dlb2_ldb_queue_set_inflight_limit(hw, queue);
2033
2034         /* Re-enable CQs mapped to this queue */
2035         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2036
2037         /* If this queue has other mappings pending, clear its inflight limit */
2038         if (queue->num_pending_additions > 0)
2039                 dlb2_ldb_queue_clear_inflight_limit(hw, queue);
2040
2041         return 0;
2042 }
2043
2044 /**
2045  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
2046  * @hw: dlb2_hw handle for a particular device.
2047  * @port: load-balanced port
2048  * @queue: load-balanced queue
2049  * @priority: queue servicing priority
2050  *
2051  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
2052  * at a later point, and <0 if an error occurred.
2053  */
2054 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
2055                                          struct dlb2_ldb_port *port,
2056                                          struct dlb2_ldb_queue *queue,
2057                                          u8 priority)
2058 {
2059         enum dlb2_qid_map_state state;
2060         struct dlb2_hw_domain *domain;
2061         int domain_id, slot, ret;
2062         u32 infl_cnt;
2063
2064         domain_id = port->domain_id.phys_id;
2065
2066         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
2067         if (domain == NULL) {
2068                 DLB2_HW_ERR(hw,
2069                             "[%s()] Internal error: unable to find domain %d\n",
2070                             __func__, port->domain_id.phys_id);
2071                 return -EINVAL;
2072         }
2073
2074         /*
2075          * Set the QID inflight limit to 0 to prevent further scheduling of the
2076          * queue.
2077          */
2078         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
2079                                                   queue->id.phys_id), 0);
2080
2081         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
2082                 DLB2_HW_ERR(hw,
2083                             "Internal error: No available unmapped slots\n");
2084                 return -EFAULT;
2085         }
2086
2087         port->qid_map[slot].qid = queue->id.phys_id;
2088         port->qid_map[slot].priority = priority;
2089
2090         state = DLB2_QUEUE_MAP_IN_PROG;
2091         ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
2092         if (ret)
2093                 return ret;
2094
2095         infl_cnt = DLB2_CSR_RD(hw,
2096                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2097                                                     queue->id.phys_id));
2098
2099         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2100                 /*
2101                  * The queue is owed completions so it's not safe to map it
2102                  * yet. Schedule a kernel thread to complete the mapping later,
2103                  * yet. Schedule a worker thread to complete the mapping later,
2104                  */
2105                 if (!os_worker_active(hw))
2106                         os_schedule_work(hw);
2107
2108                 return 1;
2109         }
2110
2111         /*
2112          * Disable the affected CQ, and the CQs already mapped to the QID,
2113          * before reading the QID's inflight count a second time. There is an
2114          * unlikely race in which the QID may schedule one more QE after we
2115          * read an inflight count of 0, and disabling the CQs guarantees that
2116          * the race will not occur after a re-read of the inflight count
2117          * register.
2118          */
2119         if (port->enabled)
2120                 dlb2_ldb_port_cq_disable(hw, port);
2121
2122         dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2123
2124         infl_cnt = DLB2_CSR_RD(hw,
2125                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2126                                                     queue->id.phys_id));
2127
2128         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2129                 if (port->enabled)
2130                         dlb2_ldb_port_cq_enable(hw, port);
2131
2132                 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2133
2134                 /*
2135                  * The queue is owed completions so it's not safe to map it
2136                  * yet. Schedule a kernel thread to complete the mapping later,
2137                  * yet. Schedule a worker thread to complete the mapping later,
2138                  */
2139                 if (!os_worker_active(hw))
2140                         os_schedule_work(hw);
2141
2142                 return 1;
2143         }
2144
2145         return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2146 }
2147
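/*
 * For each of the port's slots with a map in progress, complete the mapping
 * if the target queue's inflight count is zero (checked again with the
 * relevant CQs disabled to close the scheduling race); otherwise leave the
 * slot pending for a later pass.
 */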
2148 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
2149                                         struct dlb2_hw_domain *domain,
2150                                         struct dlb2_ldb_port *port)
2151 {
2152         int i;
2153
2154         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2155                 u32 infl_cnt;
2156                 struct dlb2_ldb_queue *queue;
2157                 int qid;
2158
2159                 if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
2160                         continue;
2161
2162                 qid = port->qid_map[i].qid;
2163
2164                 queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
2165
2166                 if (queue == NULL) {
2167                         DLB2_HW_ERR(hw,
2168                                     "[%s()] Internal error: unable to find queue %d\n",
2169                                     __func__, qid);
2170                         continue;
2171                 }
2172
2173                 infl_cnt = DLB2_CSR_RD(hw,
2174                                        DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2175
2176                 if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
2177                         continue;
2178
2179                 /*
2180                  * Disable the affected CQ, and the CQs already mapped to the
2181                  * QID, before reading the QID's inflight count a second time.
2182                  * There is an unlikely race in which the QID may schedule one
2183                  * more QE after we read an inflight count of 0, and disabling
2184                  * the CQs guarantees that the race will not occur after a
2185                  * re-read of the inflight count register.
2186                  */
2187                 if (port->enabled)
2188                         dlb2_ldb_port_cq_disable(hw, port);
2189
2190                 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2191
2192                 infl_cnt = DLB2_CSR_RD(hw,
2193                                        DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2194
2195                 if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2196                         if (port->enabled)
2197                                 dlb2_ldb_port_cq_enable(hw, port);
2198
2199                         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2200
2201                         continue;
2202                 }
2203
2204                 dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2205         }
2206 }
2207
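/*
 * Try to complete every in-progress QID map in a configured domain and
 * return the number of additions still pending.
 */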
2208 static unsigned int
2209 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
2210                                       struct dlb2_hw_domain *domain)
2211 {
2212         struct dlb2_list_entry *iter;
2213         struct dlb2_ldb_port *port;
2214         int i;
2215         RTE_SET_USED(iter);
2216
2217         if (!domain->configured || domain->num_pending_additions == 0)
2218                 return 0;
2219
2220         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2221                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2222                         dlb2_domain_finish_map_port(hw, domain, port);
2223         }
2224
2225         return domain->num_pending_additions;
2226 }
2227
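/*
 * Tear down a queue->CQ mapping in hardware: clear the slot's valid bit in
 * CQ2PRIOV and the CQ's bits in the ATM and LSP QID2CQIDIX vectors, then
 * transition the slot's software state to UNMAPPED.
 */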
2228 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
2229                                    struct dlb2_ldb_port *port,
2230                                    struct dlb2_ldb_queue *queue)
2231 {
2232         enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
2233         u32 lsp_qid2cq2;
2234         u32 lsp_qid2cq;
2235         u32 atm_qid2cq;
2236         u32 cq2priov;
2237         u32 queue_id;
2238         u32 port_id;
2239         int i;
2240
2241         /* Find the queue's slot */
2242         mapped = DLB2_QUEUE_MAPPED;
2243         in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
2244         pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
2245
2246         if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
2247             !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
2248             !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
2249                 DLB2_HW_ERR(hw,
2250                             "[%s():%d] Internal error: QID %d isn't mapped\n",
2251                             __func__, __LINE__, queue->id.phys_id);
2252                 return -EFAULT;
2253         }
2254
2255         port_id = port->id.phys_id;
2256         queue_id = queue->id.phys_id;
2257
2258         /* Read-modify-write the priority and valid bit register */
2259         cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));
2260
2261         cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));
2262
2263         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);
2264
2265         atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
2266                                                          port_id / 4));
2267
2268         lsp_qid2cq = DLB2_CSR_RD(hw,
2269                                  DLB2_LSP_QID2CQIDIX(hw->ver,
2270                                                 queue_id, port_id / 4));
2271
2272         lsp_qid2cq2 = DLB2_CSR_RD(hw,
2273                                   DLB2_LSP_QID2CQIDIX2(hw->ver,
2274                                                   queue_id, port_id / 4));
2275
2276         switch (port_id % 4) {
2277         case 0:
2278                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
2279                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
2280                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
2281                 break;
2282
2283         case 1:
2284                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
2285                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
2286                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
2287                 break;
2288
2289         case 2:
2290                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
2291                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
2292                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
2293                 break;
2294
2295         case 3:
2296                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
2297                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
2298                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
2299                 break;
2300         }
2301
2302         DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);
2303
2304         DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
2305                     lsp_qid2cq);
2306
2307         DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
2308                     lsp_qid2cq2);
2309
2310         dlb2_flush_csr(hw);
2311
2312         unmapped = DLB2_QUEUE_UNMAPPED;
2313
2314         return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
2315 }
2316
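/*
 * Map a queue to a port: take the static path if the domain has not started,
 * otherwise the dynamic path, which may defer completion until the queue's
 * inflights are done.
 */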
2317 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
2318                                  struct dlb2_hw_domain *domain,
2319                                  struct dlb2_ldb_port *port,
2320                                  struct dlb2_ldb_queue *queue,
2321                                  u8 prio)
2322 {
2323         if (domain->started)
2324                 return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
2325         else
2326                 return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2327 }
2328
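/*
 * Finish unmapping one {CQ, slot}: program the unmap in hardware, clear the
 * slot's has_work bits, restore its inflight-OK status, re-enable the CQ if
 * software still considers the port enabled, and start any map that was
 * waiting on this slot's removal.
 */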
2329 static void
2330 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
2331                                    struct dlb2_hw_domain *domain,
2332                                    struct dlb2_ldb_port *port,
2333                                    int slot)
2334 {
2335         enum dlb2_qid_map_state state;
2336         struct dlb2_ldb_queue *queue;
2337
2338         queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
2339
2340         state = port->qid_map[slot].state;
2341
2342         /* Update the QID2CQIDX and CQ2QID vectors */
2343         dlb2_ldb_port_unmap_qid(hw, port, queue);
2344
2345         /*
2346          * Ensure the QID will not be serviced by this {CQ, slot} by clearing
2347          * the has_work bits
2348          */
2349         dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
2350
2351         /* Reset the {CQ, slot} to its default state */
2352         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2353
2354         /* Re-enable the CQ if it was not manually disabled by the user */
2355         if (port->enabled)
2356                 dlb2_ldb_port_cq_enable(hw, port);
2357
2358         /*
2359          * If there is a mapping that is pending this slot's removal, perform
2360          * the mapping now.
2361          */
2362         if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
2363                 struct dlb2_ldb_port_qid_map *map;
2364                 struct dlb2_ldb_queue *map_queue;
2365                 u8 prio;
2366
2367                 map = &port->qid_map[slot];
2368
2369                 map->qid = map->pending_qid;
2370                 map->priority = map->pending_priority;
2371
2372                 map_queue = &hw->rsrcs.ldb_queues[map->qid];
2373                 prio = map->priority;
2374
2375                 dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
2376         }
2377 }
2378
2379
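/*
 * Complete a port's pending unmaps once its CQ has no outstanding inflights,
 * polling the inflight count for up to max_iters * iter_poll_us. Returns
 * true if the pending unmaps were processed, false if none were pending or
 * the CQ failed to drain in time.
 */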
2380 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
2381                                           struct dlb2_hw_domain *domain,
2382                                           struct dlb2_ldb_port *port)
2383 {
2384         u32 infl_cnt;
2385         int i;
2386         const int max_iters = 1000;
2387         const int iter_poll_us = 100;
2388
2389         if (port->num_pending_removals == 0)
2390                 return false;
2391
2392         /*
2393          * The unmap requires all the CQ's outstanding inflights to be
2394          * completed. Poll for up to 100 ms.
2395          */
2396         for (i = 0; i < max_iters; i++) {
2397                 infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
2398                                                        port->id.phys_id));
2399
2400                 if (DLB2_BITS_GET(infl_cnt,
2401                                   DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) == 0)
2402                         break;
2403                 rte_delay_us_sleep(iter_poll_us);
2404         }
2405
2406         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
2407                 return false;
2408
2409         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2410                 struct dlb2_ldb_port_qid_map *map;
2411
2412                 map = &port->qid_map[i];
2413
2414                 if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
2415                     map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
2416                         continue;
2417
2418                 dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
2419         }
2420
2421         return true;
2422 }
2423
2424 static unsigned int
2425 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
2426                                         struct dlb2_hw_domain *domain)
2427 {
2428         struct dlb2_list_entry *iter;
2429         struct dlb2_ldb_port *port;
2430         int i;
2431         RTE_SET_USED(iter);
2432
2433         if (!domain->configured || domain->num_pending_removals == 0)
2434                 return 0;
2435
2436         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2437                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2438                         dlb2_domain_finish_unmap_port(hw, domain, port);
2439         }
2440
2441         return domain->num_pending_removals;
2442 }
2443
2444 static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
2445                                         struct dlb2_hw_domain *domain)
2446 {
2447         struct dlb2_list_entry *iter;
2448         struct dlb2_ldb_port *port;
2449         int i;
2450         RTE_SET_USED(iter);
2451
2452         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2453                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2454                         port->enabled = false;
2455
2456                         dlb2_ldb_port_cq_disable(hw, port);
2457                 }
2458         }
2459 }
2460
2461
2462 static void dlb2_log_reset_domain(struct dlb2_hw *hw,
2463                                   u32 domain_id,
2464                                   bool vdev_req,
2465                                   unsigned int vdev_id)
2466 {
2467         DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
2468         if (vdev_req)
2469                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2470         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2471 }
2472
2473 static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
2474                                          struct dlb2_hw_domain *domain,
2475                                          unsigned int vdev_id)
2476 {
2477         struct dlb2_list_entry *iter;
2478         struct dlb2_dir_pq_pair *port;
2479         u32 vpp_v = 0;
2480         RTE_SET_USED(iter);
2481
2482         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2483                 unsigned int offs;
2484                 u32 virt_id;
2485
2486                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2487                         virt_id = port->id.virt_id;
2488                 else
2489                         virt_id = port->id.phys_id;
2490
2491                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
2492
2493                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
2494         }
2495 }
2496
2497 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2498                                          struct dlb2_hw_domain *domain,
2499                                          unsigned int vdev_id)
2500 {
2501         struct dlb2_list_entry *iter;
2502         struct dlb2_ldb_port *port;
2503         u32 vpp_v = 0;
2504         int i;
2505         RTE_SET_USED(iter);
2506
2507         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2508                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2509                         unsigned int offs;
2510                         u32 virt_id;
2511
2512                         if (hw->virt_mode == DLB2_VIRT_SRIOV)
2513                                 virt_id = port->id.virt_id;
2514                         else
2515                                 virt_id = port->id.phys_id;
2516
2517                         offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2518
2519                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
2520                 }
2521         }
2522 }
2523
2524 static void
2525 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2526                                         struct dlb2_hw_domain *domain)
2527 {
2528         struct dlb2_list_entry *iter;
2529         struct dlb2_ldb_port *port;
2530         u32 int_en = 0;
2531         u32 wd_en = 0;
2532         int i;
2533         RTE_SET_USED(iter);
2534
2535         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2536                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2537                         DLB2_CSR_WR(hw,
2538                                     DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,
2539                                                        port->id.phys_id),
2540                                     int_en);
2541
2542                         DLB2_CSR_WR(hw,
2543                                     DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,
2544                                                       port->id.phys_id),
2545                                     wd_en);
2546                 }
2547         }
2548 }
2549
2550 static void
2551 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2552                                         struct dlb2_hw_domain *domain)
2553 {
2554         struct dlb2_list_entry *iter;
2555         struct dlb2_dir_pq_pair *port;
2556         u32 int_en = 0;
2557         u32 wd_en = 0;
2558         RTE_SET_USED(iter);
2559
2560         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2561                 DLB2_CSR_WR(hw,
2562                             DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2563                             int_en);
2564
2565                 DLB2_CSR_WR(hw,
2566                             DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
2567                             wd_en);
2568         }
2569 }
2570
2571 static void
2572 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2573                                           struct dlb2_hw_domain *domain)
2574 {
2575         int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2576         struct dlb2_list_entry *iter;
2577         struct dlb2_ldb_queue *queue;
2578         RTE_SET_USED(iter);
2579
2580         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2581                 int idx = domain_offset + queue->id.phys_id;
2582
2583                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);
2584
2585                 if (queue->id.vdev_owned) {
2586                         DLB2_CSR_WR(hw,
2587                                     DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
2588                                     0);
2589
2590                         idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2591                                 queue->id.virt_id;
2592
2593                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);
2594
2595                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
2596                 }
2597         }
2598 }
2599
2600 static void
2601 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2602                                           struct dlb2_hw_domain *domain)
2603 {
2604         struct dlb2_list_entry *iter;
2605         struct dlb2_dir_pq_pair *queue;
2606         unsigned long max_ports;
2607         int domain_offset;
2608         RTE_SET_USED(iter);
2609
2610         max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
2611
2612         domain_offset = domain->id.phys_id * max_ports;
2613
2614         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2615                 int idx = domain_offset + queue->id.phys_id;
2616
2617                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);
2618
2619                 if (queue->id.vdev_owned) {
2620                         idx = queue->id.vdev_id * max_ports + queue->id.virt_id;
2621
2622                         DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);
2623
2624                         DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
2625                 }
2626         }
2627 }
2628
2629 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2630                                                struct dlb2_hw_domain *domain)
2631 {
2632         struct dlb2_list_entry *iter;
2633         struct dlb2_ldb_port *port;
2634         u32 chk_en = 0;
2635         int i;
2636         RTE_SET_USED(iter);
2637
2638         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2639                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2640                         DLB2_CSR_WR(hw,
2641                                     DLB2_CHP_SN_CHK_ENBL(hw->ver,
2642                                                          port->id.phys_id),
2643                                     chk_en);
2644                 }
2645         }
2646 }
2647
2648 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2649                                                  struct dlb2_hw_domain *domain)
2650 {
2651         struct dlb2_list_entry *iter;
2652         struct dlb2_ldb_port *port;
2653         int i;
2654         RTE_SET_USED(iter);
2655
2656         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2657                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2658                         int j;
2659
2660                         for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
2661                                 if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2662                                         break;
2663                         }
2664
2665                         if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2666                                 DLB2_HW_ERR(hw,
2667                                             "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2668                                             __func__, port->id.phys_id);
2669                                 return -EFAULT;
2670                         }
2671                 }
2672         }
2673
2674         return 0;
2675 }
2676
2677 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2678                                         struct dlb2_hw_domain *domain)
2679 {
2680         struct dlb2_list_entry *iter;
2681         struct dlb2_dir_pq_pair *port;
2682         RTE_SET_USED(iter);
2683
2684         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2685                 port->enabled = false;
2686
2687                 dlb2_dir_port_cq_disable(hw, port);
2688         }
2689 }
2690
2691 static void
2692 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2693                                        struct dlb2_hw_domain *domain)
2694 {
2695         struct dlb2_list_entry *iter;
2696         struct dlb2_dir_pq_pair *port;
2697         u32 pp_v = 0;
2698         RTE_SET_USED(iter);
2699
2700         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2701                 DLB2_CSR_WR(hw,
2702                             DLB2_SYS_DIR_PP_V(port->id.phys_id),
2703                             pp_v);
2704         }
2705 }
2706
2707 static void
2708 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2709                                        struct dlb2_hw_domain *domain)
2710 {
2711         struct dlb2_list_entry *iter;
2712         struct dlb2_ldb_port *port;
2713         u32 pp_v = 0;
2714         int i;
2715         RTE_SET_USED(iter);
2716
2717         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2718                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2719                         DLB2_CSR_WR(hw,
2720                                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2721                                     pp_v);
2722                 }
2723         }
2724 }
2725
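/*
 * Verify that the domain reset left no residual traffic: every LDB queue
 * must be empty, every LDB CQ must have zero inflights and tokens, and every
 * directed queue/port pair must be empty with zero tokens. Returns -EFAULT
 * if any check fails.
 */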
2726 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2727                                             struct dlb2_hw_domain *domain)
2728 {
2729         struct dlb2_list_entry *iter;
2730         struct dlb2_dir_pq_pair *dir_port;
2731         struct dlb2_ldb_port *ldb_port;
2732         struct dlb2_ldb_queue *queue;
2733         int i;
2734         RTE_SET_USED(iter);
2735
2736         /*
2737          * Confirm that all the domain's queues' inflight counts and AQED
2738          * active counts are 0.
2739          */
2740         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2741                 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2742                         DLB2_HW_ERR(hw,
2743                                     "[%s()] Internal error: failed to empty ldb queue %d\n",
2744                                     __func__, queue->id.phys_id);
2745                         return -EFAULT;
2746                 }
2747         }
2748
2749         /* Confirm that all the domain's CQs' inflight and token counts are 0. */
2750         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2751                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2752                         if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2753                             dlb2_ldb_cq_token_count(hw, ldb_port)) {
2754                                 DLB2_HW_ERR(hw,
2755                                             "[%s()] Internal error: failed to empty ldb port %d\n",
2756                                             __func__, ldb_port->id.phys_id);
2757                                 return -EFAULT;
2758                         }
2759                 }
2760         }
2761
2762         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2763                 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2764                         DLB2_HW_ERR(hw,
2765                                     "[%s()] Internal error: failed to empty dir queue %d\n",
2766                                     __func__, dir_port->id.phys_id);
2767                         return -EFAULT;
2768                 }
2769
2770                 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2771                         DLB2_HW_ERR(hw,
2772                                     "[%s()] Internal error: failed to empty dir port %d\n",
2773                                     __func__, dir_port->id.phys_id);
2774                         return -EFAULT;
2775                 }
2776         }
2777
2778         return 0;
2779 }
2780
2781 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2782                                                    struct dlb2_ldb_port *port)
2783 {
2784         DLB2_CSR_WR(hw,
2785                     DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2786                     DLB2_SYS_LDB_PP2VAS_RST);
2787
2788         DLB2_CSR_WR(hw,
2789                     DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
2790                     DLB2_CHP_LDB_CQ2VAS_RST);
2791
2792         DLB2_CSR_WR(hw,
2793                     DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2794                     DLB2_SYS_LDB_PP2VDEV_RST);
2795
2796         if (port->id.vdev_owned) {
2797                 unsigned int offs;
2798                 u32 virt_id;
2799
2800                 /*
2801                  * DLB uses producer port address bits 17:12 to determine the
2802                  * producer port ID. In Scalable IOV mode, PP accesses come
2803                  * through the PF MMIO window for the physical producer port,
2804                  * so for translation purposes the virtual and physical port
2805                  * IDs are equal.
2806                  */
2807                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2808                         virt_id = port->id.virt_id;
2809                 else
2810                         virt_id = port->id.phys_id;
2811
2812                 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2813
2814                 DLB2_CSR_WR(hw,
2815                             DLB2_SYS_VF_LDB_VPP2PP(offs),
2816                             DLB2_SYS_VF_LDB_VPP2PP_RST);
2817
2818                 DLB2_CSR_WR(hw,
2819                             DLB2_SYS_VF_LDB_VPP_V(offs),
2820                             DLB2_SYS_VF_LDB_VPP_V_RST);
2821         }
2822
2823         DLB2_CSR_WR(hw,
2824                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2825                     DLB2_SYS_LDB_PP_V_RST);
2826
2827         DLB2_CSR_WR(hw,
2828                     DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
2829                     DLB2_LSP_CQ_LDB_DSBL_RST);
2830
2831         DLB2_CSR_WR(hw,
2832                     DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
2833                     DLB2_CHP_LDB_CQ_DEPTH_RST);
2834
2835         if (hw->ver != DLB2_HW_V2)
2836                 DLB2_CSR_WR(hw,
2837                             DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
2838                             DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
2839
2840         DLB2_CSR_WR(hw,
2841                     DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
2842                     DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2843
2844         DLB2_CSR_WR(hw,
2845                     DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
2846                     DLB2_CHP_HIST_LIST_LIM_RST);
2847
2848         DLB2_CSR_WR(hw,
2849                     DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
2850                     DLB2_CHP_HIST_LIST_BASE_RST);
2851
2852         DLB2_CSR_WR(hw,
2853                     DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
2854                     DLB2_CHP_HIST_LIST_POP_PTR_RST);
2855
2856         DLB2_CSR_WR(hw,
2857                     DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
2858                     DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2859
2860         DLB2_CSR_WR(hw,
2861                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2862                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2863
2864         DLB2_CSR_WR(hw,
2865                     DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2866                     DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2867
2868         DLB2_CSR_WR(hw,
2869                     DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
2870                     DLB2_CHP_LDB_CQ_INT_ENB_RST);
2871
2872         DLB2_CSR_WR(hw,
2873                     DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2874                     DLB2_SYS_LDB_CQ_ISR_RST);
2875
2876         DLB2_CSR_WR(hw,
2877                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2878                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2879
2880         DLB2_CSR_WR(hw,
2881                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2882                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2883
2884         DLB2_CSR_WR(hw,
2885                     DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
2886                     DLB2_CHP_LDB_CQ_WPTR_RST);
2887
2888         DLB2_CSR_WR(hw,
2889                     DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
2890                     DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2891
2892         DLB2_CSR_WR(hw,
2893                     DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2894                     DLB2_SYS_LDB_CQ_ADDR_L_RST);
2895
2896         DLB2_CSR_WR(hw,
2897                     DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2898                     DLB2_SYS_LDB_CQ_ADDR_U_RST);
2899
2900         if (hw->ver == DLB2_HW_V2)
2901                 DLB2_CSR_WR(hw,
2902                             DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2903                             DLB2_SYS_LDB_CQ_AT_RST);
2904
2905         DLB2_CSR_WR(hw,
2906                     DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
2907                     DLB2_SYS_LDB_CQ_PASID_RST);
2908
2909         DLB2_CSR_WR(hw,
2910                     DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2911                     DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2912
2913         DLB2_CSR_WR(hw,
2914                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
2915                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2916
2917         DLB2_CSR_WR(hw,
2918                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
2919                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2920
2921         DLB2_CSR_WR(hw,
2922                     DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
2923                     DLB2_LSP_CQ2QID0_RST);
2924
2925         DLB2_CSR_WR(hw,
2926                     DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
2927                     DLB2_LSP_CQ2QID1_RST);
2928
2929         DLB2_CSR_WR(hw,
2930                     DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
2931                     DLB2_LSP_CQ2PRIOV_RST);
2932 }
2933
2934 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2935                                                  struct dlb2_hw_domain *domain)
2936 {
2937         struct dlb2_list_entry *iter;
2938         struct dlb2_ldb_port *port;
2939         int i;
2940         RTE_SET_USED(iter);
2941
2942         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2943                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2944                         __dlb2_domain_reset_ldb_port_registers(hw, port);
2945         }
2946 }
2947
2948 static void
2949 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2950                                        struct dlb2_dir_pq_pair *port)
2951 {
2952         u32 reg = 0;
2953
2954         DLB2_CSR_WR(hw,
2955                     DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
2956                     DLB2_CHP_DIR_CQ2VAS_RST);
2957
2958         DLB2_CSR_WR(hw,
2959                     DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
2960                     DLB2_LSP_CQ_DIR_DSBL_RST);
2961
2962         DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);
2963
2964         if (hw->ver == DLB2_HW_V2)
2965                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2966         else
2967                 DLB2_CSR_WR(hw,
2968                             DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
2969
2970         DLB2_CSR_WR(hw,
2971                     DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
2972                     DLB2_CHP_DIR_CQ_DEPTH_RST);
2973
2974         DLB2_CSR_WR(hw,
2975                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2976                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2977
2978         DLB2_CSR_WR(hw,
2979                     DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2980                     DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2981
2982         DLB2_CSR_WR(hw,
2983                     DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2984                     DLB2_CHP_DIR_CQ_INT_ENB_RST);
2985
2986         DLB2_CSR_WR(hw,
2987                     DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2988                     DLB2_SYS_DIR_CQ_ISR_RST);
2989
2990         DLB2_CSR_WR(hw,
2991                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
2992                                                       port->id.phys_id),
2993                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2994
2995         DLB2_CSR_WR(hw,
2996                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2997                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2998
2999         DLB2_CSR_WR(hw,
3000                     DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
3001                     DLB2_CHP_DIR_CQ_WPTR_RST);
3002
3003         DLB2_CSR_WR(hw,
3004                     DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
3005                     DLB2_LSP_CQ_DIR_TKN_CNT_RST);
3006
3007         DLB2_CSR_WR(hw,
3008                     DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
3009                     DLB2_SYS_DIR_CQ_ADDR_L_RST);
3010
3011         DLB2_CSR_WR(hw,
3012                     DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
3013                     DLB2_SYS_DIR_CQ_ADDR_U_RST);
3014
3015         DLB2_CSR_WR(hw,
3016                     DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
3017                     DLB2_SYS_DIR_CQ_AT_RST);
3018
3024         DLB2_CSR_WR(hw,
3025                     DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
3026                     DLB2_SYS_DIR_CQ_PASID_RST);
3027
3028         DLB2_CSR_WR(hw,
3029                     DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
3030                     DLB2_SYS_DIR_CQ_FMT_RST);
3031
3032         DLB2_CSR_WR(hw,
3033                     DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
3034                     DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
3035
3036         DLB2_CSR_WR(hw,
3037                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3038                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
3039
3040         DLB2_CSR_WR(hw,
3041                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3042                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
3043
3044         DLB2_CSR_WR(hw,
3045                     DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
3046                     DLB2_SYS_DIR_PP2VAS_RST);
3047
3048         DLB2_CSR_WR(hw,
3049                     DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
3050                     DLB2_CHP_DIR_CQ2VAS_RST);
3051
3052         DLB2_CSR_WR(hw,
3053                     DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
3054                     DLB2_SYS_DIR_PP2VDEV_RST);
3055
3056         if (port->id.vdev_owned) {
3057                 unsigned int offs;
3058                 u32 virt_id;
3059
3060                 /*
3061                  * DLB uses producer port address bits 17:12 to determine the
3062                  * producer port ID. In Scalable IOV mode, PP accesses come
3063                  * through the PF MMIO window for the physical producer port,
3064                  * so for translation purposes the virtual and physical port
3065                  * IDs are equal.
3066                  */
3067                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3068                         virt_id = port->id.virt_id;
3069                 else
3070                         virt_id = port->id.phys_id;
3071
3072                 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
3073                         virt_id;
3074
3075                 DLB2_CSR_WR(hw,
3076                             DLB2_SYS_VF_DIR_VPP2PP(offs),
3077                             DLB2_SYS_VF_DIR_VPP2PP_RST);
3078
3079                 DLB2_CSR_WR(hw,
3080                             DLB2_SYS_VF_DIR_VPP_V(offs),
3081                             DLB2_SYS_VF_DIR_VPP_V_RST);
3082         }
3083
3084         DLB2_CSR_WR(hw,
3085                     DLB2_SYS_DIR_PP_V(port->id.phys_id),
3086                     DLB2_SYS_DIR_PP_V_RST);
3087 }
3088
3089 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3090                                                  struct dlb2_hw_domain *domain)
3091 {
3092         struct dlb2_list_entry *iter;
3093         struct dlb2_dir_pq_pair *port;
3094         RTE_SET_USED(iter);
3095
3096         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3097                 __dlb2_domain_reset_dir_port_registers(hw, port);
3098 }
3099
3100 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
3101                                                   struct dlb2_hw_domain *domain)
3102 {
3103         struct dlb2_list_entry *iter;
3104         struct dlb2_ldb_queue *queue;
3105         RTE_SET_USED(iter);
3106
3107         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3108                 unsigned int queue_id = queue->id.phys_id;
3109                 int i;
3110
3111                 DLB2_CSR_WR(hw,
3112                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
3113                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3114
3115                 DLB2_CSR_WR(hw,
3116                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
3117                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3118
3119                 DLB2_CSR_WR(hw,
3120                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
3121                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3122
3123                 DLB2_CSR_WR(hw,
3124                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
3125                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3126
3127                 DLB2_CSR_WR(hw,
3128                             DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
3129                             DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3130
3131                 DLB2_CSR_WR(hw,
3132                             DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
3133                             DLB2_LSP_QID_LDB_INFL_LIM_RST);
3134
3135                 DLB2_CSR_WR(hw,
3136                             DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
3137                             DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3138
3139                 DLB2_CSR_WR(hw,
3140                             DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
3141                             DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3142
3143                 DLB2_CSR_WR(hw,
3144                             DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
3145                             DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3146
3147                 DLB2_CSR_WR(hw,
3148                             DLB2_SYS_LDB_QID_ITS(queue_id),
3149                             DLB2_SYS_LDB_QID_ITS_RST);
3150
3151                 DLB2_CSR_WR(hw,
3152                             DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
3153                             DLB2_CHP_ORD_QID_SN_RST);
3154
3155                 DLB2_CSR_WR(hw,
3156                             DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
3157                             DLB2_CHP_ORD_QID_SN_MAP_RST);
3158
3159                 DLB2_CSR_WR(hw,
3160                             DLB2_SYS_LDB_QID_V(queue_id),
3161                             DLB2_SYS_LDB_QID_V_RST);
3162
3163                 DLB2_CSR_WR(hw,
3164                             DLB2_SYS_LDB_QID_CFG_V(queue_id),
3165                             DLB2_SYS_LDB_QID_CFG_V_RST);
3166
3167                 if (queue->sn_cfg_valid) {
3168                         u32 offs[2];
3169
3170                         offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
3171                                                          queue->sn_slot);
3172                         offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
3173                                                          queue->sn_slot);
3174
3175                         DLB2_CSR_WR(hw,
3176                                     offs[queue->sn_group],
3177                                     DLB2_RO_GRP_0_SLT_SHFT_RST);
3178                 }
3179
3180                 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3181                         DLB2_CSR_WR(hw,
3182                                     DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
3183                                     DLB2_LSP_QID2CQIDIX_00_RST);
3184
3185                         DLB2_CSR_WR(hw,
3186                                     DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
3187                                     DLB2_LSP_QID2CQIDIX2_00_RST);
3188
3189                         DLB2_CSR_WR(hw,
3190                                     DLB2_ATM_QID2CQIDIX(queue_id, i),
3191                                     DLB2_ATM_QID2CQIDIX_00_RST);
3192                 }
3193         }
3194 }
3195
3196 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3197                                                   struct dlb2_hw_domain *domain)
3198 {
3199         struct dlb2_list_entry *iter;
3200         struct dlb2_dir_pq_pair *queue;
3201         RTE_SET_USED(iter);
3202
3203         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3204                 DLB2_CSR_WR(hw,
3205                             DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
3206                                                        queue->id.phys_id),
3207                             DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3208
3209                 DLB2_CSR_WR(hw,
3210                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
3211                                                           queue->id.phys_id),
3212                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3213
3214                 DLB2_CSR_WR(hw,
3215                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
3216                                                           queue->id.phys_id),
3217                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3218
3219                 DLB2_CSR_WR(hw,
3220                             DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
3221                                                          queue->id.phys_id),
3222                             DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3223
3224                 DLB2_CSR_WR(hw,
3225                             DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3226                             DLB2_SYS_DIR_QID_ITS_RST);
3227
3228                 DLB2_CSR_WR(hw,
3229                             DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3230                             DLB2_SYS_DIR_QID_V_RST);
3231         }
3232 }
3233
3238 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3239                                         struct dlb2_hw_domain *domain)
3240 {
3241         dlb2_domain_reset_ldb_port_registers(hw, domain);
3242
3243         dlb2_domain_reset_dir_port_registers(hw, domain);
3244
3245         dlb2_domain_reset_ldb_queue_registers(hw, domain);
3246
3247         dlb2_domain_reset_dir_queue_registers(hw, domain);
3248
3249         if (hw->ver == DLB2_HW_V2) {
3250                 DLB2_CSR_WR(hw,
3251                             DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3252                             DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3253
3254                 DLB2_CSR_WR(hw,
3255                             DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3256                             DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3257         } else
3258                 DLB2_CSR_WR(hw,
3259                             DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
3260                             DLB2_CHP_CFG_VAS_CRD_RST);
3261 }
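
/*
 * Note (editor's addition): DLB 2.0 (V2) keeps separate load-balanced and
 * directed credit pools per VAS, while DLB 2.5 (V2_5) uses a single combined
 * credit pool. That is why the credit registers above, and the credit
 * accounting in dlb2_domain_reset_software_state() below, are split on
 * hw->ver.
 */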
3262
3263 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3264                                             struct dlb2_hw_domain *domain)
3265 {
3266         struct dlb2_dir_pq_pair *tmp_dir_port;
3267         struct dlb2_ldb_queue *tmp_ldb_queue;
3268         struct dlb2_ldb_port *tmp_ldb_port;
3269         struct dlb2_list_entry *iter1;
3270         struct dlb2_list_entry *iter2;
3271         struct dlb2_function_resources *rsrcs;
3272         struct dlb2_dir_pq_pair *dir_port;
3273         struct dlb2_ldb_queue *ldb_queue;
3274         struct dlb2_ldb_port *ldb_port;
3275         struct dlb2_list_head *list;
3276         int ret, i;
3277         RTE_SET_USED(tmp_dir_port);
3278         RTE_SET_USED(tmp_ldb_queue);
3279         RTE_SET_USED(tmp_ldb_port);
3280         RTE_SET_USED(iter1);
3281         RTE_SET_USED(iter2);
3282
3283         rsrcs = domain->parent_func;
3284
3285         /* Move the domain's ldb queues to the function's avail list */
3286         list = &domain->used_ldb_queues;
3287         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3288                 if (ldb_queue->sn_cfg_valid) {
3289                         struct dlb2_sn_group *grp;
3290
3291                         grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3292
3293                         dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3294                         ldb_queue->sn_cfg_valid = false;
3295                 }
3296
3297                 ldb_queue->owned = false;
3298                 ldb_queue->num_mappings = 0;
3299                 ldb_queue->num_pending_additions = 0;
3300
3301                 dlb2_list_del(&domain->used_ldb_queues,
3302                               &ldb_queue->domain_list);
3303                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3304                               &ldb_queue->func_list);
3305                 rsrcs->num_avail_ldb_queues++;
3306         }
3307
3308         list = &domain->avail_ldb_queues;
3309         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3310                 ldb_queue->owned = false;
3311
3312                 dlb2_list_del(&domain->avail_ldb_queues,
3313                               &ldb_queue->domain_list);
3314                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3315                               &ldb_queue->func_list);
3316                 rsrcs->num_avail_ldb_queues++;
3317         }
3318
3319         /* Move the domain's ldb ports to the function's avail list */
3320         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3321                 list = &domain->used_ldb_ports[i];
3322                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3323                                        iter1, iter2) {
3324                         int j;
3325
3326                         ldb_port->owned = false;
3327                         ldb_port->configured = false;
3328                         ldb_port->num_pending_removals = 0;
3329                         ldb_port->num_mappings = 0;
3330                         ldb_port->init_tkn_cnt = 0;
3331                         ldb_port->cq_depth = 0;
3332                         for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3333                                 ldb_port->qid_map[j].state =
3334                                         DLB2_QUEUE_UNMAPPED;
3335
3336                         dlb2_list_del(&domain->used_ldb_ports[i],
3337                                       &ldb_port->domain_list);
3338                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3339                                       &ldb_port->func_list);
3340                         rsrcs->num_avail_ldb_ports[i]++;
3341                 }
3342
3343                 list = &domain->avail_ldb_ports[i];
3344                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3345                                        iter1, iter2) {
3346                         ldb_port->owned = false;
3347
3348                         dlb2_list_del(&domain->avail_ldb_ports[i],
3349                                       &ldb_port->domain_list);
3350                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3351                                       &ldb_port->func_list);
3352                         rsrcs->num_avail_ldb_ports[i]++;
3353                 }
3354         }
3355
3356         /* Move the domain's dir ports to the function's avail list */
3357         list = &domain->used_dir_pq_pairs;
3358         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3359                 dir_port->owned = false;
3360                 dir_port->port_configured = false;
3361                 dir_port->init_tkn_cnt = 0;
3362
3363                 dlb2_list_del(&domain->used_dir_pq_pairs,
3364                               &dir_port->domain_list);
3365
3366                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3367                               &dir_port->func_list);
3368                 rsrcs->num_avail_dir_pq_pairs++;
3369         }
3370
3371         list = &domain->avail_dir_pq_pairs;
3372         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3373                 dir_port->owned = false;
3374
3375                 dlb2_list_del(&domain->avail_dir_pq_pairs,
3376                               &dir_port->domain_list);
3377
3378                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3379                               &dir_port->func_list);
3380                 rsrcs->num_avail_dir_pq_pairs++;
3381         }
3382
3383         /* Return hist list entries to the function */
3384         ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3385                                     domain->hist_list_entry_base,
3386                                     domain->total_hist_list_entries);
3387         if (ret) {
3388                 DLB2_HW_ERR(hw,
3389                             "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
3390                             __func__);
3391                 return ret;
3392         }
3393
3394         domain->total_hist_list_entries = 0;
3395         domain->avail_hist_list_entries = 0;
3396         domain->hist_list_entry_base = 0;
3397         domain->hist_list_entry_offset = 0;
3398
3399         if (hw->ver == DLB2_HW_V2_5) {
3400                 rsrcs->num_avail_entries += domain->num_credits;
3401                 domain->num_credits = 0;
3402         } else {
3403                 rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3404                 domain->num_ldb_credits = 0;
3405
3406                 rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3407                 domain->num_dir_credits = 0;
3408         }
3409         rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3410         rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3411         domain->num_avail_aqed_entries = 0;
3412         domain->num_used_aqed_entries = 0;
3413
3414         domain->num_pending_removals = 0;
3415         domain->num_pending_additions = 0;
3416         domain->configured = false;
3417         domain->started = false;
3418
3419         /*
3420          * Move the domain out of the used_domains list and back to the
3421          * function's avail_domains list.
3422          */
3423         dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3424         dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3425         rsrcs->num_avail_domains++;
3426
3427         return 0;
3428 }
3429
3430 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3431                                             struct dlb2_hw_domain *domain,
3432                                             struct dlb2_ldb_queue *queue)
3433 {
3434         struct dlb2_ldb_port *port = NULL;
3435         int ret, i;
3436
3437         /* If a domain has LDB queues, it must have LDB ports */
3438         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3439                 port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
3440                                           typeof(*port));
3441                 if (port)
3442                         break;
3443         }
3444
3445         if (port == NULL) {
3446                 DLB2_HW_ERR(hw,
3447                             "[%s()] Internal error: No configured LDB ports\n",
3448                             __func__);
3449                 return -EFAULT;
3450         }
3451
3452         /* If necessary, free up a QID slot in this CQ */
3453         if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3454                 struct dlb2_ldb_queue *mapped_queue;
3455
3456                 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3457
3458                 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3459                 if (ret)
3460                         return ret;
3461         }
3462
3463         ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3464         if (ret)
3465                 return ret;
3466
3467         return dlb2_domain_drain_mapped_queues(hw, domain);
3468 }
3469
3470 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3471                                              struct dlb2_hw_domain *domain)
3472 {
3473         struct dlb2_list_entry *iter;
3474         struct dlb2_ldb_queue *queue;
3475         int ret;
3476         RTE_SET_USED(iter);
3477
3478         /* If the domain hasn't been started, there's no traffic to drain */
3479         if (!domain->started)
3480                 return 0;
3481
3482         /*
3483          * Pre-condition: the unattached queue must not have any outstanding
3484          * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3485          * prior to this in dlb2_domain_drain_mapped_queues().
3486          */
3487         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3488                 if (queue->num_mappings != 0 ||
3489                     dlb2_ldb_queue_is_empty(hw, queue))
3490                         continue;
3491
3492                 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3493                 if (ret)
3494                         return ret;
3495         }
3496
3497         return 0;
3498 }
3499
3500 /**
3501  * dlb2_reset_domain() - reset a scheduling domain
3502  * @hw: dlb2_hw handle for a particular device.
3503  * @domain_id: domain ID.
3504  * @vdev_req: indicates whether this request came from a vdev.
3505  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3506  *
3507  * This function resets and frees a DLB 2.0 scheduling domain and its associated
3508  * resources.
3509  *
3510  * Pre-condition: the driver must ensure software has stopped sending QEs
3511  * through this domain's producer ports before invoking this function, or
3512  * undefined behavior will result.
3513  *
3514  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3515  * device.
3516  *
3517  * Return:
3518  * Returns 0 upon success, < 0 otherwise.
3519  *
3520  * EINVAL - Invalid domain ID, or the domain is not configured.
3521  * EFAULT - Internal error. (Possibly caused if the software pre-condition
3522  *          is not met.)
3523  * ETIMEDOUT - Hardware component didn't reset in the expected time.
3524  */
3525 int dlb2_reset_domain(struct dlb2_hw *hw,
3526                       u32 domain_id,
3527                       bool vdev_req,
3528                       unsigned int vdev_id)
3529 {
3530         struct dlb2_hw_domain *domain;
3531         int ret;
3532
3533         dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3534
3535         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3536
3537         if (domain == NULL || !domain->configured)
3538                 return -EINVAL;
3539
3540         /* Disable VPPs */
3541         if (vdev_req) {
3542                 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3543
3544                 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3545         }
3546
3547         /* Disable CQ interrupts */
3548         dlb2_domain_disable_dir_port_interrupts(hw, domain);
3549
3550         dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3551
3552         /*
3553          * For each queue owned by this domain, disable its write permissions to
3554          * cause any traffic sent to it to be dropped. Well-behaved software
3555          * should not be sending QEs at this point.
3556          */
3557         dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3558
3559         dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3560
3561         /* Turn off completion tracking on all the domain's PPs. */
3562         dlb2_domain_disable_ldb_seq_checks(hw, domain);
3563
3564         /*
3565          * Disable the LDB CQs and drain them in order to complete the map and
3566          * unmap procedures, which require zero CQ inflights and zero QID
3567          * inflights respectively.
3568          */
3569         dlb2_domain_disable_ldb_cqs(hw, domain);
3570
3571         dlb2_domain_drain_ldb_cqs(hw, domain, false);
3572
3573         ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3574         if (ret)
3575                 return ret;
3576
3577         ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3578         if (ret)
3579                 return ret;
3580
3581         ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3582         if (ret)
3583                 return ret;
3584
3585         /* Re-enable the CQs in order to drain the mapped queues. */
3586         dlb2_domain_enable_ldb_cqs(hw, domain);
3587
3588         ret = dlb2_domain_drain_mapped_queues(hw, domain);
3589         if (ret)
3590                 return ret;
3591
3592         ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3593         if (ret)
3594                 return ret;
3595
3596         /* Done draining LDB QEs, so disable the CQs. */
3597         dlb2_domain_disable_ldb_cqs(hw, domain);
3598
3599         dlb2_domain_drain_dir_queues(hw, domain);
3600
3601         /* Done draining DIR QEs, so disable the CQs. */
3602         dlb2_domain_disable_dir_cqs(hw, domain);
3603
3604         /* Disable PPs */
3605         dlb2_domain_disable_dir_producer_ports(hw, domain);
3606
3607         dlb2_domain_disable_ldb_producer_ports(hw, domain);
3608
3609         ret = dlb2_domain_verify_reset_success(hw, domain);
3610         if (ret)
3611                 return ret;
3612
3613         /* Reset the QID and port state. */
3614         dlb2_domain_reset_registers(hw, domain);
3615
3616         /* Hardware reset complete. Reset the domain's software state */
3617         return dlb2_domain_reset_software_state(hw, domain);
3618 }
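
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * a PF-level teardown path could reset every domain by attempting
 * dlb2_reset_domain() for each possible ID and skipping unconfigured ones,
 * which the function reports as -EINVAL. DLB2_MAX_NUM_DOMAINS is assumed to
 * be the domain count from dlb2_hw_types.h.
 */
static int dlb2_example_reset_all_domains(struct dlb2_hw *hw)
{
        u32 id;
        int ret;

        for (id = 0; id < DLB2_MAX_NUM_DOMAINS; id++) {
                /* PF-originated request: vdev_req is false, vdev_id unused */
                ret = dlb2_reset_domain(hw, id, false, 0);
                if (ret && ret != -EINVAL)
                        return ret;
        }

        return 0;
}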
3619
3620 static void
3621 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3622                                u32 domain_id,
3623                                struct dlb2_create_ldb_queue_args *args,
3624                                bool vdev_req,
3625                                unsigned int vdev_id)
3626 {
3627         DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3628         if (vdev_req)
3629                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3630         DLB2_HW_DBG(hw, "\tDomain ID:                  %d\n",
3631                     domain_id);
3632         DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3633                     args->num_sequence_numbers);
3634         DLB2_HW_DBG(hw, "\tNumber of QID inflights:    %d\n",
3635                     args->num_qid_inflights);
3636         DLB2_HW_DBG(hw, "\tNumber of ATM inflights:    %d\n",
3637                     args->num_atomic_inflights);
3638 }
3639
3640 static int
3641 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3642                                   struct dlb2_ldb_queue *queue,
3643                                   struct dlb2_create_ldb_queue_args *args)
3644 {
3645         int slot = -1;
3646         int i;
3647
3648         queue->sn_cfg_valid = false;
3649
3650         if (args->num_sequence_numbers == 0)
3651                 return 0;
3652
3653         for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3654                 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3655
3656                 if (group->sequence_numbers_per_queue ==
3657                     args->num_sequence_numbers &&
3658                     !dlb2_sn_group_full(group)) {
3659                         slot = dlb2_sn_group_alloc_slot(group);
3660                         if (slot >= 0)
3661                                 break;
3662                 }
3663         }
3664
3665         if (slot == -1) {
3666                 DLB2_HW_ERR(hw,
3667                             "[%s():%d] Internal error: no sequence number slots available\n",
3668                             __func__, __LINE__);
3669                 return -EFAULT;
3670         }
3671
3672         queue->sn_cfg_valid = true;
3673         queue->sn_group = i;
3674         queue->sn_slot = slot;
3675         return 0;
3676 }
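
/*
 * Note (editor's illustration, assuming the DLB 2.0 arrangement of 1024
 * sequence numbers per group): a group configured for N sequence numbers per
 * queue exposes 1024 / N slots, e.g. 64 SNs per queue gives 16 slots and
 * 1024 SNs per queue gives a single slot, which is what
 * dlb2_sn_group_alloc_slot() hands out above.
 */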
3677
3678 static int
3679 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3680                                   u32 domain_id,
3681                                   struct dlb2_create_ldb_queue_args *args,
3682                                   struct dlb2_cmd_response *resp,
3683                                   bool vdev_req,
3684                                   unsigned int vdev_id,
3685                                   struct dlb2_hw_domain **out_domain,
3686                                   struct dlb2_ldb_queue **out_queue)
3687 {
3688         struct dlb2_hw_domain *domain;
3689         struct dlb2_ldb_queue *queue;
3690         int i;
3691
3692         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3693
3694         if (!domain) {
3695                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3696                 return -EINVAL;
3697         }
3698
3699         if (!domain->configured) {
3700                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3701                 return -EINVAL;
3702         }
3703
3704         if (domain->started) {
3705                 resp->status = DLB2_ST_DOMAIN_STARTED;
3706                 return -EINVAL;
3707         }
3708
3709         queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3710         if (!queue) {
3711                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3712                 return -EINVAL;
3713         }
3714
3715         if (args->num_sequence_numbers) {
3716                 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3717                         struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3718
3719                         if (group->sequence_numbers_per_queue ==
3720                             args->num_sequence_numbers &&
3721                             !dlb2_sn_group_full(group))
3722                                 break;
3723                 }
3724
3725                 if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3726                         resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3727                         return -EINVAL;
3728                 }
3729         }
3730
3731         if (args->num_qid_inflights < 1 || args->num_qid_inflights > 2048) {
3732                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3733                 return -EINVAL;
3734         }
3735
3736         /* Inflights must be <= number of sequence numbers if ordered */
3737         if (args->num_sequence_numbers != 0 &&
3738             args->num_qid_inflights > args->num_sequence_numbers) {
3739                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3740                 return -EINVAL;
3741         }
3742
3743         if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3744                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3745                 return -EINVAL;
3746         }
3747
3748         if (args->num_atomic_inflights &&
3749             args->lock_id_comp_level != 0 &&
3750             args->lock_id_comp_level != 64 &&
3751             args->lock_id_comp_level != 128 &&
3752             args->lock_id_comp_level != 256 &&
3753             args->lock_id_comp_level != 512 &&
3754             args->lock_id_comp_level != 1024 &&
3755             args->lock_id_comp_level != 2048 &&
3756             args->lock_id_comp_level != 4096 &&
3757             args->lock_id_comp_level != 65536) {
3758                 resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3759                 return -EINVAL;
3760         }
3761
3762         *out_domain = domain;
3763         *out_queue = queue;
3764
3765         return 0;
3766 }
3767
3768 static int
3769 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
3770                                 struct dlb2_hw_domain *domain,
3771                                 struct dlb2_ldb_queue *queue,
3772                                 struct dlb2_create_ldb_queue_args *args)
3773 {
3774         int ret;
3775         ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
3776         if (ret)
3777                 return ret;
3778
3779         /* Attach QID inflights */
3780         queue->num_qid_inflights = args->num_qid_inflights;
3781
3782         /* Attach atomic inflights */
3783         queue->aqed_limit = args->num_atomic_inflights;
3784
3785         domain->num_avail_aqed_entries -= args->num_atomic_inflights;
3786         domain->num_used_aqed_entries += args->num_atomic_inflights;
3787
3788         return 0;
3789 }
3790
3791 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
3792                                      struct dlb2_hw_domain *domain,
3793                                      struct dlb2_ldb_queue *queue,
3794                                      struct dlb2_create_ldb_queue_args *args,
3795                                      bool vdev_req,
3796                                      unsigned int vdev_id)
3797 {
3798         struct dlb2_sn_group *sn_group;
3799         unsigned int offs;
3800         u32 reg = 0;
3801         u32 alimit;
3802
3803         /* QID write permissions are turned on when the domain is started */
3804         offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.phys_id;
3805
3806         DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), reg);
3807
3808         /*
3809          * Unordered QIDs get 4K inflights, ordered get as many as the number
3810          * of sequence numbers.
3811          */
3812         DLB2_BITS_SET(reg, args->num_qid_inflights,
3813                       DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
3814         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
3815                                                   queue->id.phys_id), reg);
3816
3817         alimit = queue->aqed_limit;
3818
3819         if (alimit > DLB2_MAX_NUM_AQED_ENTRIES)
3820                 alimit = DLB2_MAX_NUM_AQED_ENTRIES;
3821
3822         reg = 0;
3823         DLB2_BITS_SET(reg, alimit, DLB2_LSP_QID_AQED_ACTIVE_LIM_LIMIT);
3824         DLB2_CSR_WR(hw,
3825                     DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver,
3826                                                  queue->id.phys_id), reg);
3827
3828         reg = 0;
3829         switch (args->lock_id_comp_level) {
3830         case 64:
3831                 DLB2_BITS_SET(reg, 1, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3832                 break;
3833         case 128:
3834                 DLB2_BITS_SET(reg, 2, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3835                 break;
3836         case 256:
3837                 DLB2_BITS_SET(reg, 3, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3838                 break;
3839         case 512:
3840                 DLB2_BITS_SET(reg, 4, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3841                 break;
3842         case 1024:
3843                 DLB2_BITS_SET(reg, 5, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3844                 break;
3845         case 2048:
3846                 DLB2_BITS_SET(reg, 6, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3847                 break;
3848         case 4096:
3849                 DLB2_BITS_SET(reg, 7, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3850                 break;
3851         default:
3852                 /* No compression by default */
3853                 break;
3854         }
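        /*
         * Note (editor's illustration): the compress code selected above is
         * log2(lock_id_comp_level) - 5 for levels 64 through 4096; a level
         * of 0 (or the 65536 maximum) falls through to the default and
         * leaves lock ID compression disabled.
         */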
3855
3856         DLB2_CSR_WR(hw, DLB2_AQED_QID_HID_WIDTH(queue->id.phys_id), reg);
3857
3858         reg = 0;
3859         /* Don't timestamp QEs that pass through this queue */
3860         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_ITS(queue->id.phys_id), reg);
3861
3862         DLB2_BITS_SET(reg, args->depth_threshold,
3863                       DLB2_LSP_QID_ATM_DEPTH_THRSH_THRESH);
3864         DLB2_CSR_WR(hw,
3865                     DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver,
3866                                                  queue->id.phys_id), reg);
3867
3868         reg = 0;
3869         DLB2_BITS_SET(reg, args->depth_threshold,
3870                       DLB2_LSP_QID_NALDB_DEPTH_THRSH_THRESH);
3871         DLB2_CSR_WR(hw,
3872                     DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue->id.phys_id),
3873                     reg);
3874
3875         /*
3876          * This register limits the number of inflight flows a queue can have
3877          * at one time.  It has an upper bound of 2048, but can be
3878          * over-subscribed. 512 is chosen so that a single queue does not use
3879          * the entire atomic storage, but can use a substantial portion if
3880          * needed.
3881          */
3882         reg = 0;
3883         DLB2_BITS_SET(reg, 512, DLB2_AQED_QID_FID_LIM_QID_FID_LIMIT);
3884         DLB2_CSR_WR(hw, DLB2_AQED_QID_FID_LIM(queue->id.phys_id), reg);
3885
3886         /* Configure SNs */
3887         reg = 0;
3888         sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
3889         DLB2_BITS_SET(reg, sn_group->mode, DLB2_CHP_ORD_QID_SN_MAP_MODE);
3890         DLB2_BITS_SET(reg, queue->sn_slot, DLB2_CHP_ORD_QID_SN_MAP_SLOT);
3891         DLB2_BITS_SET(reg, sn_group->id, DLB2_CHP_ORD_QID_SN_MAP_GRP);
3892
3893         DLB2_CSR_WR(hw,
3894                     DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue->id.phys_id), reg);
3895
3896         reg = 0;
3897         DLB2_BITS_SET(reg, (args->num_sequence_numbers != 0),
3898                  DLB2_SYS_LDB_QID_CFG_V_SN_CFG_V);
3899         DLB2_BITS_SET(reg, (args->num_atomic_inflights != 0),
3900                  DLB2_SYS_LDB_QID_CFG_V_FID_CFG_V);
3901
3902         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), reg);
3903
3904         if (vdev_req) {
3905                 offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
3906
3907                 reg = 0;
3908                 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VQID_V_VQID_V);
3909                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), reg);
3910
3911                 reg = 0;
3912                 DLB2_BITS_SET(reg, queue->id.phys_id,
3913                               DLB2_SYS_VF_LDB_VQID2QID_QID);
3914                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), reg);
3915
3916                 reg = 0;
3917                 DLB2_BITS_SET(reg, queue->id.virt_id,
3918                               DLB2_SYS_LDB_QID2VQID_VQID);
3919                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID2VQID(queue->id.phys_id), reg);
3920         }
3921
3922         reg = 0;
3923         DLB2_BIT_SET(reg, DLB2_SYS_LDB_QID_V_QID_V);
3924         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), reg);
3925 }
3926
3927 /**
3928  * dlb2_hw_create_ldb_queue() - create a load-balanced queue
3929  * @hw: dlb2_hw handle for a particular device.
3930  * @domain_id: domain ID.
3931  * @args: queue creation arguments.
3932  * @resp: response structure.
3933  * @vdev_req: indicates whether this request came from a vdev.
3934  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3935  *
3936  * This function creates a load-balanced queue.
3937  *
3938  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3939  * device.
3940  *
3941  * Return:
3942  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
3943  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
3944  * contains the queue ID.
3945  *
3946  * resp->id contains a virtual ID if vdev_req is true.
3947  *
3948  * Errors:
3949  * EINVAL - A requested resource is unavailable, the domain is not configured,
3950  *          the domain has already been started, or the requested queue name is
3951  *          already in use.
3952  * EFAULT - Internal error (resp->status not set).
3953  */
3954 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
3955                              u32 domain_id,
3956                              struct dlb2_create_ldb_queue_args *args,
3957                              struct dlb2_cmd_response *resp,
3958                              bool vdev_req,
3959                              unsigned int vdev_id)
3960 {
3961         struct dlb2_hw_domain *domain;
3962         struct dlb2_ldb_queue *queue;
3963         int ret;
3964
3965         dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
3966
3967         /*
3968          * Verify that hardware resources are available before attempting to
3969          * satisfy the request. This simplifies the error unwinding code.
3970          */
3971         ret = dlb2_verify_create_ldb_queue_args(hw,
3972                                                 domain_id,
3973                                                 args,
3974                                                 resp,
3975                                                 vdev_req,
3976                                                 vdev_id,
3977                                                 &domain,
3978                                                 &queue);
3979         if (ret)
3980                 return ret;
3981
3982         ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
3983
3984         if (ret) {
3985                 DLB2_HW_ERR(hw,
3986                             "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
3987                             __func__, __LINE__);
3988                 return ret;
3989         }
3990
3991         dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
3992
3993         queue->num_mappings = 0;
3994
3995         queue->configured = true;
3996
3997         /*
3998          * Configuration succeeded, so move the resource from the 'avail' to
3999          * the 'used' list.
4000          */
4001         dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
4002
4003         dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
4004
4005         resp->status = 0;
4006         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
4007
4008         return 0;
4009 }
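
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * creating an ordered, atomic-capable queue from the PF. The values are
 * examples only and must satisfy dlb2_verify_create_ldb_queue_args() above,
 * e.g. num_qid_inflights in [1, 2048] and, for ordered queues, no larger
 * than num_sequence_numbers.
 */
static int dlb2_example_create_ordered_queue(struct dlb2_hw *hw, u32 domain_id)
{
        struct dlb2_create_ldb_queue_args args = {0};
        struct dlb2_cmd_response resp = {0};
        int ret;

        args.num_sequence_numbers = 64;  /* ordered queue with 64 SNs */
        args.num_qid_inflights = 64;     /* must not exceed the SN count */
        args.num_atomic_inflights = 64;  /* taken from the domain's AQED pool */
        args.lock_id_comp_level = 0;     /* no lock ID compression */
        args.depth_threshold = 256;      /* queue depth threshold */

        ret = dlb2_hw_create_ldb_queue(hw, domain_id, &args, &resp, false, 0);
        if (ret)
                return ret;

        /* For a PF request, resp.id is the queue's physical ID */
        return (int)resp.id;
}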
4010
4011 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
4012                                        struct dlb2_hw_domain *domain,
4013                                        struct dlb2_ldb_port *port,
4014                                        bool vdev_req,
4015                                        unsigned int vdev_id)
4016 {
4017         u32 reg = 0;
4018
4019         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
4020         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);
4021
4022         if (vdev_req) {
4023                 unsigned int offs;
4024                 u32 virt_id;
4025
4026                 /*
4027                  * DLB uses producer port address bits 17:12 to determine the
4028                  * producer port ID. In Scalable IOV mode, PP accesses come
4029                  * through the PF MMIO window for the physical producer port,
4030                  * so for translation purposes the virtual and physical port
4031                  * IDs are equal.
4032                  */
4033                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4034                         virt_id = port->id.virt_id;
4035                 else
4036                         virt_id = port->id.phys_id;
4037
4038                 reg = 0;
4039                 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
4040                 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4041                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);
4042
4043                 reg = 0;
4044                 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
4045                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);
4046
4047                 reg = 0;
4048                 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
4049                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
4050         }
4051
4052         reg = 0;
4053         DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
4054         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
4055 }
4056
4057 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4058                                       struct dlb2_hw_domain *domain,
4059                                       struct dlb2_ldb_port *port,
4060                                       uintptr_t cq_dma_base,
4061                                       struct dlb2_create_ldb_port_args *args,
4062                                       bool vdev_req,
4063                                       unsigned int vdev_id)
4064 {
4065         u32 hl_base = 0;
4066         u32 reg = 0;
4067         u32 ds = 0;
4068
4069         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4070         DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
4071         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
4072
4073         reg = cq_dma_base >> 32;
4074         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
4075
4076         /*
4077          * 'ro' == relaxed ordering. This setting allows DLB2 to write
4078          * cache lines out-of-order (but QEs within a cache line are always
4079          * updated in-order).
4080          */
4081         reg = 0;
4082         DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
4083         DLB2_BITS_SET(reg,
4084                  !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4085                  DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
4086         DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);
4087
4088         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);
4089
4090         port->cq_depth = args->cq_depth;
4091
4092         if (args->cq_depth <= 8) {
4093                 ds = 1;
4094         } else if (args->cq_depth == 16) {
4095                 ds = 2;
4096         } else if (args->cq_depth == 32) {
4097                 ds = 3;
4098         } else if (args->cq_depth == 64) {
4099                 ds = 4;
4100         } else if (args->cq_depth == 128) {
4101                 ds = 5;
4102         } else if (args->cq_depth == 256) {
4103                 ds = 6;
4104         } else if (args->cq_depth == 512) {
4105                 ds = 7;
4106         } else if (args->cq_depth == 1024) {
4107                 ds = 8;
4108         } else {
4109                 DLB2_HW_ERR(hw,
4110                             "[%s():%d] Internal error: invalid CQ depth\n",
4111                             __func__, __LINE__);
4112                 return -EFAULT;
4113         }
4114
4115         reg = 0;
4116         DLB2_BITS_SET(reg, ds,
4117                       DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4118         DLB2_CSR_WR(hw,
4119                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4120                     reg);
4121
4122         /*
4123          * To support CQs with depth less than 8, program the token count
4124          * register with a non-zero initial value. Operations such as domain
4125          * reset must take this initial value into account when quiescing the
4126          * CQ.
4127          */
4128         port->init_tkn_cnt = 0;
4129
4130         if (args->cq_depth < 8) {
4131                 reg = 0;
4132                 port->init_tkn_cnt = 8 - args->cq_depth;
4133
4134                 DLB2_BITS_SET(reg,
4135                               port->init_tkn_cnt,
4136                               DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
4137                 DLB2_CSR_WR(hw,
4138                             DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4139                             reg);
4140         } else {
4141                 DLB2_CSR_WR(hw,
4142                             DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4143                             DLB2_LSP_CQ_LDB_TKN_CNT_RST);
4144         }
4145
4146         reg = 0;
4147         DLB2_BITS_SET(reg, ds,
4148                       DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
4149         DLB2_CSR_WR(hw,
4150                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4151                     reg);
4152
4153         /* Reset the CQ write pointer */
4154         DLB2_CSR_WR(hw,
4155                     DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
4156                     DLB2_CHP_LDB_CQ_WPTR_RST);
4157
4158         reg = 0;
4159         DLB2_BITS_SET(reg,
4160                       port->hist_list_entry_limit - 1,
4161                       DLB2_CHP_HIST_LIST_LIM_LIMIT);
4162         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);
4163
4164         DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
4165                       DLB2_CHP_HIST_LIST_BASE_BASE);
4166         DLB2_CSR_WR(hw,
4167                     DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
4168                     hl_base);
4169
4170         /*
4171          * The inflight limit sets a cap on the number of QEs for which this CQ
4172          * can owe completions at one time.
4173          */
4174         reg = 0;
4175         DLB2_BITS_SET(reg, args->cq_history_list_size,
4176                       DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
4177         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
4178                     reg);
4179
4180         reg = 0;
4181         DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4182                       DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
4183         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
4184                     reg);
4185
4186         reg = 0;
4187         DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4188                       DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
4189         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
4190                     reg);
4191
4192         /*
4193          * Address translation (AT) settings: 0: untranslated, 2: translated
4194          * (see ATS spec regarding Address Type field for more details)
4195          */
4196
4197         if (hw->ver == DLB2_HW_V2) {
4198                 reg = 0;
4199                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
4200         }
4201
4202         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4203                 reg = 0;
4204                 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4205                               DLB2_SYS_LDB_CQ_PASID_PASID);
4206                 DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
4207         }
4208
4209         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);
4210
4211         reg = 0;
4212         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
4213         DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);
4214
4215         /* Disable the port's QID mappings */
4216         reg = 0;
4217         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
4218
4219         return 0;
4220 }
4221
4222 static bool
4223 dlb2_cq_depth_is_valid(u32 depth)
4224 {
4225         if (depth != 1 && depth != 2 &&
4226             depth != 4 && depth != 8 &&
4227             depth != 16 && depth != 32 &&
4228             depth != 64 && depth != 128 &&
4229             depth != 256 && depth != 512 &&
4230             depth != 1024)
4231                 return false;
4232
4233         return true;
4234 }
4235
4236 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4237                                    struct dlb2_hw_domain *domain,
4238                                    struct dlb2_ldb_port *port,
4239                                    uintptr_t cq_dma_base,
4240                                    struct dlb2_create_ldb_port_args *args,
4241                                    bool vdev_req,
4242                                    unsigned int vdev_id)
4243 {
4244         int ret, i;
4245
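        /*
         * Carve this port's history list entries out of the domain's
         * contiguous history list region: the domain's offset advances by
         * the requested size and its available entry count shrinks by the
         * same amount.
         */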
4246         port->hist_list_entry_base = domain->hist_list_entry_base +
4247                                      domain->hist_list_entry_offset;
4248         port->hist_list_entry_limit = port->hist_list_entry_base +
4249                                       args->cq_history_list_size;
4250
4251         domain->hist_list_entry_offset += args->cq_history_list_size;
4252         domain->avail_hist_list_entries -= args->cq_history_list_size;
4253
4254         ret = dlb2_ldb_port_configure_cq(hw,
4255                                          domain,
4256                                          port,
4257                                          cq_dma_base,
4258                                          args,
4259                                          vdev_req,
4260                                          vdev_id);
4261         if (ret)
4262                 return ret;
4263
4264         dlb2_ldb_port_configure_pp(hw,
4265                                    domain,
4266                                    port,
4267                                    vdev_req,
4268                                    vdev_id);
4269
4270         dlb2_ldb_port_cq_enable(hw, port);
4271
4272         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4273                 port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4274         port->num_mappings = 0;
4275
4276         port->enabled = true;
4277
4278         port->configured = true;
4279
4280         return 0;
4281 }
4282
4283 static void
4284 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4285                               u32 domain_id,
4286                               uintptr_t cq_dma_base,
4287                               struct dlb2_create_ldb_port_args *args,
4288                               bool vdev_req,
4289                               unsigned int vdev_id)
4290 {
4291         DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4292         if (vdev_req)
4293                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4294         DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4295                     domain_id);
4296         DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4297                     args->cq_depth);
4298         DLB2_HW_DBG(hw, "\tCQ hist list size:         %d\n",
4299                     args->cq_history_list_size);
4300         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4301                     cq_dma_base);
4302         DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
4303         DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
4304                     args->cos_strict);
4305 }
4306
4307 static int
4308 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4309                                  u32 domain_id,
4310                                  uintptr_t cq_dma_base,
4311                                  struct dlb2_create_ldb_port_args *args,
4312                                  struct dlb2_cmd_response *resp,
4313                                  bool vdev_req,
4314                                  unsigned int vdev_id,
4315                                  struct dlb2_hw_domain **out_domain,
4316                                  struct dlb2_ldb_port **out_port,
4317                                  int *out_cos_id)
4318 {
4319         struct dlb2_hw_domain *domain;
4320         struct dlb2_ldb_port *port;
4321         int i, id;
4322
4323         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4324
4325         if (!domain) {
4326                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4327                 return -EINVAL;
4328         }
4329
4330         if (!domain->configured) {
4331                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4332                 return -EINVAL;
4333         }
4334
4335         if (domain->started) {
4336                 resp->status = DLB2_ST_DOMAIN_STARTED;
4337                 return -EINVAL;
4338         }
4339
4340         if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
4341                 resp->status = DLB2_ST_INVALID_COS_ID;
4342                 return -EINVAL;
4343         }
4344
4345         if (args->cos_strict) {
4346                 id = args->cos_id;
4347                 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4348                                           typeof(*port));
4349         } else {
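                /*
                 * cos_strict is not set: search the CoS domains round-robin,
                 * starting at the requested cos_id, and use the first one
                 * that has an available port.
                 */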
4350                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4351                         id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
4352
4353                         port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4354                                                   typeof(*port));
4355                         if (port)
4356                                 break;
4357                 }
4358         }
4359
4360         if (!port) {
4361                 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4362                 return -EINVAL;
4363         }
4364
4365         /* Check cache-line alignment */
4366         if ((cq_dma_base & 0x3F) != 0) {
4367                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4368                 return -EINVAL;
4369         }
4370
4371         if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4372                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4373                 return -EINVAL;
4374         }
4375
4376         /* The history list size must be >= 1 */
4377         if (!args->cq_history_list_size) {
4378                 resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4379                 return -EINVAL;
4380         }
4381
4382         if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4383                 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4384                 return -EINVAL;
4385         }
4386
4387         *out_domain = domain;
4388         *out_port = port;
4389         *out_cos_id = id;
4390
4391         return 0;
4392 }
4393
4394 /**
4395  * dlb2_hw_create_ldb_port() - create a load-balanced port
4396  * @hw: dlb2_hw handle for a particular device.
4397  * @domain_id: domain ID.
4398  * @args: port creation arguments.
4399  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4400  * @resp: response structure.
4401  * @vdev_req: indicates whether this request came from a vdev.
4402  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4403  *
4404  * This function creates a load-balanced port.
4405  *
4406  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4407  * device.
4408  *
4409  * Return:
4410  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4411  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4412  * contains the port ID.
4413  *
4414  * resp->id contains a virtual ID if vdev_req is true.
4415  *
4416  * Errors:
4417  * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4418  *          pointer address is not properly aligned, the domain is not
4419  *          configured, or the domain has already been started.
4420  * EFAULT - Internal error (resp->status not set).
4421  */
4422 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4423                             u32 domain_id,
4424                             struct dlb2_create_ldb_port_args *args,
4425                             uintptr_t cq_dma_base,
4426                             struct dlb2_cmd_response *resp,
4427                             bool vdev_req,
4428                             unsigned int vdev_id)
4429 {
4430         struct dlb2_hw_domain *domain;
4431         struct dlb2_ldb_port *port;
4432         int ret, cos_id;
4433
4434         dlb2_log_create_ldb_port_args(hw,
4435                                       domain_id,
4436                                       cq_dma_base,
4437                                       args,
4438                                       vdev_req,
4439                                       vdev_id);
4440
4441         /*
4442          * Verify that hardware resources are available before attempting to
4443          * satisfy the request. This simplifies the error unwinding code.
4444          */
4445         ret = dlb2_verify_create_ldb_port_args(hw,
4446                                                domain_id,
4447                                                cq_dma_base,
4448                                                args,
4449                                                resp,
4450                                                vdev_req,
4451                                                vdev_id,
4452                                                &domain,
4453                                                &port,
4454                                                &cos_id);
4455         if (ret)
4456                 return ret;
4457
4458         ret = dlb2_configure_ldb_port(hw,
4459                                       domain,
4460                                       port,
4461                                       cq_dma_base,
4462                                       args,
4463                                       vdev_req,
4464                                       vdev_id);
4465         if (ret)
4466                 return ret;
4467
4468         /*
4469          * Configuration succeeded, so move the resource from the 'avail' to
4470          * the 'used' list.
4471          */
4472         dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4473
4474         dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4475
4476         resp->status = 0;
4477         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4478
4479         return 0;
4480 }
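/*
 * Illustrative usage sketch (not part of the driver): how a caller might
 * invoke dlb2_hw_create_ldb_port(). Only arguments referenced in this file
 * are shown; any remaining fields of struct dlb2_create_ldb_port_args are
 * assumed to be left zero-initialized.
 *
 *	struct dlb2_create_ldb_port_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	args.cq_depth = 64;
 *	args.cq_history_list_size = 64;
 *	args.cos_id = 0;
 *	args.cos_strict = 0;
 *
 *	ret = dlb2_hw_create_ldb_port(hw, domain_id, &args, cq_dma_base,
 *				      &resp, false, 0);
 *
 * On success, resp.id holds the new port ID (a virtual ID when vdev_req is
 * true); on failure, resp.status holds the detailed dlb2_error code.
 * cq_depth must be one of the supported powers of two, cq_history_list_size
 * must be at least 1 and fit within the domain's remaining history list
 * entries, and cq_dma_base must be 64B (cache line) aligned.
 */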
4481
4482 static void
4483 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
4484                               u32 domain_id,
4485                               uintptr_t cq_dma_base,
4486                               struct dlb2_create_dir_port_args *args,
4487                               bool vdev_req,
4488                               unsigned int vdev_id)
4489 {
4490         DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
4491         if (vdev_req)
4492                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4493         DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4494                     domain_id);
4495         DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4496                     args->cq_depth);
4497         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4498                     cq_dma_base);
4499 }
4500
4501 static struct dlb2_dir_pq_pair *
4502 dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
4503                             u32 id,
4504                             bool vdev_req,
4505                             struct dlb2_hw_domain *domain)
4506 {
4507         struct dlb2_list_entry *iter;
4508         struct dlb2_dir_pq_pair *port;
4509         RTE_SET_USED(iter);
4510
4511         if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
4512                 return NULL;
4513
4514         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
4515                 if ((!vdev_req && port->id.phys_id == id) ||
4516                     (vdev_req && port->id.virt_id == id))
4517                         return port;
4518         }
4519
4520         return NULL;
4521 }
4522
4523 static int
4524 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
4525                                  u32 domain_id,
4526                                  uintptr_t cq_dma_base,
4527                                  struct dlb2_create_dir_port_args *args,
4528                                  struct dlb2_cmd_response *resp,
4529                                  bool vdev_req,
4530                                  unsigned int vdev_id,
4531                                  struct dlb2_hw_domain **out_domain,
4532                                  struct dlb2_dir_pq_pair **out_port)
4533 {
4534         struct dlb2_hw_domain *domain;
4535         struct dlb2_dir_pq_pair *pq;
4536
4537         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4538
4539         if (!domain) {
4540                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4541                 return -EINVAL;
4542         }
4543
4544         if (!domain->configured) {
4545                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4546                 return -EINVAL;
4547         }
4548
4549         if (domain->started) {
4550                 resp->status = DLB2_ST_DOMAIN_STARTED;
4551                 return -EINVAL;
4552         }
4553
4554         if (args->queue_id != -1) {
4555                 /*
4556                  * If the user claims the queue is already configured, validate
4557                  * that the queue ID is valid, belongs to this domain, and has
4558                  * been configured.
4559                  */
4560                 pq = dlb2_get_domain_used_dir_pq(hw,
4561                                                  args->queue_id,
4562                                                  vdev_req,
4563                                                  domain);
4564
4565                 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4566                     !pq->queue_configured) {
4567                         resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
4568                         return -EINVAL;
4569                 }
4570         } else {
4571                 /*
4572                  * If the port's queue is not configured, validate that a free
4573                  * port-queue pair is available.
4574                  */
4575                 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4576                                         typeof(*pq));
4577                 if (!pq) {
4578                         resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
4579                         return -EINVAL;
4580                 }
4581         }
4582
4583         /* Check cache-line alignment */
4584         if ((cq_dma_base & 0x3F) != 0) {
4585                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4586                 return -EINVAL;
4587         }
4588
4589         if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4590                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4591                 return -EINVAL;
4592         }
4593
4594         *out_domain = domain;
4595         *out_port = pq;
4596
4597         return 0;
4598 }
4599
4600 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
4601                                        struct dlb2_hw_domain *domain,
4602                                        struct dlb2_dir_pq_pair *port,
4603                                        bool vdev_req,
4604                                        unsigned int vdev_id)
4605 {
4606         u32 reg = 0;
4607
4608         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_DIR_PP2VAS_VAS);
4609         DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), reg);
4610
4611         if (vdev_req) {
4612                 unsigned int offs;
4613                 u32 virt_id;
4614
4615                 /*
4616                  * DLB uses producer port address bits 17:12 to determine the
4617                  * producer port ID. In Scalable IOV mode, PP accesses come
4618                  * through the PF MMIO window for the physical producer port,
4619                  * so for translation purposes the virtual and physical port
4620                  * IDs are equal.
4621                  */
4622                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4623                         virt_id = port->id.virt_id;
4624                 else
4625                         virt_id = port->id.phys_id;
4626
4627                 reg = 0;
4628                 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_DIR_VPP2PP_PP);
4629                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
4630                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), reg);
4631
4632                 reg = 0;
4633                 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_PP2VDEV_VDEV);
4634                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VDEV(port->id.phys_id), reg);
4635
4636                 reg = 0;
4637                 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VPP_V_VPP_V);
4638                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), reg);
4639         }
4640
4641         reg = 0;
4642         DLB2_BIT_SET(reg, DLB2_SYS_DIR_PP_V_PP_V);
4643         DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP_V(port->id.phys_id), reg);
4644 }
4645
4646 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
4647                                       struct dlb2_hw_domain *domain,
4648                                       struct dlb2_dir_pq_pair *port,
4649                                       uintptr_t cq_dma_base,
4650                                       struct dlb2_create_dir_port_args *args,
4651                                       bool vdev_req,
4652                                       unsigned int vdev_id)
4653 {
4654         u32 reg = 0;
4655         u32 ds = 0;
4656
4657         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4658         DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_DIR_CQ_ADDR_L_ADDR_L);
4659         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), reg);
4660
4661         reg = cq_dma_base >> 32;
4662         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), reg);
4663
4664         /*
4665          * 'ro' == relaxed ordering. This setting allows DLB2 to write
4666          * cache lines out-of-order (but QEs within a cache line are always
4667          * updated in-order).
4668          */
4669         reg = 0;
4670         DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_CQ2VF_PF_RO_VF);
4671         DLB2_BITS_SET(reg, !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4672                  DLB2_SYS_DIR_CQ2VF_PF_RO_IS_PF);
4673         DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ2VF_PF_RO_RO);
4674
4675         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), reg);
4676
4677         if (args->cq_depth <= 8) {
4678                 ds = 1;
4679         } else if (args->cq_depth == 16) {
4680                 ds = 2;
4681         } else if (args->cq_depth == 32) {
4682                 ds = 3;
4683         } else if (args->cq_depth == 64) {
4684                 ds = 4;
4685         } else if (args->cq_depth == 128) {
4686                 ds = 5;
4687         } else if (args->cq_depth == 256) {
4688                 ds = 6;
4689         } else if (args->cq_depth == 512) {
4690                 ds = 7;
4691         } else if (args->cq_depth == 1024) {
4692                 ds = 8;
4693         } else {
4694                 DLB2_HW_ERR(hw,
4695                             "[%s():%d] Internal error: invalid CQ depth\n",
4696                             __func__, __LINE__);
4697                 return -EFAULT;
4698         }
4699
4700         reg = 0;
4701         DLB2_BITS_SET(reg, ds,
4702                       DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4703         DLB2_CSR_WR(hw,
4704                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4705                     reg);
4706
4707         /*
4708          * To support CQs with depth less than 8, program the token count
4709          * register with a non-zero initial value. Operations such as domain
4710          * reset must take this initial value into account when quiescing the
4711          * CQ.
4712          */
4713         port->init_tkn_cnt = 0;
4714
4715         if (args->cq_depth < 8) {
4716                 reg = 0;
4717                 port->init_tkn_cnt = 8 - args->cq_depth;
4718
4719                 DLB2_BITS_SET(reg, port->init_tkn_cnt,
4720                               DLB2_LSP_CQ_DIR_TKN_CNT_COUNT);
4721                 DLB2_CSR_WR(hw,
4722                             DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4723                             reg);
4724         } else {
4725                 DLB2_CSR_WR(hw,
4726                             DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4727                             DLB2_LSP_CQ_DIR_TKN_CNT_RST);
4728         }
4729
4730         reg = 0;
4731         DLB2_BITS_SET(reg, ds,
4732                       DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_TOKEN_DEPTH_SELECT_V2);
4733         DLB2_CSR_WR(hw,
4734                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
4735                                                       port->id.phys_id),
4736                     reg);
4737
4738         /* Reset the CQ write pointer */
4739         DLB2_CSR_WR(hw,
4740                     DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
4741                     DLB2_CHP_DIR_CQ_WPTR_RST);
4742
4743         /* Virtualize the PPID */
4744         reg = 0;
4745         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), reg);
4746
4747         /*
4748          * Address translation (AT) settings: 0: untranslated, 2: translated
4749          * (see ATS spec regarding Address Type field for more details)
4750          */
4751         if (hw->ver == DLB2_HW_V2) {
4752                 reg = 0;
4753                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), reg);
4754         }
4755
4756         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4757                 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4758                               DLB2_SYS_DIR_CQ_PASID_PASID);
4759                 DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ_PASID_FMT2);
4760         }
4761
4762         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id), reg);
4763
4764         reg = 0;
4765         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_DIR_CQ2VAS_CQ2VAS);
4766         DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id), reg);
4767
4768         return 0;
4769 }
4770
4771 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
4772                                    struct dlb2_hw_domain *domain,
4773                                    struct dlb2_dir_pq_pair *port,
4774                                    uintptr_t cq_dma_base,
4775                                    struct dlb2_create_dir_port_args *args,
4776                                    bool vdev_req,
4777                                    unsigned int vdev_id)
4778 {
4779         int ret;
4780
4781         ret = dlb2_dir_port_configure_cq(hw,
4782                                          domain,
4783                                          port,
4784                                          cq_dma_base,
4785                                          args,
4786                                          vdev_req,
4787                                          vdev_id);
4788
4789         if (ret)
4790                 return ret;
4791
4792         dlb2_dir_port_configure_pp(hw,
4793                                    domain,
4794                                    port,
4795                                    vdev_req,
4796                                    vdev_id);
4797
4798         dlb2_dir_port_cq_enable(hw, port);
4799
4800         port->enabled = true;
4801
4802         port->port_configured = true;
4803
4804         return 0;
4805 }
4806
4807 /**
4808  * dlb2_hw_create_dir_port() - create a directed port
4809  * @hw: dlb2_hw handle for a particular device.
4810  * @domain_id: domain ID.
4811  * @args: port creation arguments.
4812  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4813  * @resp: response structure.
4814  * @vdev_req: indicates whether this request came from a vdev.
4815  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4816  *
4817  * This function creates a directed port.
4818  *
4819  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4820  * device.
4821  *
4822  * Return:
4823  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4824  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4825  * contains the port ID.
4826  *
4827  * resp->id contains a virtual ID if vdev_req is true.
4828  *
4829  * Errors:
4830  * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4831  *          pointer address is not properly aligned, the domain is not
4832  *          configured, or the domain has already been started.
4833  * EFAULT - Internal error (resp->status not set).
4834  */
4835 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
4836                             u32 domain_id,
4837                             struct dlb2_create_dir_port_args *args,
4838                             uintptr_t cq_dma_base,
4839                             struct dlb2_cmd_response *resp,
4840                             bool vdev_req,
4841                             unsigned int vdev_id)
4842 {
4843         struct dlb2_dir_pq_pair *port;
4844         struct dlb2_hw_domain *domain;
4845         int ret;
4846
4847         dlb2_log_create_dir_port_args(hw,
4848                                       domain_id,
4849                                       cq_dma_base,
4850                                       args,
4851                                       vdev_req,
4852                                       vdev_id);
4853
4854         /*
4855          * Verify that hardware resources are available before attempting to
4856          * satisfy the request. This simplifies the error unwinding code.
4857          */
4858         ret = dlb2_verify_create_dir_port_args(hw,
4859                                                domain_id,
4860                                                cq_dma_base,
4861                                                args,
4862                                                resp,
4863                                                vdev_req,
4864                                                vdev_id,
4865                                                &domain,
4866                                                &port);
4867         if (ret)
4868                 return ret;
4869
4870         ret = dlb2_configure_dir_port(hw,
4871                                       domain,
4872                                       port,
4873                                       cq_dma_base,
4874                                       args,
4875                                       vdev_req,
4876                                       vdev_id);
4877         if (ret)
4878                 return ret;
4879
4880         /*
4881          * Configuration succeeded, so move the resource from the 'avail' to
4882          * the 'used' list (if it's not already there).
4883          */
4884         if (args->queue_id == -1) {
4885                 dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
4886
4887                 dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
4888         }
4889
4890         resp->status = 0;
4891         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4892
4893         return 0;
4894 }
4895
4896 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
4897                                      struct dlb2_hw_domain *domain,
4898                                      struct dlb2_dir_pq_pair *queue,
4899                                      struct dlb2_create_dir_queue_args *args,
4900                                      bool vdev_req,
4901                                      unsigned int vdev_id)
4902 {
4903         unsigned int offs;
4904         u32 reg = 0;
4905
4906         /* QID write permissions are turned on when the domain is started */
4907         offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4908                 queue->id.phys_id;
4909
4910         DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), reg);
4911
4912         /* Don't timestamp QEs that pass through this queue */
4913         DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_ITS(queue->id.phys_id), reg);
4914
4915         reg = 0;
4916         DLB2_BITS_SET(reg, args->depth_threshold,
4917                       DLB2_LSP_QID_DIR_DEPTH_THRSH_THRESH);
4918         DLB2_CSR_WR(hw,
4919                     DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver, queue->id.phys_id),
4920                     reg);
4921
4922         if (vdev_req) {
4923                 offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4924                         queue->id.virt_id;
4925
4926                 reg = 0;
4927                 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VQID_V_VQID_V);
4928                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), reg);
4929
4930                 reg = 0;
4931                 DLB2_BITS_SET(reg, queue->id.phys_id,
4932                               DLB2_SYS_VF_DIR_VQID2QID_QID);
4933                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), reg);
4934         }
4935
4936         reg = 0;
4937         DLB2_BIT_SET(reg, DLB2_SYS_DIR_QID_V_QID_V);
4938         DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), reg);
4939
4940         queue->queue_configured = true;
4941 }
4942
4943 static void
4944 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
4945                                u32 domain_id,
4946                                struct dlb2_create_dir_queue_args *args,
4947                                bool vdev_req,
4948                                unsigned int vdev_id)
4949 {
4950         DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
4951         if (vdev_req)
4952                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4953         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
4954         DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
4955 }
4956
4957 static int
4958 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
4959                                   u32 domain_id,
4960                                   struct dlb2_create_dir_queue_args *args,
4961                                   struct dlb2_cmd_response *resp,
4962                                   bool vdev_req,
4963                                   unsigned int vdev_id,
4964                                   struct dlb2_hw_domain **out_domain,
4965                                   struct dlb2_dir_pq_pair **out_queue)
4966 {
4967         struct dlb2_hw_domain *domain;
4968         struct dlb2_dir_pq_pair *pq;
4969
4970         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4971
4972         if (!domain) {
4973                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4974                 return -EINVAL;
4975         }
4976
4977         if (!domain->configured) {
4978                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4979                 return -EINVAL;
4980         }
4981
4982         if (domain->started) {
4983                 resp->status = DLB2_ST_DOMAIN_STARTED;
4984                 return -EINVAL;
4985         }
4986
4987         /*
4988          * If the user claims the port is already configured, validate that the
4989          * port ID is valid, belongs to this domain, and has been configured.
4990          */
4991         if (args->port_id != -1) {
4992                 pq = dlb2_get_domain_used_dir_pq(hw,
4993                                                  args->port_id,
4994                                                  vdev_req,
4995                                                  domain);
4996
4997                 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4998                     !pq->port_configured) {
4999                         resp->status = DLB2_ST_INVALID_PORT_ID;
5000                         return -EINVAL;
5001                 }
5002         } else {
5003                 /*
5004                  * If the queue's port is not configured, validate that a free
5005                  * port-queue pair is available.
5006                  */
5007                 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
5008                                         typeof(*pq));
5009                 if (!pq) {
5010                         resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
5011                         return -EINVAL;
5012                 }
5013         }
5014
5015         *out_domain = domain;
5016         *out_queue = pq;
5017
5018         return 0;
5019 }
5020
5021 /**
5022  * dlb2_hw_create_dir_queue() - create a directed queue
5023  * @hw: dlb2_hw handle for a particular device.
5024  * @domain_id: domain ID.
5025  * @args: queue creation arguments.
5026  * @resp: response structure.
5027  * @vdev_req: indicates whether this request came from a vdev.
5028  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5029  *
5030  * This function creates a directed queue.
5031  *
5032  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5033  * device.
5034  *
5035  * Return:
5036  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5037  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5038  * contains the queue ID.
5039  *
5040  * resp->id contains a virtual ID if vdev_req is true.
5041  *
5042  * Errors:
5043  * EINVAL - A requested resource is unavailable, the domain is not configured,
5044  *          or the domain has already been started.
5045  * EFAULT - Internal error (resp->status not set).
5046  */
5047 int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
5048                              u32 domain_id,
5049                              struct dlb2_create_dir_queue_args *args,
5050                              struct dlb2_cmd_response *resp,
5051                              bool vdev_req,
5052                              unsigned int vdev_id)
5053 {
5054         struct dlb2_dir_pq_pair *queue;
5055         struct dlb2_hw_domain *domain;
5056         int ret;
5057
5058         dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
5059
5060         /*
5061          * Verify that hardware resources are available before attempting to
5062          * satisfy the request. This simplifies the error unwinding code.
5063          */
5064         ret = dlb2_verify_create_dir_queue_args(hw,
5065                                                 domain_id,
5066                                                 args,
5067                                                 resp,
5068                                                 vdev_req,
5069                                                 vdev_id,
5070                                                 &domain,
5071                                                 &queue);
5072         if (ret)
5073                 return ret;
5074
5075         dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
5076
5077         /*
5078          * Configuration succeeded, so move the resource from the 'avail' to
5079          * the 'used' list (if it's not already there).
5080          */
5081         if (args->port_id == -1) {
5082                 dlb2_list_del(&domain->avail_dir_pq_pairs,
5083                               &queue->domain_list);
5084
5085                 dlb2_list_add(&domain->used_dir_pq_pairs,
5086                               &queue->domain_list);
5087         }
5088
5089         resp->status = 0;
5090
5091         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
5092
5093         return 0;
5094 }
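/*
 * Illustrative usage sketch (not part of the driver): creating a directed
 * port with no pre-existing queue and then attaching a directed queue to
 * it. Only arguments referenced in this file are shown; other fields are
 * assumed to be left zero-initialized.
 *
 *	struct dlb2_create_dir_port_args port_args = {0};
 *	struct dlb2_create_dir_queue_args queue_args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	port_args.cq_depth = 8;
 *	port_args.queue_id = -1;
 *	ret = dlb2_hw_create_dir_port(hw, domain_id, &port_args, cq_dma_base,
 *				      &resp, false, 0);
 *	if (ret)
 *		return ret;
 *
 *	queue_args.port_id = resp.id;
 *	ret = dlb2_hw_create_dir_queue(hw, domain_id, &queue_args, &resp,
 *				       false, 0);
 *
 * Passing -1 for queue_id (or port_id) tells the driver to pick a free
 * directed port-queue pair; passing an existing ID reuses the pair that was
 * already configured for the other half.
 */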
5095
5096 static bool
5097 dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
5098                                            struct dlb2_ldb_queue *queue,
5099                                            int *slot)
5100 {
5101         int i;
5102
5103         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
5104                 struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
5105
5106                 if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
5107                     map->pending_qid == queue->id.phys_id)
5108                         break;
5109         }
5110
5111         *slot = i;
5112
5113         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
5114 }
5115
5116 static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
5117                                               struct dlb2_ldb_queue *queue,
5118                                               struct dlb2_cmd_response *resp)
5119 {
5120         enum dlb2_qid_map_state state;
5121         int i;
5122
5123         /* Unused slot available? */
5124         if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
5125                 return 0;
5126
5127         /*
5128          * If the queue is already mapped (from the application's perspective),
5129          * this is simply a priority update.
5130          */
5131         state = DLB2_QUEUE_MAPPED;
5132         if (dlb2_port_find_slot_queue(port, state, queue, &i))
5133                 return 0;
5134
5135         state = DLB2_QUEUE_MAP_IN_PROG;
5136         if (dlb2_port_find_slot_queue(port, state, queue, &i))
5137                 return 0;
5138
5139         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
5140                 return 0;
5141
5142         /*
5143          * If the slot contains an unmap in progress, it's considered
5144          * available.
5145          */
5146         state = DLB2_QUEUE_UNMAP_IN_PROG;
5147         if (dlb2_port_find_slot(port, state, &i))
5148                 return 0;
5149
5150         state = DLB2_QUEUE_UNMAPPED;
5151         if (dlb2_port_find_slot(port, state, &i))
5152                 return 0;
5153
5154         resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
5155         return -EINVAL;
5156 }
5157
5158 static struct dlb2_ldb_queue *
5159 dlb2_get_domain_ldb_queue(u32 id,
5160                           bool vdev_req,
5161                           struct dlb2_hw_domain *domain)
5162 {
5163         struct dlb2_list_entry *iter;
5164         struct dlb2_ldb_queue *queue;
5165         RTE_SET_USED(iter);
5166
5167         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
5168                 return NULL;
5169
5170         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
5171                 if ((!vdev_req && queue->id.phys_id == id) ||
5172                     (vdev_req && queue->id.virt_id == id))
5173                         return queue;
5174         }
5175
5176         return NULL;
5177 }
5178
5179 static struct dlb2_ldb_port *
5180 dlb2_get_domain_used_ldb_port(u32 id,
5181                               bool vdev_req,
5182                               struct dlb2_hw_domain *domain)
5183 {
5184         struct dlb2_list_entry *iter;
5185         struct dlb2_ldb_port *port;
5186         int i;
5187         RTE_SET_USED(iter);
5188
5189         if (id >= DLB2_MAX_NUM_LDB_PORTS)
5190                 return NULL;
5191
5192         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
5193                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
5194                         if ((!vdev_req && port->id.phys_id == id) ||
5195                             (vdev_req && port->id.virt_id == id))
5196                                 return port;
5197                 }
5198
5199                 DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter) {
5200                         if ((!vdev_req && port->id.phys_id == id) ||
5201                             (vdev_req && port->id.virt_id == id))
5202                                 return port;
5203                 }
5204         }
5205
5206         return NULL;
5207 }
5208
5209 static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
5210                                               struct dlb2_ldb_port *port,
5211                                               int slot,
5212                                               struct dlb2_map_qid_args *args)
5213 {
5214         u32 cq2priov;
5215
5216         /* Read-modify-write the priority and valid bit register */
5217         cq2priov = DLB2_CSR_RD(hw,
5218                                DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id));
5219
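        /*
         * Each QID slot owns 3 priority bits and one valid bit in CQ2PRIOV:
         * slot N's 3-bit priority is shifted to bit position 3N (masked by
         * the PRIO field) and its valid bit to bit
         * DLB2_LSP_CQ2PRIOV_V_LOC + N.
         */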
5220         cq2priov |= (1 << (slot + DLB2_LSP_CQ2PRIOV_V_LOC)) &
5221                     DLB2_LSP_CQ2PRIOV_V;
5222         cq2priov |= ((args->priority & 0x7) << slot * 3) &
5223                     DLB2_LSP_CQ2PRIOV_PRIO;
5224
5225         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), cq2priov);
5226
5227         dlb2_flush_csr(hw);
5228
5229         port->qid_map[slot].priority = args->priority;
5230 }
5231
5232 static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
5233                                     u32 domain_id,
5234                                     struct dlb2_map_qid_args *args,
5235                                     struct dlb2_cmd_response *resp,
5236                                     bool vdev_req,
5237                                     unsigned int vdev_id,
5238                                     struct dlb2_hw_domain **out_domain,
5239                                     struct dlb2_ldb_port **out_port,
5240                                     struct dlb2_ldb_queue **out_queue)
5241 {
5242         struct dlb2_hw_domain *domain;
5243         struct dlb2_ldb_queue *queue;
5244         struct dlb2_ldb_port *port;
5245         int id;
5246
5247         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5248
5249         if (!domain) {
5250                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5251                 return -EINVAL;
5252         }
5253
5254         if (!domain->configured) {
5255                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5256                 return -EINVAL;
5257         }
5258
5259         id = args->port_id;
5260
5261         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5262
5263         if (!port || !port->configured) {
5264                 resp->status = DLB2_ST_INVALID_PORT_ID;
5265                 return -EINVAL;
5266         }
5267
5268         if (args->priority >= DLB2_QID_PRIORITIES) {
5269                 resp->status = DLB2_ST_INVALID_PRIORITY;
5270                 return -EINVAL;
5271         }
5272
5273         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5274
5275         if (!queue || !queue->configured) {
5276                 resp->status = DLB2_ST_INVALID_QID;
5277                 return -EINVAL;
5278         }
5279
5280         if (queue->domain_id.phys_id != domain->id.phys_id) {
5281                 resp->status = DLB2_ST_INVALID_QID;
5282                 return -EINVAL;
5283         }
5284
5285         if (port->domain_id.phys_id != domain->id.phys_id) {
5286                 resp->status = DLB2_ST_INVALID_PORT_ID;
5287                 return -EINVAL;
5288         }
5289
5290         *out_domain = domain;
5291         *out_queue = queue;
5292         *out_port = port;
5293
5294         return 0;
5295 }
5296
5297 static void dlb2_log_map_qid(struct dlb2_hw *hw,
5298                              u32 domain_id,
5299                              struct dlb2_map_qid_args *args,
5300                              bool vdev_req,
5301                              unsigned int vdev_id)
5302 {
5303         DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
5304         if (vdev_req)
5305                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5306         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5307                     domain_id);
5308         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5309                     args->port_id);
5310         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5311                     args->qid);
5312         DLB2_HW_DBG(hw, "\tPriority:  %d\n",
5313                     args->priority);
5314 }
5315
5316 /**
5317  * dlb2_hw_map_qid() - map a load-balanced queue to a load-balanced port
5318  * @hw: dlb2_hw handle for a particular device.
5319  * @domain_id: domain ID.
5320  * @args: map QID arguments.
5321  * @resp: response structure.
5322  * @vdev_req: indicates whether this request came from a vdev.
5323  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5324  *
5325  * This function configures the DLB to schedule QEs from the specified queue
5326  * to the specified port. Each load-balanced port can be mapped to up to 8
5327  * queues; each load-balanced queue can potentially map to all the
5328  * load-balanced ports.
5329  *
5330  * A successful return does not necessarily mean the mapping was configured. If
5331  * this function is unable to immediately map the queue to the port, it will
5332  * add the requested operation to a per-port list of pending map/unmap
5333  * operations, and (if it's not already running) launch a kernel thread that
5334  * periodically attempts to process all pending operations. In a sense, this is
5335  * an asynchronous function.
5336  *
5337  * This asynchronicity creates two views of the state of hardware: the actual
5338  * hardware state and the requested state (as if every request completed
5339  * immediately). If there are any pending map/unmap operations, the requested
5340  * state will differ from the actual state. All validation is performed with
5341  * respect to the pending state; for instance, if there are 8 pending map
5342  * operations for port X, a request for a 9th will fail because a load-balanced
5343  * port can only map up to 8 queues.
5344  *
5345  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5346  * device.
5347  *
5348  * Return:
5349  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5350  * assigned a detailed error code from enum dlb2_error.
5351  *
5352  * Errors:
5353  * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
5354  *          the domain is not configured.
5355  * EFAULT - Internal error (resp->status not set).
5356  * EBUSY  - The requested port has outstanding detach operations.
5357  */
5358 int dlb2_hw_map_qid(struct dlb2_hw *hw,
5359                     u32 domain_id,
5360                     struct dlb2_map_qid_args *args,
5361                     struct dlb2_cmd_response *resp,
5362                     bool vdev_req,
5363                     unsigned int vdev_id)
5364 {
5365         struct dlb2_hw_domain *domain;
5366         struct dlb2_ldb_queue *queue;
5367         enum dlb2_qid_map_state st;
5368         struct dlb2_ldb_port *port;
5369         int ret, i;
5370         u8 prio;
5371
5372         dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
5373
5374         /*
5375          * Verify that hardware resources are available before attempting to
5376          * satisfy the request. This simplifies the error unwinding code.
5377          */
5378         ret = dlb2_verify_map_qid_args(hw,
5379                                        domain_id,
5380                                        args,
5381                                        resp,
5382                                        vdev_req,
5383                                        vdev_id,
5384                                        &domain,
5385                                        &port,
5386                                        &queue);
5387         if (ret)
5388                 return ret;
5389
5390         prio = args->priority;
5391
5392         /*
5393          * If there are any outstanding detach operations for this port,
5394          * attempt to complete them. This may be necessary to free up a QID
5395          * slot for this requested mapping.
5396          */
5397         if (port->num_pending_removals) {
5398                 bool bool_ret;
5399                 bool_ret = dlb2_domain_finish_unmap_port(hw, domain, port);
5400                 if (!bool_ret)
5401                         return -EBUSY;
5402         }
5403
5404         ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
5405         if (ret)
5406                 return ret;
5407
5408         /* Hardware requires disabling the CQ before mapping QIDs. */
5409         if (port->enabled)
5410                 dlb2_ldb_port_cq_disable(hw, port);
5411
5412         /*
5413          * If the queue is already mapped and this is only a priority change,
5414          * don't perform the full QID->CQ mapping procedure.
5415          */
5416         st = DLB2_QUEUE_MAPPED;
5417         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5418                 if (prio != port->qid_map[i].priority) {
5419                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5420                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5421                 }
5422
5423                 st = DLB2_QUEUE_MAPPED;
5424                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5425                 if (ret)
5426                         return ret;
5427
5428                 goto map_qid_done;
5429         }
5430
5431         st = DLB2_QUEUE_UNMAP_IN_PROG;
5432         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5433                 if (prio != port->qid_map[i].priority) {
5434                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5435                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5436                 }
5437
5438                 st = DLB2_QUEUE_MAPPED;
5439                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5440                 if (ret)
5441                         return ret;
5442
5443                 goto map_qid_done;
5444         }
5445
5446         /*
5447          * If this is a priority change on an in-progress mapping, don't
5448          * perform the full QID->CQ mapping procedure.
5449          */
5450         st = DLB2_QUEUE_MAP_IN_PROG;
5451         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5452                 port->qid_map[i].priority = prio;
5453
5454                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5455
5456                 goto map_qid_done;
5457         }
5458
5459         /*
5460          * If this is a priority change on a pending mapping, update the
5461          * pending priority.
5462          */
5463         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5464                 port->qid_map[i].pending_priority = prio;
5465
5466                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5467
5468                 goto map_qid_done;
5469         }
5470
5471         /*
5472          * If all the CQ's slots are in use, then there's an unmap in progress
5473          * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
5474          * mapping to pending_map and return. When the removal is completed for
5475          * the slot's current occupant, this mapping will be performed.
5476          */
5477         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
5478                 if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
5479                         enum dlb2_qid_map_state new_st;
5480
5481                         port->qid_map[i].pending_qid = queue->id.phys_id;
5482                         port->qid_map[i].pending_priority = prio;
5483
5484                         new_st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
5485
5486                         ret = dlb2_port_slot_state_transition(hw, port, queue,
5487                                                               i, new_st);
5488                         if (ret)
5489                                 return ret;
5490
5491                         DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
5492
5493                         goto map_qid_done;
5494                 }
5495         }
5496
5497         /*
5498          * If the domain has started, a special "dynamic" CQ->queue mapping
5499          * procedure is required in order to safely update the CQ<->QID tables.
5500          * The "static" procedure cannot be used when traffic is flowing,
5501          * because the CQ<->QID tables cannot be updated atomically and the
5502          * scheduler won't see the new mapping unless the queue's if_status
5503          * changes, which isn't guaranteed.
5504          */
5505         ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
5506
5507         /* If ret is less than zero, it's due to an internal error */
5508         if (ret < 0)
5509                 return ret;
5510
5511 map_qid_done:
5512         if (port->enabled)
5513                 dlb2_ldb_port_cq_enable(hw, port);
5514
5515         resp->status = 0;
5516
5517         return 0;
5518 }
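/*
 * Illustrative usage sketch (not part of the driver): mapping a
 * load-balanced queue to a load-balanced port. The port_id and queue_id
 * variables are hypothetical stand-ins for the IDs returned by the port
 * and queue creation calls.
 *
 *	struct dlb2_map_qid_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	args.port_id = port_id;
 *	args.qid = queue_id;
 *	args.priority = 0;
 *
 *	ret = dlb2_hw_map_qid(hw, domain_id, &args, &resp, false, 0);
 *
 * The priority must be less than DLB2_QID_PRIORITIES. A zero return only
 * means the request was accepted: if the mapping cannot be applied
 * immediately, it is queued on the port's pending map/unmap list and
 * completed later, as described above.
 */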
5519
5520 static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
5521                                u32 domain_id,
5522                                struct dlb2_unmap_qid_args *args,
5523                                bool vdev_req,
5524                                unsigned int vdev_id)
5525 {
5526         DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
5527         if (vdev_req)
5528                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5529         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5530                     domain_id);
5531         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5532                     args->port_id);
5533         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5534                     args->qid);
5535         if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
5536                 DLB2_HW_DBG(hw, "\tQueue's num mappings:  %d\n",
5537                             hw->rsrcs.ldb_queues[args->qid].num_mappings);
5538 }
5539
5540 static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
5541                                       u32 domain_id,
5542                                       struct dlb2_unmap_qid_args *args,
5543                                       struct dlb2_cmd_response *resp,
5544                                       bool vdev_req,
5545                                       unsigned int vdev_id,
5546                                       struct dlb2_hw_domain **out_domain,
5547                                       struct dlb2_ldb_port **out_port,
5548                                       struct dlb2_ldb_queue **out_queue)
5549 {
5550         enum dlb2_qid_map_state state;
5551         struct dlb2_hw_domain *domain;
5552         struct dlb2_ldb_queue *queue;
5553         struct dlb2_ldb_port *port;
5554         int slot;
5555         int id;
5556
5557         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5558
5559         if (!domain) {
5560                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5561                 return -EINVAL;
5562         }
5563
5564         if (!domain->configured) {
5565                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5566                 return -EINVAL;
5567         }
5568
5569         id = args->port_id;
5570
5571         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5572
5573         if (!port || !port->configured) {
5574                 resp->status = DLB2_ST_INVALID_PORT_ID;
5575                 return -EINVAL;
5576         }
5577
5578         if (port->domain_id.phys_id != domain->id.phys_id) {
5579                 resp->status = DLB2_ST_INVALID_PORT_ID;
5580                 return -EINVAL;
5581         }
5582
5583         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5584
5585         if (!queue || !queue->configured) {
5586                 DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
5587                             __func__, args->qid);
5588                 resp->status = DLB2_ST_INVALID_QID;
5589                 return -EINVAL;
5590         }
5591
5592         /*
5593          * Verify that the port has the queue mapped. From the application's
5594          * perspective a queue is mapped if it is actually mapped, the map is
5595          * in progress, or the map is blocked pending an unmap.
5596          */
5597         state = DLB2_QUEUE_MAPPED;
5598         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5599                 goto done;
5600
5601         state = DLB2_QUEUE_MAP_IN_PROG;
5602         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5603                 goto done;
5604
5605         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
5606                 goto done;
5607
5608         resp->status = DLB2_ST_INVALID_QID;
5609         return -EINVAL;
5610
5611 done:
5612         *out_domain = domain;
5613         *out_port = port;
5614         *out_queue = queue;
5615
5616         return 0;
5617 }
5618
5619 /**
5620  * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
5621  * @hw: dlb2_hw handle for a particular device.
5622  * @domain_id: domain ID.
5623  * @args: unmap QID arguments.
5624  * @resp: response structure.
5625  * @vdev_req: indicates whether this request came from a vdev.
5626  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5627  *
5628  * This function configures the DLB to stop scheduling QEs from the specified
5629  * queue to the specified port.
5630  *
5631  * A successful return does not necessarily mean the mapping was removed. If
5632  * this function is unable to immediately unmap the queue from the port, it
5633  * will add the requested operation to a per-port list of pending map/unmap
5634  * operations, and (if it's not already running) launch a kernel thread that
5635  * periodically attempts to process all pending operations. See
5636  * dlb2_hw_map_qid() for more details.
5637  *
5638  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5639  * device.
5640  *
5641  * Return:
5642  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5643  * assigned a detailed error code from enum dlb2_error.
5644  *
5645  * Errors:
5646  * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
5647  *          the domain is not configured.
5648  * EFAULT - Internal error (resp->status not set).
5649  */
5650 int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
5651                       u32 domain_id,
5652                       struct dlb2_unmap_qid_args *args,
5653                       struct dlb2_cmd_response *resp,
5654                       bool vdev_req,
5655                       unsigned int vdev_id)
5656 {
5657         struct dlb2_hw_domain *domain;
5658         struct dlb2_ldb_queue *queue;
5659         enum dlb2_qid_map_state st;
5660         struct dlb2_ldb_port *port;
5661         bool unmap_complete;
5662         int i, ret;
5663
5664         dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
5665
5666         /*
5667          * Verify that hardware resources are available before attempting to
5668          * satisfy the request. This simplifies the error unwinding code.
5669          */
5670         ret = dlb2_verify_unmap_qid_args(hw,
5671                                          domain_id,
5672                                          args,
5673                                          resp,
5674                                          vdev_req,
5675                                          vdev_id,
5676                                          &domain,
5677                                          &port,
5678                                          &queue);
5679         if (ret)
5680                 return ret;
5681
5682         /*
5683          * If the queue hasn't been mapped yet, we need to update the slot's
5684          * state and re-enable the queue's inflights.
5685          */
5686         st = DLB2_QUEUE_MAP_IN_PROG;
5687         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5688                 /*
5689                  * Since the in-progress map was aborted, re-enable the QID's
5690                  * inflights.
5691                  */
5692                 if (queue->num_pending_additions == 0)
5693                         dlb2_ldb_queue_set_inflight_limit(hw, queue);
5694
5695                 st = DLB2_QUEUE_UNMAPPED;
5696                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5697                 if (ret)
5698                         return ret;
5699
5700                 goto unmap_qid_done;
5701         }
5702
5703         /*
5704          * If the queue mapping is on hold pending an unmap, we simply need to
5705          * update the slot's state.
5706          */
5707         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5708                 st = DLB2_QUEUE_UNMAP_IN_PROG;
5709                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5710                 if (ret)
5711                         return ret;
5712
5713                 goto unmap_qid_done;
5714         }
5715
5716         st = DLB2_QUEUE_MAPPED;
5717         if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
5718                 DLB2_HW_ERR(hw,
5719                             "[%s()] Internal error: queue not mapped to the port's CQ\n",
5720                             __func__);
5721                 return -EFAULT;
5722         }
5723
5724         /*
5725          * QID->CQ mapping removal is an asynchronous procedure. It requires
5726          * stopping the DLB2 from scheduling this CQ, draining all inflights
5727          * from the CQ, then unmapping the queue from the CQ. This function
5728          * simply marks the port as needing the queue unmapped, and (if
5729          * necessary) starts the unmapping worker thread.
5730          */
5731         dlb2_ldb_port_cq_disable(hw, port);
5732
5733         st = DLB2_QUEUE_UNMAP_IN_PROG;
5734         ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5735         if (ret)
5736                 return ret;
5737
5738         /*
5739          * Attempt to finish the unmapping now, in case the port has no
5740          * outstanding inflights. If that's not the case, this will fail and
5741          * the unmapping will be completed at a later time.
5742          */
5743         unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
5744
5745         /*
5746          * If the unmapping couldn't complete immediately, launch the worker
5747          * thread (if it isn't already launched) to finish it later.
5748          */
5749         if (!unmap_complete && !os_worker_active(hw))
5750                 os_schedule_work(hw);
5751
5752 unmap_qid_done:
5753         resp->status = 0;
5754
5755         return 0;
5756 }
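
/*
 * Illustrative usage sketch, not part of the driver: issue an unmap request
 * for queue 'qid' on load-balanced port 'port_id'. A zero return only means
 * the request was accepted; the unmap itself may complete asynchronously (see
 * dlb2_hw_pending_port_unmaps() below). The helper name and the
 * DLB2_RESOURCE_USAGE_EXAMPLES guard are hypothetical and never defined.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static int example_request_unmap(struct dlb2_hw *hw, u32 domain_id,
                                 u32 port_id, u32 qid)
{
        struct dlb2_unmap_qid_args args = { .port_id = port_id, .qid = qid };
        struct dlb2_cmd_response resp = {0};
        int ret;

        /* PF-initiated request: vdev_req = false, vdev_id unused */
        ret = dlb2_hw_unmap_qid(hw, domain_id, &args, &resp, false, 0);
        if (ret)
                DLB2_HW_ERR(hw, "unmap request failed: status %u\n",
                            resp.status);

        return ret;
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */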
5757
5758 static void
5759 dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
5760                                   struct dlb2_pending_port_unmaps_args *args,
5761                                   bool vdev_req,
5762                                   unsigned int vdev_id)
5763 {
5764         DLB2_HW_DBG(hw, "DLB2 unmaps in progress arguments:\n");
5765         if (vdev_req)
5766                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5767         DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
5768 }
5769
5770 /**
5771  * dlb2_hw_pending_port_unmaps() - returns the number of unmap operations in
5772  *      progress.
5773  * @hw: dlb2_hw handle for a particular device.
5774  * @domain_id: domain ID.
5775  * @args: number of unmaps in progress args
5776  * @resp: response structure.
5777  * @vdev_req: indicates whether this request came from a vdev.
5778  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5779  *
5780  * Return:
5781  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5782  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5783  * contains the number of unmaps in progress.
5784  *
5785  * Errors:
5786  * EINVAL - Invalid domain ID or port ID.
5787  */
5788 int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
5789                                 u32 domain_id,
5790                                 struct dlb2_pending_port_unmaps_args *args,
5791                                 struct dlb2_cmd_response *resp,
5792                                 bool vdev_req,
5793                                 unsigned int vdev_id)
5794 {
5795         struct dlb2_hw_domain *domain;
5796         struct dlb2_ldb_port *port;
5797
5798         dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
5799
5800         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5801
5802         if (!domain) {
5803                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5804                 return -EINVAL;
5805         }
5806
5807         port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
5808         if (!port || !port->configured) {
5809                 resp->status = DLB2_ST_INVALID_PORT_ID;
5810                 return -EINVAL;
5811         }
5812
5813         resp->id = port->num_pending_removals;
5814
5815         return 0;
5816 }
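
/*
 * Illustrative usage sketch, not part of the driver: busy-poll until a port
 * reports no unmap operations in progress. A real caller would bound the loop
 * or sleep between iterations. The helper name and the
 * DLB2_RESOURCE_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static int example_wait_for_unmaps(struct dlb2_hw *hw, u32 domain_id,
                                   u32 port_id)
{
        struct dlb2_pending_port_unmaps_args args = { .port_id = port_id };
        struct dlb2_cmd_response resp = {0};
        int ret;

        do {
                ret = dlb2_hw_pending_port_unmaps(hw, domain_id, &args, &resp,
                                                  false, 0);
                if (ret)
                        return ret;
        } while (resp.id != 0);

        return 0;
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */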
5817
5818 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
5819                                          u32 domain_id,
5820                                          struct dlb2_cmd_response *resp,
5821                                          bool vdev_req,
5822                                          unsigned int vdev_id,
5823                                          struct dlb2_hw_domain **out_domain)
5824 {
5825         struct dlb2_hw_domain *domain;
5826
5827         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5828
5829         if (!domain) {
5830                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5831                 return -EINVAL;
5832         }
5833
5834         if (!domain->configured) {
5835                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5836                 return -EINVAL;
5837         }
5838
5839         if (domain->started) {
5840                 resp->status = DLB2_ST_DOMAIN_STARTED;
5841                 return -EINVAL;
5842         }
5843
5844         *out_domain = domain;
5845
5846         return 0;
5847 }
5848
5849 static void dlb2_log_start_domain(struct dlb2_hw *hw,
5850                                   u32 domain_id,
5851                                   bool vdev_req,
5852                                   unsigned int vdev_id)
5853 {
5854         DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
5855         if (vdev_req)
5856                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5857         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5858 }
5859
5860 /**
5861  * dlb2_hw_start_domain() - start a scheduling domain
5862  * @hw: dlb2_hw handle for a particular device.
5863  * @domain_id: domain ID.
5864  * @arg: start domain arguments.
5865  * @resp: response structure.
5866  * @vdev_req: indicates whether this request came from a vdev.
5867  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5868  *
5869  * This function starts a scheduling domain, which allows applications to send
5870  * traffic through it. Once a domain is started, its resources can no longer be
5871  * configured (besides QID remapping and port enable/disable).
5872  *
5873  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5874  * device.
5875  *
5876  * Return:
5877  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5878  * assigned a detailed error code from enum dlb2_error.
5879  *
5880  * Errors:
5881  * EINVAL - the domain is not configured, or the domain is already started.
5882  */
5883 int
5884 dlb2_hw_start_domain(struct dlb2_hw *hw,
5885                      u32 domain_id,
5886                      struct dlb2_start_domain_args *args,
5887                      struct dlb2_cmd_response *resp,
5888                      bool vdev_req,
5889                      unsigned int vdev_id)
5890 {
5891         struct dlb2_list_entry *iter;
5892         struct dlb2_dir_pq_pair *dir_queue;
5893         struct dlb2_ldb_queue *ldb_queue;
5894         struct dlb2_hw_domain *domain;
5895         int ret;
5896         RTE_SET_USED(args);
5897         RTE_SET_USED(iter);
5898
5899         dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
5900
5901         ret = dlb2_verify_start_domain_args(hw,
5902                                             domain_id,
5903                                             resp,
5904                                             vdev_req,
5905                                             vdev_id,
5906                                             &domain);
5907         if (ret)
5908                 return ret;
5909
5910         /*
5911          * Enable load-balanced and directed queue write permissions for the
5912          * queues this domain owns. Without this, the DLB2 will drop all
5913          * incoming traffic to those queues.
5914          */
5915         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
5916                 u32 vasqid_v = 0;
5917                 unsigned int offs;
5918
5919                 DLB2_BIT_SET(vasqid_v, DLB2_SYS_LDB_VASQID_V_VASQID_V);
5920
5921                 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
5922                         ldb_queue->id.phys_id;
5923
5924                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), vasqid_v);
5925         }
5926
5927         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
5928                 u32 vasqid_v = 0;
5929                 unsigned int offs;
5930
5931                 DLB2_BIT_SET(vasqid_v, DLB2_SYS_DIR_VASQID_V_VASQID_V);
5932
5933                 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
5934                         dir_queue->id.phys_id;
5935
5936                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), vasqid_v);
5937         }
5938
5939         dlb2_flush_csr(hw);
5940
5941         domain->started = true;
5942
5943         resp->status = 0;
5944
5945         return 0;
5946 }
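
/*
 * Illustrative usage sketch, not part of the driver: start a fully configured
 * domain on behalf of the PF. dlb2_hw_start_domain() ignores its args
 * structure (RTE_SET_USED above), so a zero-initialized one is sufficient.
 * The helper name and DLB2_RESOURCE_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static int example_start_domain(struct dlb2_hw *hw, u32 domain_id)
{
        struct dlb2_start_domain_args args = {0};
        struct dlb2_cmd_response resp = {0};

        return dlb2_hw_start_domain(hw, domain_id, &args, &resp, false, 0);
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */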
5947
5948 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
5949                                          u32 domain_id,
5950                                          u32 queue_id,
5951                                          bool vdev_req,
5952                                          unsigned int vf_id)
5953 {
5954         DLB2_HW_DBG(hw, "DLB2 get directed queue depth:\n");
5955         if (vdev_req)
5956                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5957         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5958         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5959 }
5960
5961 /**
5962  * dlb2_hw_get_dir_queue_depth() - returns the depth of a directed queue
5963  * @hw: dlb2_hw handle for a particular device.
5964  * @domain_id: domain ID.
5965  * @args: queue depth args
5966  * @resp: response structure.
5967  * @vdev_req: indicates whether this request came from a vdev.
5968  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5969  *
5970  * This function returns the depth of a directed queue.
5971  *
5972  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5973  * device.
5974  *
5975  * Return:
5976  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5977  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5978  * contains the depth.
5979  *
5980  * Errors:
5981  * EINVAL - Invalid domain ID or queue ID.
5982  */
5983 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
5984                                 u32 domain_id,
5985                                 struct dlb2_get_dir_queue_depth_args *args,
5986                                 struct dlb2_cmd_response *resp,
5987                                 bool vdev_req,
5988                                 unsigned int vdev_id)
5989 {
5990         struct dlb2_dir_pq_pair *queue;
5991         struct dlb2_hw_domain *domain;
5992         int id;
5993
5994         id = domain_id;
5995
5996         dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
5997                                      vdev_req, vdev_id);
5998
5999         domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
6000         if (!domain) {
6001                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6002                 return -EINVAL;
6003         }
6004
6005         id = args->queue_id;
6006
6007         queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
6008         if (!queue) {
6009                 resp->status = DLB2_ST_INVALID_QID;
6010                 return -EINVAL;
6011         }
6012
6013         resp->id = dlb2_dir_queue_depth(hw, queue);
6014
6015         return 0;
6016 }
6017
6018 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
6019                                          u32 domain_id,
6020                                          u32 queue_id,
6021                                          bool vdev_req,
6022                                          unsigned int vf_id)
6023 {
6024         DLB2_HW_DBG(hw, "DLB2 get load-balanced queue depth:\n");
6025         if (vdev_req)
6026                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
6027         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
6028         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
6029 }
6030
6031 /**
6032  * dlb2_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
6033  * @hw: dlb2_hw handle for a particular device.
6034  * @domain_id: domain ID.
6035  * @args: queue depth args
6036  * @resp: response structure.
6037  * @vdev_req: indicates whether this request came from a vdev.
6038  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
6039  *
6040  * This function returns the depth of a load-balanced queue.
6041  *
6042  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
6043  * device.
6044  *
6045  * Return:
6046  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
6047  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
6048  * contains the depth.
6049  *
6050  * Errors:
6051  * EINVAL - Invalid domain ID or queue ID.
6052  */
6053 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
6054                                 u32 domain_id,
6055                                 struct dlb2_get_ldb_queue_depth_args *args,
6056                                 struct dlb2_cmd_response *resp,
6057                                 bool vdev_req,
6058                                 unsigned int vdev_id)
6059 {
6060         struct dlb2_hw_domain *domain;
6061         struct dlb2_ldb_queue *queue;
6062
6063         dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
6064                                      vdev_req, vdev_id);
6065
6066         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6067         if (!domain) {
6068                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6069                 return -EINVAL;
6070         }
6071
6072         queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
6073         if (!queue) {
6074                 resp->status = DLB2_ST_INVALID_QID;
6075                 return -EINVAL;
6076         }
6077
6078         resp->id = dlb2_ldb_queue_depth(hw, queue);
6079
6080         return 0;
6081 }
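
/*
 * Illustrative usage sketch, not part of the driver: read the depth of one
 * load-balanced and one directed queue and log the results. The queue IDs are
 * caller-supplied assumptions; the helper name and the
 * DLB2_RESOURCE_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static int example_log_queue_depths(struct dlb2_hw *hw, u32 domain_id,
                                    u32 ldb_qid, u32 dir_qid)
{
        struct dlb2_get_ldb_queue_depth_args ldb_args = { .queue_id = ldb_qid };
        struct dlb2_get_dir_queue_depth_args dir_args = { .queue_id = dir_qid };
        struct dlb2_cmd_response resp = {0};
        int ret;

        ret = dlb2_hw_get_ldb_queue_depth(hw, domain_id, &ldb_args, &resp,
                                          false, 0);
        if (ret)
                return ret;
        DLB2_HW_DBG(hw, "LDB queue %u depth: %u\n", ldb_qid, resp.id);

        ret = dlb2_hw_get_dir_queue_depth(hw, domain_id, &dir_args, &resp,
                                          false, 0);
        if (ret)
                return ret;
        DLB2_HW_DBG(hw, "DIR queue %u depth: %u\n", dir_qid, resp.id);

        return 0;
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */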
6082
6083 /**
6084  * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
6085  * @hw: dlb2_hw handle for a particular device.
6086  *
6087  * This function attempts to finish any outstanding unmap procedures.
6088  * This function should be called by the kernel thread responsible for
6089  * finishing map/unmap procedures.
6090  *
6091  * Return:
6092  * Returns the number of procedures that weren't completed.
6093  */
6094 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
6095 {
6096         int i, num = 0;
6097
6098         /* Finish queue unmap jobs for any domain that needs it */
6099         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6100                 struct dlb2_hw_domain *domain = &hw->domains[i];
6101
6102                 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
6103         }
6104
6105         return num;
6106 }
6107
6108 /**
6109  * dlb2_finish_map_qid_procedures() - finish any pending map procedures
6110  * @hw: dlb2_hw handle for a particular device.
6111  *
6112  * This function attempts to finish any outstanding map procedures.
6113  * This function should be called by the kernel thread responsible for
6114  * finishing map/unmap procedures.
6115  *
6116  * Return:
6117  * Returns the number of procedures that weren't completed.
6118  */
6119 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
6120 {
6121         int i, num = 0;
6122
6123         /* Finish queue map jobs for any domain that needs it */
6124         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6125                 struct dlb2_hw_domain *domain = &hw->domains[i];
6126
6127                 num += dlb2_domain_finish_map_qid_procedures(hw, domain);
6128         }
6129
6130         return num;
6131 }
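
/*
 * Illustrative usage sketch, not part of the driver: the OS layer's worker
 * (scheduled via os_schedule_work()) is expected to drain pending map and
 * unmap procedures roughly like this, re-running until nothing remains. The
 * helper name and DLB2_RESOURCE_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static unsigned int example_process_pending_ops(struct dlb2_hw *hw)
{
        unsigned int remaining;

        remaining = dlb2_finish_unmap_qid_procedures(hw);
        remaining += dlb2_finish_map_qid_procedures(hw);

        /* A non-zero count means the caller should try again later */
        return remaining;
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */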
6132
6133 /**
6134  * dlb2_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports.
6135  * @hw: dlb2_hw handle for a particular device.
6136  *
6137  * This function must be called prior to configuring scheduling domains.
6138  */
6139
6140 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
6141 {
6142         u32 ctrl;
6143
6144         ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6145
6146         DLB2_BIT_SET(ctrl,
6147                      DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_DIR_CQ_MODE);
6148
6149         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6150 }
6151
6152 /**
6153  * dlb2_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
6154  *      ports.
6155  * @hw: dlb2_hw handle for a particular device.
6156  *
6157  * This function must be called prior to configuring scheduling domains.
6158  */
6159 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
6160 {
6161         u32 ctrl;
6162
6163         ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6164
6165         DLB2_BIT_SET(ctrl,
6166                      DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_LDB_CQ_MODE);
6167
6168         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6169 }
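
/*
 * Illustrative usage sketch, not part of the driver: enable sparse (64-byte
 * QE) CQ mode for both port types. Both calls must be made before any
 * scheduling domain is configured; whether to use sparse mode is a policy
 * choice left to the caller. The helper name and the
 * DLB2_RESOURCE_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static void example_enable_sparse_cq_modes(struct dlb2_hw *hw)
{
        dlb2_hw_enable_sparse_ldb_cq_mode(hw);
        dlb2_hw_enable_sparse_dir_cq_mode(hw);
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */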
6170
6171 /**
6172  * dlb2_get_group_sequence_numbers() - return a group's number of SNs per queue
6173  * @hw: dlb2_hw handle for a particular device.
6174  * @group_id: sequence number group ID.
6175  *
6176  * This function returns the configured number of sequence numbers per queue
6177  * for the specified group.
6178  *
6179  * Return:
6180  * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
6181  */
6182 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, u32 group_id)
6183 {
6184         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6185                 return -EINVAL;
6186
6187         return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
6188 }
6189
6190 /**
6191  * dlb2_get_group_sequence_number_occupancy() - return a group's in-use slots
6192  * @hw: dlb2_hw handle for a particular device.
6193  * @group_id: sequence number group ID.
6194  *
6195  * This function returns the group's number of in-use slots (i.e. load-balanced
6196  * queues using the specified group).
6197  *
6198  * Return:
6199  * Returns -EINVAL if group_id is invalid, else the group's occupancy.
6200  */
6201 int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw, u32 group_id)
6202 {
6203         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6204                 return -EINVAL;
6205
6206         return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
6207 }
6208
6209 static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
6210                                                 u32 group_id,
6211                                                 u32 val)
6212 {
6213         DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
6214         DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
6215         DLB2_HW_DBG(hw, "\tValue:    %u\n", val);
6216 }
6217
6218 /**
6219  * dlb2_set_group_sequence_numbers() - assign a group's number of SNs per queue
6220  * @hw: dlb2_hw handle for a particular device.
6221  * @group_id: sequence number group ID.
6222  * @val: requested amount of sequence numbers per queue.
6223  *
6224  * This function configures the group's number of sequence numbers per queue.
6225  * val can be a power-of-two between 64 and 1024, inclusive. This setting can
6226  * be configured until the first ordered load-balanced queue is configured, at
6227  * which point the configuration is locked.
6228  *
6229  * Return:
6230  * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
6231  * ordered queue is configured.
6232  */
6233 int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
6234                                     u32 group_id,
6235                                     u32 val)
6236 {
6237         const u32 valid_allocations[] = {64, 128, 256, 512, 1024};
6238         struct dlb2_sn_group *group;
6239         u32 sn_mode = 0;
6240         int mode;
6241
6242         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6243                 return -EINVAL;
6244
6245         group = &hw->rsrcs.sn_groups[group_id];
6246
6247         /*
6248          * Once the first load-balanced queue using an SN group is configured,
6249          * the group cannot be changed.
6250          */
6251         if (group->slot_use_bitmap != 0)
6252                 return -EPERM;
6253
6254         for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
6255                 if (val == valid_allocations[mode])
6256                         break;
6257
6258         if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
6259                 return -EINVAL;
6260
6261         group->mode = mode;
6262         group->sequence_numbers_per_queue = val;
6263
6264         DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[0].mode,
6265                  DLB2_RO_GRP_SN_MODE_SN_MODE_0);
6266         DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[1].mode,
6267                  DLB2_RO_GRP_SN_MODE_SN_MODE_1);
6268
6269         DLB2_CSR_WR(hw, DLB2_RO_GRP_SN_MODE(hw->ver), sn_mode);
6270
6271         dlb2_log_set_group_sequence_numbers(hw, group_id, val);
6272
6273         return 0;
6274 }
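
/*
 * Illustrative usage sketch, not part of the driver: allocate 256 sequence
 * numbers per queue to SN group 0 and read the setting back. This only
 * succeeds while no ordered load-balanced queue is using the group. The
 * helper name and DLB2_RESOURCE_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static int example_configure_sn_group(struct dlb2_hw *hw)
{
        int ret;

        ret = dlb2_set_group_sequence_numbers(hw, 0, 256);
        if (ret)
                return ret;

        /* Expected to return 256 after the assignment above */
        return dlb2_get_group_sequence_numbers(hw, 0);
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */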
6275
6276 /**
6277  * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights
6278  * @hw: dlb2_hw handle for a particular device.
6279  * @weight: 8-entry array of arbiter weights.
6280  *
6281  * weight[N] programs priority N's weight. In cases where the 8 priorities are
6282  * reduced to 4 bins, the mapping is:
6283  * - weight[1] programs bin 0
6284  * - weight[3] programs bin 1
6285  * - weight[5] programs bin 2
6286  * - weight[7] programs bin 3
6287  */
6288 void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8])
6289 {
6290         u32 reg = 0;
6291
6292         DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN0);
6293         DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN1);
6294         DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN2);
6295         DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN3);
6296         DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN, reg);
6297
6298         reg = 0;
6299         DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI0);
6300         DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI1);
6301         DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI2);
6302         DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI3);
6303         DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0(hw->ver), reg);
6304
6305         reg = 0;
6306         DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0);
6307         DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1);
6308         DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2);
6309         DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3);
6310         DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0(hw->ver), reg);
6311
6312         reg = 0;
6313         DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0);
6314         DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1);
6315         DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2);
6316         DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3);
6317         DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0, reg);
6318
6319         reg = 0;
6320         DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI0);
6321         DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI1);
6322         DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI2);
6323         DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI3);
6324         DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0, reg);
6325
6326         reg = 0;
6327         DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI0);
6328         DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI1);
6329         DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI2);
6330         DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI3);
6331         DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0(hw->ver), reg);
6332
6333         reg = 0;
6334         DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN0);
6335         DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN1);
6336         DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN2);
6337         DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN3);
6338         DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN, reg);
6339
6340         reg = 0;
6341         DLB2_BITS_SET(reg, weight[1], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI0);
6342         DLB2_BITS_SET(reg, weight[3], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI1);
6343         DLB2_BITS_SET(reg, weight[5], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI2);
6344         DLB2_BITS_SET(reg, weight[7], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI3);
6345         DLB2_CSR_WR(hw, DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0, reg);
6346 }
6347
6348 /**
6349  * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights
6350  * @hw: dlb2_hw handle for a particular device.
6351  * @weight: 8-entry array of arbiter weights.
6352  *
6353  * weight[N] programs priority N's weight. In cases where the 8 priorities are
6354  * reduced to 4 bins, the mapping is:
6355  * - weight[1] programs bin 0
6356  * - weight[3] programs bin 1
6357  * - weight[5] programs bin 2
6358  * - weight[7] programs bin 3
6359  */
6360 void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8])
6361 {
6362         u32 reg = 0;
6363
6364         DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI0_WEIGHT);
6365         DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI1_WEIGHT);
6366         DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI2_WEIGHT);
6367         DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI3_WEIGHT);
6368         DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0(hw->ver), reg);
6369
6370         reg = 0;
6371         DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI0_WEIGHT);
6372         DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI1_WEIGHT);
6373         DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI2_WEIGHT);
6374         DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI3_WEIGHT);
6375         DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0(hw->ver), reg);
6376 }
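
/*
 * Illustrative usage sketch, not part of the driver: program identical weights
 * for all four priority bins. Only the odd-indexed entries matter when the 8
 * priorities are folded into 4 bins; the weight value 1 is an arbitrary
 * example. The helper name and DLB2_RESOURCE_USAGE_EXAMPLES guard are
 * hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static void example_program_arbiter_weights(struct dlb2_hw *hw)
{
        u8 weights[8] = {0, 1, 0, 1, 0, 1, 0, 1};

        dlb2_hw_set_qe_arbiter_weights(hw, weights);
        dlb2_hw_set_qid_arbiter_weights(hw, weights);
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */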
6377
6378 static void dlb2_log_enable_cq_weight(struct dlb2_hw *hw,
6379                                       u32 domain_id,
6380                                       struct dlb2_enable_cq_weight_args *args,
6381                                       bool vdev_req,
6382                                       unsigned int vdev_id)
6383 {
6384         DLB2_HW_DBG(hw, "DLB2 enable CQ weight arguments:\n");
6385         DLB2_HW_DBG(hw, "\tvdev_req %d, vdev_id %d\n", vdev_req, vdev_id);
6386         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
6387         DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
6388         DLB2_HW_DBG(hw, "\tLimit:   %d\n", args->limit);
6389 }
6390
6391 static int
6392 dlb2_verify_enable_cq_weight_args(struct dlb2_hw *hw,
6393                                   u32 domain_id,
6394                                   struct dlb2_enable_cq_weight_args *args,
6395                                   struct dlb2_cmd_response *resp,
6396                                   bool vdev_req,
6397                                   unsigned int vdev_id)
6398 {
6399         struct dlb2_hw_domain *domain;
6400         struct dlb2_ldb_port *port;
6401
6402         if (hw->ver == DLB2_HW_V2) {
6403                 resp->status = DLB2_ST_FEATURE_UNAVAILABLE;
6404                 return -EINVAL;
6405         }
6406
6407         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6408
6409         if (!domain) {
6410                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6411                 return -EINVAL;
6412         }
6413
6414         if (!domain->configured) {
6415                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
6416                 return -EINVAL;
6417         }
6418
6419         if (domain->started) {
6420                 resp->status = DLB2_ST_DOMAIN_STARTED;
6421                 return -EINVAL;
6422         }
6423
6424         port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
6425         if (!port || !port->configured) {
6426                 resp->status = DLB2_ST_INVALID_PORT_ID;
6427                 return -EINVAL;
6428         }
6429
6430         if (args->limit == 0 || args->limit > port->cq_depth) {
6431                 resp->status = DLB2_ST_INVALID_CQ_WEIGHT_LIMIT;
6432                 return -EINVAL;
6433         }
6434
6435         return 0;
6436 }
6437
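/**
 * dlb2_hw_enable_cq_weight() - enable the CQ weight limit for an LDB port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: CQ weight arguments (port ID and weight limit).
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function programs the port's DLB2_LSP_CFG_CQ_LDB_WU_LIMIT register
 * with the requested limit. The feature is unavailable on DLB 2.0 devices and
 * must be configured after the port is created but before the domain starts.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - The feature is unavailable, the domain or port ID is invalid, the
 *          domain is not configured or already started, or the limit is zero
 *          or exceeds the port's CQ depth.
 * EFAULT - Internal error (resp->status not set).
 */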
6438 int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw,
6439                              u32 domain_id,
6440                              struct dlb2_enable_cq_weight_args *args,
6441                              struct dlb2_cmd_response *resp,
6442                              bool vdev_req,
6443                              unsigned int vdev_id)
6444 {
6445         struct dlb2_hw_domain *domain;
6446         struct dlb2_ldb_port *port;
6447         int ret, id;
6448         u32 reg = 0;
6449
6450         dlb2_log_enable_cq_weight(hw, domain_id, args, vdev_req, vdev_id);
6451
6452         /*
6453          * Verify that hardware resources are available before attempting to
6454          * satisfy the request. This simplifies the error unwinding code.
6455          */
6456         ret = dlb2_verify_enable_cq_weight_args(hw,
6457                                                 domain_id,
6458                                                 args,
6459                                                 resp,
6460                                                 vdev_req,
6461                                                 vdev_id);
6462         if (ret)
6463                 return ret;
6464
6465         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6466         if (!domain) {
6467                 DLB2_HW_ERR(hw,
6468                             "[%s():%d] Internal error: domain not found\n",
6469                             __func__, __LINE__);
6470                 return -EFAULT;
6471         }
6472
6473         id = args->port_id;
6474
6475         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
6476         if (!port) {
6477                 DLB2_HW_ERR(hw,
6478                             "[%s():%d] Internal error: port not found\n",
6479                             __func__, __LINE__);
6480                 return -EFAULT;
6481         }
6482
6483         DLB2_BIT_SET(reg, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_V);
6484         DLB2_BITS_SET(reg, args->limit, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_LIMIT);
6485
6486         DLB2_CSR_WR(hw, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id), reg);
6487
6488         resp->status = 0;
6489
6490         return 0;
6491 }
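
/*
 * Illustrative usage sketch, not part of the driver: enable the CQ weight
 * limit on a load-balanced port before the domain is started, using the
 * port's full CQ depth as the limit. The cq_depth value is a caller-supplied
 * assumption; the helper name and DLB2_RESOURCE_USAGE_EXAMPLES guard are
 * hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static int example_enable_cq_weight(struct dlb2_hw *hw, u32 domain_id,
                                    u32 port_id, u32 cq_depth)
{
        struct dlb2_enable_cq_weight_args args = {
                .port_id = port_id,
                .limit = cq_depth,
        };
        struct dlb2_cmd_response resp = {0};

        return dlb2_hw_enable_cq_weight(hw, domain_id, &args, &resp, false, 0);
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */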
6492
6493 static void dlb2_log_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bw)
6494 {
6495         DLB2_HW_DBG(hw, "DLB2 set port CoS bandwidth:\n");
6496         DLB2_HW_DBG(hw, "\tCoS ID:    %u\n", cos_id);
6497         DLB2_HW_DBG(hw, "\tBandwidth: %u\n", bw);
6498 }
6499
6500 #define DLB2_MAX_BW_PCT 100
6501
6502 /**
6503  * dlb2_hw_set_cos_bandwidth() - set a bandwidth allocation percentage for a
6504  *      port class-of-service.
6505  * @hw: dlb2_hw handle for a particular device.
6506  * @cos_id: class-of-service ID.
6507  * @bandwidth: class-of-service bandwidth.
6508  *
6509  * Return:
6510  * Returns 0 upon success, < 0 otherwise.
6511  *
6512  * Errors:
6513  * EINVAL - Invalid cos ID, bandwidth is greater than 100, or bandwidth would
6514  *          cause the total bandwidth across all classes of service to exceed
6515  *          100%.
6516  */
6517 int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth)
6518 {
6519         unsigned int i;
6520         u32 reg;
6521         u8 total;
6522
6523         if (cos_id >= DLB2_NUM_COS_DOMAINS)
6524                 return -EINVAL;
6525
6526         if (bandwidth > DLB2_MAX_BW_PCT)
6527                 return -EINVAL;
6528
6529         total = 0;
6530
6531         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
6532                 total += (i == cos_id) ? bandwidth : hw->cos_reservation[i];
6533
6534         if (total > DLB2_MAX_BW_PCT)
6535                 return -EINVAL;
6536
6537         reg = DLB2_CSR_RD(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id));
6538
6539         /*
6540          * Normalize the bandwidth to a value in the range 0-255. Integer
6541          * division may leave unreserved scheduling slots; these will be
6542          * divided among the 4 classes of service.
6543          */
6544         DLB2_BITS_SET(reg, (bandwidth * 256) / 100, DLB2_LSP_CFG_SHDW_RANGE_COS_BW_RANGE);
6545         DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id), reg);
6546
6547         reg = 0;
6548         DLB2_BIT_SET(reg, DLB2_LSP_CFG_SHDW_CTRL_TRANSFER);
6549         /* Atomically transfer the newly configured service weight */
6550         DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_CTRL(hw->ver), reg);
6551
6552         dlb2_log_set_cos_bandwidth(hw, cos_id, bandwidth);
6553
6554         hw->cos_reservation[cos_id] = bandwidth;
6555
6556         return 0;
6557 }
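
/*
 * Illustrative usage sketch, not part of the driver: apply a 10/20/30/40
 * percent bandwidth split across the four classes of service. Classes whose
 * share shrinks are programmed first so that, assuming the device starts from
 * an even split, the running total never exceeds 100%. The helper name and
 * DLB2_RESOURCE_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef DLB2_RESOURCE_USAGE_EXAMPLES
static int example_split_cos_bandwidth(struct dlb2_hw *hw)
{
        const u8 bw[DLB2_NUM_COS_DOMAINS] = {10, 20, 30, 40};
        unsigned int i;
        int ret;

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                ret = dlb2_hw_set_cos_bandwidth(hw, i, bw[i]);
                if (ret)
                        return ret;
        }

        return 0;
}
#endif /* DLB2_RESOURCE_USAGE_EXAMPLES */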