/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb2_user.h"

#include "dlb2_hw_types.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"
#include "dlb2_resource.h"

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"

#define DLB2_DOM_LIST_HEAD(head, type) \
        DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
        DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
        DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
        DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
        DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
        DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)

/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function only needs to be called for configuration that can occur after the
 * domain has started; prior to starting, applications cannot send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
        DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
}
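
/*
 * Illustrative ordering sketch (not part of the driver; the register name is
 * a placeholder): a post-start configuration write is followed by a read-back
 * so the write is guaranteed to have completed before any subsequent HCWs:
 *
 *        DLB2_CSR_WR(hw, SOME_POST_START_CFG_REG, reg);
 *        dlb2_flush_csr(hw);
 */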

static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
        int i;

        dlb2_list_init_head(&domain->used_ldb_queues);
        dlb2_list_init_head(&domain->used_dir_pq_pairs);
        dlb2_list_init_head(&domain->avail_ldb_queues);
        dlb2_list_init_head(&domain->avail_dir_pq_pairs);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&domain->used_ldb_ports[i]);
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}

static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
{
        int i;
        dlb2_list_init_head(&rsrc->avail_domains);
        dlb2_list_init_head(&rsrc->used_domains);
        dlb2_list_init_head(&rsrc->avail_ldb_queues);
        dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
}

/**
 * dlb2_resource_free() - free device state memory
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function frees software state pointed to by dlb2_hw. This function
 * should be called when resetting the device or unloading the driver.
 */
void dlb2_resource_free(struct dlb2_hw *hw)
{
        int i;

        if (hw->pf.avail_hist_list_entries)
                dlb2_bitmap_free(hw->pf.avail_hist_list_entries);

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                if (hw->vdev[i].avail_hist_list_entries)
                        dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
        }
}

/**
 * dlb2_resource_init() - initialize the device
 * @hw: pointer to struct dlb2_hw.
 * @ver: device version.
 *
 * This function initializes the device's software state (pointed to by the hw
 * argument) and programs global scheduling QoS registers. This function should
 * be called during driver initialization, and the dlb2_hw structure should
 * be zero-initialized before calling the function.
 *
 * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
 * device is reset.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 */
int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
        struct dlb2_list_entry *list;
        unsigned int i;
        int ret;

        /*
         * For optimal load-balancing, ports that map to one or more QIDs in
         * common should not be in numerical sequence. The port->QID mapping is
         * application dependent, but the driver interleaves port IDs as much
         * as possible to reduce the likelihood of sequential ports mapping to
         * the same QID(s). This initial allocation of port IDs maximizes the
         * average distance between an ID and its immediate neighbors (i.e.
         * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
         * 3, etc.).
         */
        const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
                0,  7,  14,  5, 12,  3, 10,  1,  8, 15,  6, 13,  4, 11,  2,  9,
                16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
                32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
                48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
        };
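
        /*
         * For example, the allocation order above hands out IDs 0, 7, 14, 5,
         * 12, ...: any two ports allocated back-to-back differ by 7 or 9, so
         * consecutively allocated ports never receive adjacent physical IDs.
         */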

        hw->ver = ver;

        dlb2_init_fn_rsrc_lists(&hw->pf);

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
                dlb2_init_fn_rsrc_lists(&hw->vdev[i]);

        for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
                dlb2_init_domain_rsrc_lists(&hw->domains[i]);
                hw->domains[i].parent_func = &hw->pf;
        }

        /* Give all resources to the PF driver */
        hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
        for (i = 0; i < hw->pf.num_avail_domains; i++) {
                list = &hw->domains[i].func_list;

                dlb2_list_add(&hw->pf.avail_domains, list);
        }

        hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
        for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
                list = &hw->rsrcs.ldb_queues[i].func_list;

                dlb2_list_add(&hw->pf.avail_ldb_queues, list);
        }

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                hw->pf.num_avail_ldb_ports[i] =
                        DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;

        for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
                int cos_id = i >> DLB2_NUM_COS_DOMAINS;
                struct dlb2_ldb_port *port;

                port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

                dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
                              &port->func_list);
        }

        hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
        for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
                list = &hw->rsrcs.dir_pq_pairs[i].func_list;

                dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
        }

        if (hw->ver == DLB2_HW_V2) {
                hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
                hw->pf.num_avail_dqed_entries =
                        DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
        } else {
                hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
        }

        hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;

        ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
                                DLB2_MAX_NUM_HIST_LIST_ENTRIES);
        if (ret)
                goto unwind;

        ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
        if (ret)
                goto unwind;

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
                                        DLB2_MAX_NUM_HIST_LIST_ENTRIES);
                if (ret)
                        goto unwind;

                ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
                if (ret)
                        goto unwind;
        }

        /* Initialize the hardware resource IDs */
        for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
                hw->domains[i].id.phys_id = i;
                hw->domains[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
                hw->rsrcs.ldb_queues[i].id.phys_id = i;
                hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
                hw->rsrcs.ldb_ports[i].id.phys_id = i;
                hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
                hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
                hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
                hw->rsrcs.sn_groups[i].id = i;
                /* Default mode (0) is 64 sequence numbers per queue */
                hw->rsrcs.sn_groups[i].mode = 0;
                hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
                hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
        }

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;

        return 0;

unwind:
        dlb2_resource_free(hw);

        return ret;
}
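
/*
 * Illustrative init/teardown pairing (sketch only; the surrounding probe and
 * remove paths are not shown here):
 *
 *        memset(hw, 0, sizeof(*hw));
 *        if (dlb2_resource_init(hw, DLB2_HW_V2_5) < 0)
 *                return -1;
 *        ...
 *        dlb2_resource_free(hw);        (on device reset or driver unload)
 */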

/**
 * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
 * @hw: dlb2_hw handle for a particular device.
 * @ver: device version.
 *
 * Clearing the PMCSR must be done at initialization to make the device fully
 * operational.
 */
void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
        u32 pmcsr_dis;

        pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));

        DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);

        DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
}

/**
 * dlb2_hw_get_num_resources() - query the PCI function's available resources
 * @hw: dlb2_hw handle for a particular device.
 * @arg: pointer to resource counts.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function returns the number of available resources for the PF or for a
 * VF.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, -EINVAL if vdev_req is true and vdev_id is
 * invalid.
 */
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
                              struct dlb2_get_num_resources_args *arg,
                              bool vdev_req,
                              unsigned int vdev_id)
{
        struct dlb2_function_resources *rsrcs;
        struct dlb2_bitmap *map;
        int i;

        if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
                return -EINVAL;

        if (vdev_req)
                rsrcs = &hw->vdev[vdev_id];
        else
                rsrcs = &hw->pf;

        arg->num_sched_domains = rsrcs->num_avail_domains;

        arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;

        arg->num_ldb_ports = 0;
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];

        arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
        arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
        arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
        arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];

        arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;

        arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;

        map = rsrcs->avail_hist_list_entries;

        arg->num_hist_list_entries = dlb2_bitmap_count(map);

        arg->max_contiguous_hist_list_entries =
                dlb2_bitmap_longest_set_range(map);

        if (hw->ver == DLB2_HW_V2) {
                arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
                arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
        } else {
                arg->num_credits = rsrcs->num_avail_entries;
        }
        return 0;
}
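
/*
 * Illustrative query sketch (not part of the driver): a caller can size its
 * domain request from the reported counts, e.g.:
 *
 *        struct dlb2_get_num_resources_args avail;
 *
 *        if (dlb2_hw_get_num_resources(hw, &avail, false, 0) == 0)
 *                ... request at most avail.num_ldb_ports LDB ports, etc. ...
 */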

static void dlb2_configure_domain_credits_v2_5(struct dlb2_hw *hw,
                                               struct dlb2_hw_domain *domain)
{
        u32 reg = 0;

        DLB2_BITS_SET(reg, domain->num_credits, DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
        DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits_v2(struct dlb2_hw *hw,
                                             struct dlb2_hw_domain *domain)
{
        u32 reg = 0;

        DLB2_BITS_SET(reg, domain->num_ldb_credits,
                      DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
        DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), reg);

        reg = 0;
        DLB2_BITS_SET(reg, domain->num_dir_credits,
                      DLB2_CHP_CFG_DIR_VAS_CRD_COUNT);
        DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
                                          struct dlb2_hw_domain *domain)
{
        if (hw->ver == DLB2_HW_V2)
                dlb2_configure_domain_credits_v2(hw, domain);
        else
                dlb2_configure_domain_credits_v2_5(hw, domain);
}

static int dlb2_attach_credits(struct dlb2_function_resources *rsrcs,
                               struct dlb2_hw_domain *domain,
                               u32 num_credits,
                               struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_entries < num_credits) {
                resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_entries -= num_credits;
        domain->num_credits += num_credits;
        return 0;
}

static struct dlb2_ldb_port *
dlb2_get_next_ldb_port(struct dlb2_hw *hw,
                       struct dlb2_function_resources *rsrcs,
                       u32 domain_id,
                       u32 cos_id)
{
        struct dlb2_list_entry *iter;
        struct dlb2_ldb_port *port;
        RTE_SET_USED(iter);

        /*
         * To reduce the odds of consecutive load-balanced ports mapping to the
         * same queue(s), the driver attempts to allocate ports whose neighbors
         * are owned by a different domain.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[next].owned ||
                    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
                        continue;

                if (!hw->rsrcs.ldb_ports[prev].owned ||
                    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
                        continue;

                return port;
        }

        /*
         * Failing that, the driver looks for a port with one neighbor owned by
         * a different domain and the other unallocated.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[prev].owned &&
                    hw->rsrcs.ldb_ports[next].owned &&
                    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
                        return port;

                if (!hw->rsrcs.ldb_ports[next].owned &&
                    hw->rsrcs.ldb_ports[prev].owned &&
                    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
                        return port;
        }

        /*
         * Failing that, the driver looks for a port with both neighbors
         * unallocated.
         */
        DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
                u32 next, prev;
                u32 phys_id;

                phys_id = port->id.phys_id;
                next = phys_id + 1;
                prev = phys_id - 1;

                if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
                        next = 0;
                if (phys_id == 0)
                        prev = DLB2_MAX_NUM_LDB_PORTS - 1;

                if (!hw->rsrcs.ldb_ports[prev].owned &&
                    !hw->rsrcs.ldb_ports[next].owned)
                        return port;
        }

        /* If all else fails, the driver returns the next available port. */
        return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
                                   typeof(*port));
}
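
/*
 * Note on the neighbor checks above: the LDB port ID space wraps around, so
 * with DLB2_MAX_NUM_LDB_PORTS == 64 the neighbors of port 0 are ports 63 and
 * 1, and the neighbors of port 63 are ports 62 and 0.
 */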

static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
                                   struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_ports,
                                   u32 cos_id,
                                   struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
                resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_ports; i++) {
                struct dlb2_ldb_port *port;

                port = dlb2_get_next_ldb_port(hw, rsrcs,
                                              domain->id.phys_id, cos_id);
                if (port == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
                              &port->func_list);

                port->domain_id = domain->id;
                port->owned = true;

                dlb2_list_add(&domain->avail_ldb_ports[cos_id],
                              &port->domain_list);
        }

        rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;

        return 0;
}

static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
                                 struct dlb2_function_resources *rsrcs,
                                 struct dlb2_hw_domain *domain,
                                 struct dlb2_create_sched_domain_args *args,
                                 struct dlb2_cmd_response *resp)
{
        unsigned int i, j;
        int ret;

        if (args->cos_strict) {
                for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                        u32 num = args->num_cos_ldb_ports[i];

                        /* Allocate ports from specific classes-of-service */
                        ret = __dlb2_attach_ldb_ports(hw,
                                                      rsrcs,
                                                      domain,
                                                      num,
                                                      i,
                                                      resp);
                        if (ret)
                                return ret;
                }
        } else {
                unsigned int k;
                u32 cos_id;

                /*
                 * Attempt to allocate from a specific class-of-service, but
                 * fall back to the other classes if that fails.
                 */
                for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                        for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
                                for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
                                        cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;

                                        ret = __dlb2_attach_ldb_ports(hw,
                                                                      rsrcs,
                                                                      domain,
                                                                      1,
                                                                      cos_id,
                                                                      resp);
                                        if (ret == 0)
                                                break;
                                }

                                if (ret)
                                        return ret;
                        }
                }
        }

        /* Allocate num_ldb_ports from any class-of-service */
        for (i = 0; i < args->num_ldb_ports; i++) {
                for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
                        ret = __dlb2_attach_ldb_ports(hw,
                                                      rsrcs,
                                                      domain,
                                                      1,
                                                      j,
                                                      resp);
                        if (ret == 0)
                                break;
                }

                if (ret)
                        return ret;
        }

        return 0;
}
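
/*
 * Example of the non-strict fallback order above: a port requested in CoS 2
 * is attempted from CoS 2, then 3, 0 and 1 ((i + k) % DLB2_NUM_COS_DOMAINS),
 * and the request fails only once every class-of-service is exhausted.
 */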

static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
                                 struct dlb2_function_resources *rsrcs,
                                 struct dlb2_hw_domain *domain,
                                 u32 num_ports,
                                 struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
                resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_ports; i++) {
                struct dlb2_dir_pq_pair *port;

                port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
                                           typeof(*port));
                if (port == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);

                port->domain_id = domain->id;
                port->owned = true;

                dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
        }

        rsrcs->num_avail_dir_pq_pairs -= num_ports;

        return 0;
}

static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_credits,
                                   struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_qed_entries < num_credits) {
                resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_qed_entries -= num_credits;
        domain->num_ldb_credits += num_credits;
        return 0;
}

static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
                                   struct dlb2_hw_domain *domain,
                                   u32 num_credits,
                                   struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_dqed_entries < num_credits) {
                resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_dqed_entries -= num_credits;
        domain->num_dir_credits += num_credits;
        return 0;
}

static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
                                        struct dlb2_hw_domain *domain,
                                        u32 num_atomic_inflights,
                                        struct dlb2_cmd_response *resp)
{
        if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
                resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
                return -EINVAL;
        }

        rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
        domain->num_avail_aqed_entries += num_atomic_inflights;
        return 0;
}

static int
dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
                                     struct dlb2_hw_domain *domain,
                                     u32 num_hist_list_entries,
                                     struct dlb2_cmd_response *resp)
{
        struct dlb2_bitmap *bitmap;
        int base;

        if (num_hist_list_entries) {
                bitmap = rsrcs->avail_hist_list_entries;

                base = dlb2_bitmap_find_set_bit_range(bitmap,
                                                      num_hist_list_entries);
                if (base < 0)
                        goto error;

                domain->total_hist_list_entries = num_hist_list_entries;
                domain->avail_hist_list_entries = num_hist_list_entries;
                domain->hist_list_entry_base = base;
                domain->hist_list_entry_offset = 0;

                dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
        }
        return 0;

error:
        resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
        return -EINVAL;
}
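
/*
 * Example of the bookkeeping above: if the function's history list bitmap has
 * a free run starting at entry 256 and 128 entries are requested, the domain
 * gets base = 256 and owns entries [256, 383]; those bits are then cleared in
 * the function-wide bitmap so no other domain can claim them.
 */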

static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
                                  struct dlb2_function_resources *rsrcs,
                                  struct dlb2_hw_domain *domain,
                                  u32 num_queues,
                                  struct dlb2_cmd_response *resp)
{
        unsigned int i;

        if (rsrcs->num_avail_ldb_queues < num_queues) {
                resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; i < num_queues; i++) {
                struct dlb2_ldb_queue *queue;

                queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
                                            typeof(*queue));
                if (queue == NULL) {
                        DLB2_HW_ERR(hw,
                                    "[%s()] Internal error: domain validation failed\n",
                                    __func__);
                        return -EFAULT;
                }

                dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);

                queue->domain_id = domain->id;
                queue->owned = true;

                dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
        }

        rsrcs->num_avail_ldb_queues -= num_queues;

        return 0;
}

static int
dlb2_domain_attach_resources(struct dlb2_hw *hw,
                             struct dlb2_function_resources *rsrcs,
                             struct dlb2_hw_domain *domain,
                             struct dlb2_create_sched_domain_args *args,
                             struct dlb2_cmd_response *resp)
{
        int ret;

        ret = dlb2_attach_ldb_queues(hw,
                                     rsrcs,
                                     domain,
                                     args->num_ldb_queues,
                                     resp);
        if (ret)
                return ret;

        ret = dlb2_attach_ldb_ports(hw,
                                    rsrcs,
                                    domain,
                                    args,
                                    resp);
        if (ret)
                return ret;

        ret = dlb2_attach_dir_ports(hw,
                                    rsrcs,
                                    domain,
                                    args->num_dir_ports,
                                    resp);
        if (ret)
                return ret;

        if (hw->ver == DLB2_HW_V2) {
                ret = dlb2_attach_ldb_credits(rsrcs,
                                              domain,
                                              args->num_ldb_credits,
                                              resp);
                if (ret)
                        return ret;

                ret = dlb2_attach_dir_credits(rsrcs,
                                              domain,
                                              args->num_dir_credits,
                                              resp);
                if (ret)
                        return ret;
        } else {  /* DLB 2.5 */
                ret = dlb2_attach_credits(rsrcs,
                                          domain,
                                          args->num_credits,
                                          resp);
                if (ret)
                        return ret;
        }

        ret = dlb2_attach_domain_hist_list_entries(rsrcs,
                                                   domain,
                                                   args->num_hist_list_entries,
                                                   resp);
        if (ret)
                return ret;

        ret = dlb2_attach_atomic_inflights(rsrcs,
                                           domain,
                                           args->num_atomic_inflights,
                                           resp);
        if (ret)
                return ret;

        dlb2_configure_domain_credits(hw, domain);

        domain->configured = true;

        domain->started = false;

        rsrcs->num_avail_domains--;

        return 0;
}

static int
dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
                                  struct dlb2_create_sched_domain_args *args,
                                  struct dlb2_cmd_response *resp,
                                  struct dlb2_hw *hw,
                                  struct dlb2_hw_domain **out_domain)
{
        u32 num_avail_ldb_ports, req_ldb_ports;
        struct dlb2_bitmap *avail_hl_entries;
        unsigned int max_contig_hl_range;
        struct dlb2_hw_domain *domain;
        int i;

        avail_hl_entries = rsrcs->avail_hist_list_entries;

        max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);

        num_avail_ldb_ports = 0;
        req_ldb_ports = 0;
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];

                req_ldb_ports += args->num_cos_ldb_ports[i];
        }

        req_ldb_ports += args->num_ldb_ports;

        if (rsrcs->num_avail_domains < 1) {
                resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
                return -EINVAL;
        }

        domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
        if (domain == NULL) {
                resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
                return -EFAULT;
        }

        if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
                resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
                return -EINVAL;
        }

        if (req_ldb_ports > num_avail_ldb_ports) {
                resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                return -EINVAL;
        }

        for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
                if (args->num_cos_ldb_ports[i] >
                    rsrcs->num_avail_ldb_ports[i]) {
                        resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
                        return -EINVAL;
                }
        }

        if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
                resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
                return -EINVAL;
        }

        if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
                resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
                return -EINVAL;
        }
        if (hw->ver == DLB2_HW_V2_5) {
                if (rsrcs->num_avail_entries < args->num_credits) {
                        resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
                        return -EINVAL;
                }
        } else {
                if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
                        resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
                        return -EINVAL;
                }
                if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
                        resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
                        return -EINVAL;
                }
        }

        if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
                resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
                return -EINVAL;
        }

        if (max_contig_hl_range < args->num_hist_list_entries) {
                resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
                return -EINVAL;
        }

        *out_domain = domain;

        return 0;
}

static void
dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
                                  struct dlb2_create_sched_domain_args *args,
                                  bool vdev_req,
                                  unsigned int vdev_id)
{
        DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
        if (vdev_req)
                DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
        DLB2_HW_DBG(hw, "\tNumber of LDB queues:          %d\n",
                    args->num_ldb_queues);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
                    args->num_ldb_ports);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0):   %d\n",
                    args->num_cos_ldb_ports[0]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1):   %d\n",
                    args->num_cos_ldb_ports[1]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2):   %d\n",
                    args->num_cos_ldb_ports[2]);
        DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3):   %d\n",
                    args->num_cos_ldb_ports[3]);
        DLB2_HW_DBG(hw, "\tStrict CoS allocation:         %d\n",
                    args->cos_strict);
        DLB2_HW_DBG(hw, "\tNumber of DIR ports:           %d\n",
                    args->num_dir_ports);
        DLB2_HW_DBG(hw, "\tNumber of ATM inflights:       %d\n",
                    args->num_atomic_inflights);
        DLB2_HW_DBG(hw, "\tNumber of hist list entries:   %d\n",
                    args->num_hist_list_entries);
        if (hw->ver == DLB2_HW_V2) {
                DLB2_HW_DBG(hw, "\tNumber of LDB credits:         %d\n",
                            args->num_ldb_credits);
                DLB2_HW_DBG(hw, "\tNumber of DIR credits:         %d\n",
                            args->num_dir_credits);
        } else {
                DLB2_HW_DBG(hw, "\tNumber of credits:         %d\n",
                            args->num_credits);
        }
}

/**
 * dlb2_hw_create_sched_domain() - create a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @args: scheduling domain creation arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function creates a scheduling domain containing the resources specified
 * in args. The individual resources (queues, ports, credits) can be configured
 * after creating a scheduling domain.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the domain ID.
 *
 * resp->id contains a virtual ID if vdev_req is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, or the requested domain name
 *          is already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
                                struct dlb2_create_sched_domain_args *args,
                                struct dlb2_cmd_response *resp,
                                bool vdev_req,
                                unsigned int vdev_id)
{
        struct dlb2_function_resources *rsrcs;
        struct dlb2_hw_domain *domain;
        int ret;

        rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

        dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);

        /*
         * Verify that hardware resources are available before attempting to
         * satisfy the request. This simplifies the error unwinding code.
         */
        ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp, hw, &domain);
        if (ret)
                return ret;

        dlb2_init_domain_rsrc_lists(domain);

        ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
        if (ret) {
                DLB2_HW_ERR(hw,
1016                             "[%s()] Internal error: failed to verify args.\n",
                            __func__);

                return ret;
        }

        dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);

        dlb2_list_add(&rsrcs->used_domains, &domain->func_list);

        resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
        resp->status = 0;

        return 0;
}
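
/*
 * Illustrative request sketch (field values are arbitrary examples, and the
 * version-specific credit fields are omitted):
 *
 *        struct dlb2_create_sched_domain_args args = {0};
 *        struct dlb2_cmd_response resp = {0};
 *
 *        args.num_ldb_queues = 2;
 *        args.num_ldb_ports = 4;
 *        args.num_dir_ports = 1;
 *        args.num_atomic_inflights = 2048;
 *        args.num_hist_list_entries = 2048;
 *
 *        if (dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0))
 *                ... resp.status holds a detailed dlb2_error code ...
 *        else
 *                ... resp.id is the new domain's ID ...
 */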

static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
                                     struct dlb2_dir_pq_pair *port)
{
        u32 reg = 0;

        DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
        DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

        dlb2_flush_csr(hw);
}

static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
                                   struct dlb2_dir_pq_pair *port)
{
        u32 cnt;

        cnt = DLB2_CSR_RD(hw,
                          DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));

        /*
         * Account for the initial token count, which is used in order to
         * provide a CQ with depth less than 8.
         */

        return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
               port->init_tkn_cnt;
}

static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
                              struct dlb2_dir_pq_pair *port)
{
        unsigned int port_id = port->id.phys_id;
        u32 cnt;

        /* Return any outstanding tokens */
        cnt = dlb2_dir_cq_token_count(hw, port);

        if (cnt != 0) {
                struct dlb2_hcw hcw_mem[8], *hcw;
                void __iomem *pp_addr;

                pp_addr = os_map_producer_port(hw, port_id, false);

                /* Point hcw to a 64B-aligned location */
                hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

                /*
                 * Program the first HCW for a batch token return and
                 * the rest as NOOPS
                 */
                memset(hcw, 0, 4 * sizeof(*hcw));
                hcw->cq_token = 1;
                hcw->lock_id = cnt - 1;

                dlb2_movdir64b(pp_addr, hcw);

                os_fence_hcw(hw, pp_addr);

                os_unmap_producer_port(hw, pp_addr);
        }
}
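
/*
 * Note on the drain above: the single dlb2_movdir64b() call enqueues four
 * HCWs at once; only the first is a batch token return (lock_id encodes the
 * token count minus one) and the remaining three are NOOPs, so e.g. three
 * outstanding tokens are returned with cq_token = 1 and lock_id = 2.
 */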

static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
                                    struct dlb2_dir_pq_pair *port)
{
        u32 reg = 0;

        DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

        dlb2_flush_csr(hw);
}

static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
                                     struct dlb2_hw_domain *domain,
                                     bool toggle_port)
{
        struct dlb2_list_entry *iter;
        struct dlb2_dir_pq_pair *port;
        RTE_SET_USED(iter);

        DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
                /*
                 * Can't drain a port if it's not configured, and there's
                 * nothing to drain if its queue is unconfigured.
                 */
                if (!port->port_configured || !port->queue_configured)
                        continue;

                if (toggle_port)
                        dlb2_dir_port_cq_disable(hw, port);

                dlb2_drain_dir_cq(hw, port);

                if (toggle_port)
                        dlb2_dir_port_cq_enable(hw, port);
        }

        return 0;
}

static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
                                struct dlb2_dir_pq_pair *queue)
{
        u32 cnt;

        cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
                                                      queue->id.phys_id));

        return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
}

static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
                                    struct dlb2_dir_pq_pair *queue)
{
        return dlb2_dir_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
                                         struct dlb2_hw_domain *domain)
{
        struct dlb2_list_entry *iter;
        struct dlb2_dir_pq_pair *queue;
        RTE_SET_USED(iter);

        DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
                if (!dlb2_dir_queue_is_empty(hw, queue))
                        return false;
        }

        return true;
}

static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
                                        struct dlb2_hw_domain *domain)
{
        int i;

        /* If the domain hasn't been started, there's no traffic to drain */
        if (!domain->started)
                return 0;

        for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
                dlb2_domain_drain_dir_cqs(hw, domain, true);

                if (dlb2_domain_dir_queues_empty(hw, domain))
                        break;
        }

        if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to empty queues\n",
                            __func__);
                return -EFAULT;
        }

        /*
         * Drain the CQs one more time. For the queues to have become empty,
         * they must have scheduled one or more QEs to the CQs.
         */
        dlb2_domain_drain_dir_cqs(hw, domain, true);

        return 0;
}

static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
                                    struct dlb2_ldb_port *port)
{
        u32 reg = 0;

        /*
         * Don't re-enable the port if a removal is pending. The caller should
         * mark this port as enabled (if it isn't already), and when the
         * removal completes the port will be enabled.
         */
        if (port->num_pending_removals)
                return;

        DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

        dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
                                     struct dlb2_ldb_port *port)
{
        u32 reg = 0;

        DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
        DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

        dlb2_flush_csr(hw);
}

static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
                                      struct dlb2_ldb_port *port)
{
        u32 cnt;

        cnt = DLB2_CSR_RD(hw,
                          DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));

        return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
}

static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
                                   struct dlb2_ldb_port *port)
{
        u32 cnt;

        cnt = DLB2_CSR_RD(hw,
                          DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));

        /*
         * Account for the initial token count, which is used in order to
         * provide a CQ with depth less than 8.
         */

        return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
                port->init_tkn_cnt;
}

static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
        u32 infl_cnt, tkn_cnt;
        unsigned int i;

        infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
        tkn_cnt = dlb2_ldb_cq_token_count(hw, port);

        if (infl_cnt || tkn_cnt) {
                struct dlb2_hcw hcw_mem[8], *hcw;
                void __iomem *pp_addr;

                pp_addr = os_map_producer_port(hw, port->id.phys_id, true);

                /* Point hcw to a 64B-aligned location */
                hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

                /*
                 * Program the first HCW for a completion and token return and
                 * the other HCWs as NOOPS
                 */

                memset(hcw, 0, 4 * sizeof(*hcw));
                hcw->qe_comp = (infl_cnt > 0);
                hcw->cq_token = (tkn_cnt > 0);
                hcw->lock_id = tkn_cnt - 1;

                /* Return tokens in the first HCW */
                dlb2_movdir64b(pp_addr, hcw);

                hcw->cq_token = 0;

                /* Issue remaining completions (if any) */
                for (i = 1; i < infl_cnt; i++)
                        dlb2_movdir64b(pp_addr, hcw);

                os_fence_hcw(hw, pp_addr);

                os_unmap_producer_port(hw, pp_addr);
        }
}

static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
                                      struct dlb2_hw_domain *domain,
                                      bool toggle_port)
{
        struct dlb2_list_entry *iter;
        struct dlb2_ldb_port *port;
        int i;
        RTE_SET_USED(iter);

        /* If the domain hasn't been started, there's no traffic to drain */
        if (!domain->started)
                return;

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
                DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
                        if (toggle_port)
                                dlb2_ldb_port_cq_disable(hw, port);

                        dlb2_drain_ldb_cq(hw, port);

                        if (toggle_port)
                                dlb2_ldb_port_cq_enable(hw, port);
                }
        }
}

static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
                                struct dlb2_ldb_queue *queue)
{
        u32 aqed, ldb, atm;

        aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
                                                       queue->id.phys_id));
        ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
                                                      queue->id.phys_id));
        atm = DLB2_CSR_RD(hw,
                          DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));

        return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
               + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
               + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
}

static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
                                    struct dlb2_ldb_queue *queue)
{
        return dlb2_ldb_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
                                            struct dlb2_hw_domain *domain)
{
        struct dlb2_list_entry *iter;
        struct dlb2_ldb_queue *queue;
        RTE_SET_USED(iter);

        DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
                if (queue->num_mappings == 0)
                        continue;

                if (!dlb2_ldb_queue_is_empty(hw, queue))
                        return false;
        }

        return true;
}

static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
                                           struct dlb2_hw_domain *domain)
{
        int i;

        /* If the domain hasn't been started, there's no traffic to drain */
        if (!domain->started)
                return 0;

        if (domain->num_pending_removals > 0) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to unmap domain queues\n",
                            __func__);
                return -EFAULT;
        }

        for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
                dlb2_domain_drain_ldb_cqs(hw, domain, true);

                if (dlb2_domain_mapped_queues_empty(hw, domain))
                        break;
        }

        if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
                DLB2_HW_ERR(hw,
                            "[%s()] Internal error: failed to empty queues\n",
                            __func__);
                return -EFAULT;
        }

        /*
         * Drain the CQs one more time. For the queues to have become empty,
         * they must have scheduled one or more QEs to the CQs.
         */
1395         dlb2_domain_drain_ldb_cqs(hw, domain, true);
1396
1397         return 0;
1398 }
1399
1400 static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
1401                                        struct dlb2_hw_domain *domain)
1402 {
1403         struct dlb2_list_entry *iter;
1404         struct dlb2_ldb_port *port;
1405         int i;
1406         RTE_SET_USED(iter);
1407
1408         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1409                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1410                         port->enabled = true;
1411
1412                         dlb2_ldb_port_cq_enable(hw, port);
1413                 }
1414         }
1415 }
1416
1417 static struct dlb2_ldb_queue *
1418 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
1419                            u32 id,
1420                            bool vdev_req,
1421                            unsigned int vdev_id)
1422 {
1423         struct dlb2_list_entry *iter1;
1424         struct dlb2_list_entry *iter2;
1425         struct dlb2_function_resources *rsrcs;
1426         struct dlb2_hw_domain *domain;
1427         struct dlb2_ldb_queue *queue;
1428         RTE_SET_USED(iter1);
1429         RTE_SET_USED(iter2);
1430
1431         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1432                 return NULL;
1433
1434         rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1435
1436         if (!vdev_req)
1437                 return &hw->rsrcs.ldb_queues[id];
1438
1439         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
1440                 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
1441                         if (queue->id.virt_id == id)
1442                                 return queue;
1443                 }
1444         }
1445
1446         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
1447                 if (queue->id.virt_id == id)
1448                         return queue;
1449         }
1450
1451         return NULL;
1452 }
1453
1454 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
1455                                                       u32 id,
1456                                                       bool vdev_req,
1457                                                       unsigned int vdev_id)
1458 {
1459         struct dlb2_list_entry *iteration;
1460         struct dlb2_function_resources *rsrcs;
1461         struct dlb2_hw_domain *domain;
1462         RTE_SET_USED(iteration);
1463
1464         if (id >= DLB2_MAX_NUM_DOMAINS)
1465                 return NULL;
1466
1467         if (!vdev_req)
1468                 return &hw->domains[id];
1469
1470         rsrcs = &hw->vdev[vdev_id];
1471
1472         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
1473                 if (domain->id.virt_id == id)
1474                         return domain;
1475         }
1476
1477         return NULL;
1478 }
1479
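/*
 * Validate and apply a per-slot QID map state transition. A slot moves between
 * UNMAPPED, MAPPED, MAP_IN_PROG, UNMAP_IN_PROG and UNMAP_IN_PROG_PENDING_MAP;
 * each legal transition adjusts the queue's and port's mapping counts and the
 * domain's pending addition/removal counts, and any other transition is
 * rejected as an internal error (-EFAULT).
 */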
1480 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
1481                                            struct dlb2_ldb_port *port,
1482                                            struct dlb2_ldb_queue *queue,
1483                                            int slot,
1484                                            enum dlb2_qid_map_state new_state)
1485 {
1486         enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
1487         struct dlb2_hw_domain *domain;
1488         int domain_id;
1489
1490         domain_id = port->domain_id.phys_id;
1491
1492         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1493         if (domain == NULL) {
1494                 DLB2_HW_ERR(hw,
1495                             "[%s()] Internal error: unable to find domain %d\n",
1496                             __func__, domain_id);
1497                 return -EINVAL;
1498         }
1499
1500         switch (curr_state) {
1501         case DLB2_QUEUE_UNMAPPED:
1502                 switch (new_state) {
1503                 case DLB2_QUEUE_MAPPED:
1504                         queue->num_mappings++;
1505                         port->num_mappings++;
1506                         break;
1507                 case DLB2_QUEUE_MAP_IN_PROG:
1508                         queue->num_pending_additions++;
1509                         domain->num_pending_additions++;
1510                         break;
1511                 default:
1512                         goto error;
1513                 }
1514                 break;
1515         case DLB2_QUEUE_MAPPED:
1516                 switch (new_state) {
1517                 case DLB2_QUEUE_UNMAPPED:
1518                         queue->num_mappings--;
1519                         port->num_mappings--;
1520                         break;
1521                 case DLB2_QUEUE_UNMAP_IN_PROG:
1522                         port->num_pending_removals++;
1523                         domain->num_pending_removals++;
1524                         break;
1525                 case DLB2_QUEUE_MAPPED:
1526                         /* Priority change, nothing to update */
1527                         break;
1528                 default:
1529                         goto error;
1530                 }
1531                 break;
1532         case DLB2_QUEUE_MAP_IN_PROG:
1533                 switch (new_state) {
1534                 case DLB2_QUEUE_UNMAPPED:
1535                         queue->num_pending_additions--;
1536                         domain->num_pending_additions--;
1537                         break;
1538                 case DLB2_QUEUE_MAPPED:
1539                         queue->num_mappings++;
1540                         port->num_mappings++;
1541                         queue->num_pending_additions--;
1542                         domain->num_pending_additions--;
1543                         break;
1544                 default:
1545                         goto error;
1546                 }
1547                 break;
1548         case DLB2_QUEUE_UNMAP_IN_PROG:
1549                 switch (new_state) {
1550                 case DLB2_QUEUE_UNMAPPED:
1551                         port->num_pending_removals--;
1552                         domain->num_pending_removals--;
1553                         queue->num_mappings--;
1554                         port->num_mappings--;
1555                         break;
1556                 case DLB2_QUEUE_MAPPED:
1557                         port->num_pending_removals--;
1558                         domain->num_pending_removals--;
1559                         break;
1560                 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1561                         /* Nothing to update */
1562                         break;
1563                 default:
1564                         goto error;
1565                 }
1566                 break;
1567         case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1568                 switch (new_state) {
1569                 case DLB2_QUEUE_UNMAP_IN_PROG:
1570                         /* Nothing to update */
1571                         break;
1572                 case DLB2_QUEUE_UNMAPPED:
1573                         /*
1574                          * An UNMAP_IN_PROG_PENDING_MAP slot briefly
1575                          * becomes UNMAPPED before it transitions to
1576                          * MAP_IN_PROG.
1577                          */
1578                         queue->num_mappings--;
1579                         port->num_mappings--;
1580                         port->num_pending_removals--;
1581                         domain->num_pending_removals--;
1582                         break;
1583                 default:
1584                         goto error;
1585                 }
1586                 break;
1587         default:
1588                 goto error;
1589         }
1590
1591         port->qid_map[slot].state = new_state;
1592
1593         DLB2_HW_DBG(hw,
1594                     "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
1595                     __func__, queue->id.phys_id, port->id.phys_id,
1596                     curr_state, new_state);
1597         return 0;
1598
1599 error:
1600         DLB2_HW_ERR(hw,
1601                     "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
1602                     __func__, queue->id.phys_id, port->id.phys_id,
1603                     curr_state, new_state);
1604         return -EFAULT;
1605 }
1606
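/*
 * dlb2_port_find_slot() looks up the first CQ slot in the given map state;
 * dlb2_port_find_slot_queue() additionally requires that the slot refer to
 * the given queue. Both store the scan position in *slot and return true only
 * if a matching slot was found.
 */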
1607 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
1608                                 enum dlb2_qid_map_state state,
1609                                 int *slot)
1610 {
1611         int i;
1612
1613         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1614                 if (port->qid_map[i].state == state)
1615                         break;
1616         }
1617
1618         *slot = i;
1619
1620         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1621 }
1622
1623 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
1624                                       enum dlb2_qid_map_state state,
1625                                       struct dlb2_ldb_queue *queue,
1626                                       int *slot)
1627 {
1628         int i;
1629
1630         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1631                 if (port->qid_map[i].state == state &&
1632                     port->qid_map[i].qid == queue->id.phys_id)
1633                         break;
1634         }
1635
1636         *slot = i;
1637
1638         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1639 }
1640
1641 /*
1642  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as their
1643  * names imply: they only touch CQs that have the queue mapped and are enabled
1644  * in software, and should only be called by the dynamic CQ mapping code.
1645  */
1646 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
1647                                               struct dlb2_hw_domain *domain,
1648                                               struct dlb2_ldb_queue *queue)
1649 {
1650         struct dlb2_list_entry *iter;
1651         struct dlb2_ldb_port *port;
1652         int slot, i;
1653         RTE_SET_USED(iter);
1654
1655         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1656                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1657                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1658
1659                         if (!dlb2_port_find_slot_queue(port, state,
1660                                                        queue, &slot))
1661                                 continue;
1662
1663                         if (port->enabled)
1664                                 dlb2_ldb_port_cq_disable(hw, port);
1665                 }
1666         }
1667 }
1668
1669 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
1670                                              struct dlb2_hw_domain *domain,
1671                                              struct dlb2_ldb_queue *queue)
1672 {
1673         struct dlb2_list_entry *iter;
1674         struct dlb2_ldb_port *port;
1675         int slot, i;
1676         RTE_SET_USED(iter);
1677
1678         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1679                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1680                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1681
1682                         if (!dlb2_port_find_slot_queue(port, state,
1683                                                        queue, &slot))
1684                                 continue;
1685
1686                         if (port->enabled)
1687                                 dlb2_ldb_port_cq_enable(hw, port);
1688                 }
1689         }
1690 }
1691
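/*
 * Clear/set the inflight-OK ("IF status") flag of a {CQ, slot} pair via the
 * LSP_LDB_SCHED_CTRL register: the INFLIGHT_OK_V bit selects the flag to
 * update and VALUE supplies its new value. Each write is flushed with a CSR
 * read (dlb2_flush_csr()).
 */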
1692 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
1693                                                 struct dlb2_ldb_port *port,
1694                                                 int slot)
1695 {
1696         u32 ctrl = 0;
1697
1698         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1699         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1700         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1701
1702         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1703
1704         dlb2_flush_csr(hw);
1705 }
1706
1707 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
1708                                               struct dlb2_ldb_port *port,
1709                                               int slot)
1710 {
1711         u32 ctrl = 0;
1712
1713         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1714         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1715         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1716         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1717
1718         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1719
1720         dlb2_flush_csr(hw);
1721 }
1722
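/*
 * Statically map a queue to a CQ slot: program the slot's priority/valid bits
 * (CQ2PRIOV), its QID (CQ2QID0/1), and the reverse QID->CQ index vectors
 * (ATM_QID2CQIDIX, LSP_QID2CQIDIX/2), then record the mapping in the port's
 * qid_map and transition the slot to MAPPED.
 */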
1723 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
1724                                         struct dlb2_ldb_port *p,
1725                                         struct dlb2_ldb_queue *q,
1726                                         u8 priority)
1727 {
1728         enum dlb2_qid_map_state state;
1729         u32 lsp_qid2cq2;
1730         u32 lsp_qid2cq;
1731         u32 atm_qid2cq;
1732         u32 cq2priov;
1733         u32 cq2qid;
1734         int i;
1735
1736         /* Look for a pending or already mapped slot, else an unused slot */
1737         if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
1738             !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
1739             !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
1740                 DLB2_HW_ERR(hw,
1741                             "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
1742                             __func__, __LINE__);
1743                 return -EFAULT;
1744         }
1745
1746         /* Read-modify-write the priority and valid bit register */
1747         cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));
1748
1749         cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
1750         cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
1751                     & DLB2_LSP_CQ2PRIOV_PRIO;
1752
1753         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);
1754
1755         /* Read-modify-write the QID map register */
1756         if (i < 4)
1757                 cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
1758                                                           p->id.phys_id));
1759         else
1760                 cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
1761                                                           p->id.phys_id));
1762
1763         if (i == 0 || i == 4)
1764                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
1765         if (i == 1 || i == 5)
1766                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
1767         if (i == 2 || i == 6)
1768                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
1769         if (i == 3 || i == 7)
1770                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);
1771
1772         if (i < 4)
1773                 DLB2_CSR_WR(hw,
1774                             DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
1775         else
1776                 DLB2_CSR_WR(hw,
1777                             DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);
1778
1779         atm_qid2cq = DLB2_CSR_RD(hw,
1780                                  DLB2_ATM_QID2CQIDIX(q->id.phys_id,
1781                                                 p->id.phys_id / 4));
1782
1783         lsp_qid2cq = DLB2_CSR_RD(hw,
1784                                  DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
1785                                                 p->id.phys_id / 4));
1786
1787         lsp_qid2cq2 = DLB2_CSR_RD(hw,
1788                                   DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
1789                                                   p->id.phys_id / 4));
1790
1791         switch (p->id.phys_id % 4) {
1792         case 0:
1793                 DLB2_BIT_SET(atm_qid2cq,
1794                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
1795                 DLB2_BIT_SET(lsp_qid2cq,
1796                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
1797                 DLB2_BIT_SET(lsp_qid2cq2,
1798                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
1799                 break;
1800
1801         case 1:
1802                 DLB2_BIT_SET(atm_qid2cq,
1803                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
1804                 DLB2_BIT_SET(lsp_qid2cq,
1805                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
1806                 DLB2_BIT_SET(lsp_qid2cq2,
1807                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
1808                 break;
1809
1810         case 2:
1811                 DLB2_BIT_SET(atm_qid2cq,
1812                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
1813                 DLB2_BIT_SET(lsp_qid2cq,
1814                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
1815                 DLB2_BIT_SET(lsp_qid2cq2,
1816                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
1817                 break;
1818
1819         case 3:
1820                 DLB2_BIT_SET(atm_qid2cq,
1821                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
1822                 DLB2_BIT_SET(lsp_qid2cq,
1823                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
1824                 DLB2_BIT_SET(lsp_qid2cq2,
1825                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
1826                 break;
1827         }
1828
1829         DLB2_CSR_WR(hw,
1830                     DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1831                     atm_qid2cq);
1832
1833         DLB2_CSR_WR(hw,
1834                     DLB2_LSP_QID2CQIDIX(hw->ver,
1835                                         q->id.phys_id, p->id.phys_id / 4),
1836                     lsp_qid2cq);
1837
1838         DLB2_CSR_WR(hw,
1839                     DLB2_LSP_QID2CQIDIX2(hw->ver,
1840                                          q->id.phys_id, p->id.phys_id / 4),
1841                     lsp_qid2cq2);
1842
1843         dlb2_flush_csr(hw);
1844
1845         p->qid_map[i].qid = q->id.phys_id;
1846         p->qid_map[i].priority = priority;
1847
1848         state = DLB2_QUEUE_MAPPED;
1849
1850         return dlb2_port_slot_state_transition(hw, p, q, i, state);
1851 }
1852
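/*
 * Propagate the queue's occupancy to the {CQ, slot}: the atomic has-work flag
 * is derived from the QID's AQED active count, and the non-atomic has-work
 * flag from its load-balanced enqueue count.
 */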
1853 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
1854                                            struct dlb2_ldb_port *port,
1855                                            struct dlb2_ldb_queue *queue,
1856                                            int slot)
1857 {
1858         u32 ctrl = 0;
1859         u32 active;
1860         u32 enq;
1861
1862         /* Set the atomic scheduling haswork bit */
1863         active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
1864                                                          queue->id.phys_id));
1865
1866         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1867         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1868         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1869         DLB2_BITS_SET(ctrl,
1870                       DLB2_BITS_GET(active,
1871                                     DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
1872                       DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
1873
1874         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1875
1876         /* Set the non-atomic scheduling haswork bit */
1877         enq = DLB2_CSR_RD(hw,
1878                           DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
1879                                                        queue->id.phys_id));
1880
1881         memset(&ctrl, 0, sizeof(ctrl));
1882
1883         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1884         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1885         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1886         DLB2_BITS_SET(ctrl,
1887                       DLB2_BITS_GET(enq,
1888                                     DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
1889                       DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
1890
1891         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1892
1893         dlb2_flush_csr(hw);
1894
1895         return 0;
1896 }
1897
1898 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
1899                                               struct dlb2_ldb_port *port,
1900                                               u8 slot)
1901 {
1902         u32 ctrl = 0;
1903
1904         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1905         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1906         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
1907
1908         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1909
1910         memset(&ctrl, 0, sizeof(ctrl));
1911
1912         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1913         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1914         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
1915
1916         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1917
1918         dlb2_flush_csr(hw);
1919 }
1920
1921
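/*
 * Set/clear the per-QID inflight limit: dlb2_ldb_queue_set_inflight_limit()
 * programs the queue's configured num_qid_inflights, and
 * dlb2_ldb_queue_clear_inflight_limit() writes the register's reset value.
 */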
1922 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
1923                                               struct dlb2_ldb_queue *queue)
1924 {
1925         u32 infl_lim = 0;
1926
1927         DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
1928                       DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
1929
1930         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
1931                     infl_lim);
1932 }
1933
1934 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
1935                                                 struct dlb2_ldb_queue *queue)
1936 {
1937         DLB2_CSR_WR(hw,
1938                     DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
1939                     DLB2_LSP_QID_LDB_INFL_LIM_RST);
1940 }
1941
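/*
 * Complete a deferred (dynamic) QID->CQ map once the queue's inflight count
 * has drained to zero: statically program the slot, set its has-work bits,
 * refresh the IF status of every CQ already mapped to the queue, restore the
 * queue's inflight limit, and re-enable the mapped CQs. If further map
 * operations are still pending for this queue, its inflight limit is left
 * cleared so they can also complete.
 */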
1942 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
1943                                                 struct dlb2_hw_domain *domain,
1944                                                 struct dlb2_ldb_port *port,
1945                                                 struct dlb2_ldb_queue *queue)
1946 {
1947         struct dlb2_list_entry *iter;
1948         enum dlb2_qid_map_state state;
1949         int slot, ret, i;
1950         u32 infl_cnt;
1951         u8 prio;
1952         RTE_SET_USED(iter);
1953
1954         infl_cnt = DLB2_CSR_RD(hw,
1955                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
1956                                                     queue->id.phys_id));
1957
1958         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
1959                 DLB2_HW_ERR(hw,
1960                             "[%s()] Internal error: non-zero QID inflight count\n",
1961                             __func__);
1962                 return -EINVAL;
1963         }
1964
1965         /*
1966          * Statically map the queue to the port and set the slot's has_work bits.
1967          */
1968         state = DLB2_QUEUE_MAP_IN_PROG;
1969         if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
1970                 return -EINVAL;
1971
1972         prio = port->qid_map[slot].priority;
1973
1974         /*
1975          * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
1976          * the port's qid_map state.
1977          */
1978         ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
1979         if (ret)
1980                 return ret;
1981
1982         ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
1983         if (ret)
1984                 return ret;
1985
1986         /*
1987          * Ensure IF_status(cq,qid) is 0 before enabling the port, to
1988          * prevent spurious schedules from causing the queue's inflight
1989          * count to increase.
1990          */
1991         dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
1992
1993         /* Reset the queue's inflight status */
1994         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1995                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1996                         state = DLB2_QUEUE_MAPPED;
1997                         if (!dlb2_port_find_slot_queue(port, state,
1998                                                        queue, &slot))
1999                                 continue;
2000
2001                         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2002                 }
2003         }
2004
2005         dlb2_ldb_queue_set_inflight_limit(hw, queue);
2006
2007         /* Re-enable CQs mapped to this queue */
2008         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2009
2010         /* If this queue has other mappings pending, clear its inflight limit */
2011         if (queue->num_pending_additions > 0)
2012                 dlb2_ldb_queue_clear_inflight_limit(hw, queue);
2013
2014         return 0;
2015 }
2016
2017 /**
2018  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
2019  * @hw: dlb2_hw handle for a particular device.
2020  * @port: load-balanced port
2021  * @queue: load-balanced queue
2022  * @priority: queue servicing priority
2023  *
2024  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
2025  * at a later point, and <0 if an error occurred.
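 *
 * The dynamic procedure zeroes the queue's inflight limit, marks the slot
 * MAP_IN_PROG, and completes the map only once the QID's inflight count has
 * drained to zero; otherwise completion is deferred to the OS worker thread.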
2026  */
2027 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
2028                                          struct dlb2_ldb_port *port,
2029                                          struct dlb2_ldb_queue *queue,
2030                                          u8 priority)
2031 {
2032         enum dlb2_qid_map_state state;
2033         struct dlb2_hw_domain *domain;
2034         int domain_id, slot, ret;
2035         u32 infl_cnt;
2036
2037         domain_id = port->domain_id.phys_id;
2038
2039         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
2040         if (domain == NULL) {
2041                 DLB2_HW_ERR(hw,
2042                             "[%s()] Internal error: unable to find domain %d\n",
2043                             __func__, port->domain_id.phys_id);
2044                 return -EINVAL;
2045         }
2046
2047         /*
2048          * Set the QID inflight limit to 0 to prevent further scheduling of the
2049          * queue.
2050          */
2051         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
2052                                                   queue->id.phys_id), 0);
2053
2054         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
2055                 DLB2_HW_ERR(hw,
2056                             "Internal error: No available unmapped slots\n");
2057                 return -EFAULT;
2058         }
2059
2060         port->qid_map[slot].qid = queue->id.phys_id;
2061         port->qid_map[slot].priority = priority;
2062
2063         state = DLB2_QUEUE_MAP_IN_PROG;
2064         ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
2065         if (ret)
2066                 return ret;
2067
2068         infl_cnt = DLB2_CSR_RD(hw,
2069                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2070                                                     queue->id.phys_id));
2071
2072         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2073                 /*
2074                  * The queue is owed completions so it's not safe to map it
2075                  * yet. Schedule a kernel thread to complete the mapping later,
2076                  * once software has completed all the queue's inflight events.
2077                  */
2078                 if (!os_worker_active(hw))
2079                         os_schedule_work(hw);
2080
2081                 return 1;
2082         }
2083
2084         /*
2085          * Disable the affected CQ, and the CQs already mapped to the QID,
2086          * before reading the QID's inflight count a second time. There is an
2087          * unlikely race in which the QID may schedule one more QE after we
2088          * read an inflight count of 0, and disabling the CQs guarantees that
2089          * the race will not occur after a re-read of the inflight count
2090          * register.
2091          */
2092         if (port->enabled)
2093                 dlb2_ldb_port_cq_disable(hw, port);
2094
2095         dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2096
2097         infl_cnt = DLB2_CSR_RD(hw,
2098                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2099                                                     queue->id.phys_id));
2100
2101         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2102                 if (port->enabled)
2103                         dlb2_ldb_port_cq_enable(hw, port);
2104
2105                 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2106
2107                 /*
2108                  * The queue is owed completions so it's not safe to map it
2109                  * yet. Schedule a kernel thread to complete the mapping later,
2110                  * once software has completed all the queue's inflight events.
2111                  */
2112                 if (!os_worker_active(hw))
2113                         os_schedule_work(hw);
2114
2115                 return 1;
2116         }
2117
2118         return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2119 }
2120
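/*
 * For each of the port's slots in MAP_IN_PROG, complete the deferred mapping
 * once the QID's inflight count has drained to zero, re-reading the count
 * with the relevant CQs disabled to close the race in which the QID schedules
 * one final QE.
 */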
2121 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
2122                                         struct dlb2_hw_domain *domain,
2123                                         struct dlb2_ldb_port *port)
2124 {
2125         int i;
2126
2127         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2128                 u32 infl_cnt;
2129                 struct dlb2_ldb_queue *queue;
2130                 int qid;
2131
2132                 if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
2133                         continue;
2134
2135                 qid = port->qid_map[i].qid;
2136
2137                 queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
2138
2139                 if (queue == NULL) {
2140                         DLB2_HW_ERR(hw,
2141                                     "[%s()] Internal error: unable to find queue %d\n",
2142                                     __func__, qid);
2143                         continue;
2144                 }
2145
2146                 infl_cnt = DLB2_CSR_RD(hw,
2147                                        DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2148
2149                 if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
2150                         continue;
2151
2152                 /*
2153                  * Disable the affected CQ, and the CQs already mapped to the
2154                  * QID, before reading the QID's inflight count a second time.
2155                  * There is an unlikely race in which the QID may schedule one
2156                  * more QE after we read an inflight count of 0, and disabling
2157                  * the CQs guarantees that the race will not occur after a
2158                  * re-read of the inflight count register.
2159                  */
2160                 if (port->enabled)
2161                         dlb2_ldb_port_cq_disable(hw, port);
2162
2163                 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2164
2165                 infl_cnt = DLB2_CSR_RD(hw,
2166                                        DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2167
2168                 if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2169                         if (port->enabled)
2170                                 dlb2_ldb_port_cq_enable(hw, port);
2171
2172                         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2173
2174                         continue;
2175                 }
2176
2177                 dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2178         }
2179 }
2180
2181 static unsigned int
2182 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
2183                                       struct dlb2_hw_domain *domain)
2184 {
2185         struct dlb2_list_entry *iter;
2186         struct dlb2_ldb_port *port;
2187         int i;
2188         RTE_SET_USED(iter);
2189
2190         if (!domain->configured || domain->num_pending_additions == 0)
2191                 return 0;
2192
2193         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2194                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2195                         dlb2_domain_finish_map_port(hw, domain, port);
2196         }
2197
2198         return domain->num_pending_additions;
2199 }
2200
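/*
 * Tear down the hardware side of a QID->CQ mapping: clear the slot's valid
 * bit in CQ2PRIOV and remove the CQ from the queue's ATM/LSP QID2CQIDIX
 * vectors, then transition the slot to UNMAPPED.
 */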
2201 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
2202                                    struct dlb2_ldb_port *port,
2203                                    struct dlb2_ldb_queue *queue)
2204 {
2205         enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
2206         u32 lsp_qid2cq2;
2207         u32 lsp_qid2cq;
2208         u32 atm_qid2cq;
2209         u32 cq2priov;
2210         u32 queue_id;
2211         u32 port_id;
2212         int i;
2213
2214         /* Find the queue's slot */
2215         mapped = DLB2_QUEUE_MAPPED;
2216         in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
2217         pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
2218
2219         if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
2220             !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
2221             !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
2222                 DLB2_HW_ERR(hw,
2223                             "[%s():%d] Internal error: QID %d isn't mapped\n",
2224                             __func__, __LINE__, queue->id.phys_id);
2225                 return -EFAULT;
2226         }
2227
2228         port_id = port->id.phys_id;
2229         queue_id = queue->id.phys_id;
2230
2231         /* Read-modify-write the priority and valid bit register */
2232         cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));
2233
2234         cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));
2235
2236         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);
2237
2238         atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
2239                                                          port_id / 4));
2240
2241         lsp_qid2cq = DLB2_CSR_RD(hw,
2242                                  DLB2_LSP_QID2CQIDIX(hw->ver,
2243                                                 queue_id, port_id / 4));
2244
2245         lsp_qid2cq2 = DLB2_CSR_RD(hw,
2246                                   DLB2_LSP_QID2CQIDIX2(hw->ver,
2247                                                   queue_id, port_id / 4));
2248
2249         switch (port_id % 4) {
2250         case 0:
2251                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
2252                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
2253                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
2254                 break;
2255
2256         case 1:
2257                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
2258                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
2259                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
2260                 break;
2261
2262         case 2:
2263                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
2264                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
2265                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
2266                 break;
2267
2268         case 3:
2269                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
2270                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
2271                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
2272                 break;
2273         }
2274
2275         DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);
2276
2277         DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
2278                     lsp_qid2cq);
2279
2280         DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
2281                     lsp_qid2cq2);
2282
2283         dlb2_flush_csr(hw);
2284
2285         unmapped = DLB2_QUEUE_UNMAPPED;
2286
2287         return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
2288 }
2289
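/*
 * Map a queue to a port: before the domain is started the mapping can be
 * programmed directly (static path); once traffic may be flowing, the dynamic
 * procedure above is used so the queue's in-flight events drain first.
 */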
2290 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
2291                                  struct dlb2_hw_domain *domain,
2292                                  struct dlb2_ldb_port *port,
2293                                  struct dlb2_ldb_queue *queue,
2294                                  u8 prio)
2295 {
2296         if (domain->started)
2297                 return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
2298         else
2299                 return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2300 }
2301
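/*
 * Finish unmapping one {CQ, slot}: undo the hardware mapping, clear the
 * slot's has-work bits, restore its default IF status, re-enable the CQ if
 * software still considers it enabled, and, if a map request was queued
 * behind this unmap, start that mapping now.
 */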
2302 static void
2303 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
2304                                    struct dlb2_hw_domain *domain,
2305                                    struct dlb2_ldb_port *port,
2306                                    int slot)
2307 {
2308         enum dlb2_qid_map_state state;
2309         struct dlb2_ldb_queue *queue;
2310
2311         queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
2312
2313         state = port->qid_map[slot].state;
2314
2315         /* Update the QID2CQIDX and CQ2QID vectors */
2316         dlb2_ldb_port_unmap_qid(hw, port, queue);
2317
2318         /*
2319          * Ensure the QID will not be serviced by this {CQ, slot} by clearing
2320          * the has_work bits
2321          */
2322         dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
2323
2324         /* Reset the {CQ, slot} to its default state */
2325         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2326
2327         /* Re-enable the CQ if it was not manually disabled by the user */
2328         if (port->enabled)
2329                 dlb2_ldb_port_cq_enable(hw, port);
2330
2331         /*
2332          * If there is a mapping that is pending this slot's removal, perform
2333          * the mapping now.
2334          */
2335         if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
2336                 struct dlb2_ldb_port_qid_map *map;
2337                 struct dlb2_ldb_queue *map_queue;
2338                 u8 prio;
2339
2340                 map = &port->qid_map[slot];
2341
2342                 map->qid = map->pending_qid;
2343                 map->priority = map->pending_priority;
2344
2345                 map_queue = &hw->rsrcs.ldb_queues[map->qid];
2346                 prio = map->priority;
2347
2348                 dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
2349         }
2350 }
2351
2352
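/*
 * Returns false if nothing could be done (no pending removals, or the CQ
 * still has inflight events); otherwise finishes every slot in
 * UNMAP_IN_PROG or UNMAP_IN_PROG_PENDING_MAP and returns true.
 */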
2353 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
2354                                           struct dlb2_hw_domain *domain,
2355                                           struct dlb2_ldb_port *port)
2356 {
2357         u32 infl_cnt;
2358         int i;
2359
2360         if (port->num_pending_removals == 0)
2361                 return false;
2362
2363         /*
2364          * The unmap requires all the CQ's outstanding inflights to be
2365          * completed.
2366          */
2367         infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
2368                                                        port->id.phys_id));
2369         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
2370                 return false;
2371
2372         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2373                 struct dlb2_ldb_port_qid_map *map;
2374
2375                 map = &port->qid_map[i];
2376
2377                 if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
2378                     map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
2379                         continue;
2380
2381                 dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
2382         }
2383
2384         return true;
2385 }
2386
2387 static unsigned int
2388 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
2389                                         struct dlb2_hw_domain *domain)
2390 {
2391         struct dlb2_list_entry *iter;
2392         struct dlb2_ldb_port *port;
2393         int i;
2394         RTE_SET_USED(iter);
2395
2396         if (!domain->configured || domain->num_pending_removals == 0)
2397                 return 0;
2398
2399         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2400                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2401                         dlb2_domain_finish_unmap_port(hw, domain, port);
2402         }
2403
2404         return domain->num_pending_removals;
2405 }
2406
2407 static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
2408                                         struct dlb2_hw_domain *domain)
2409 {
2410         struct dlb2_list_entry *iter;
2411         struct dlb2_ldb_port *port;
2412         int i;
2413         RTE_SET_USED(iter);
2414
2415         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2416                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2417                         port->enabled = false;
2418
2419                         dlb2_ldb_port_cq_disable(hw, port);
2420                 }
2421         }
2422 }
2423
2424
2425 static void dlb2_log_reset_domain(struct dlb2_hw *hw,
2426                                   u32 domain_id,
2427                                   bool vdev_req,
2428                                   unsigned int vdev_id)
2429 {
2430         DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
2431         if (vdev_req)
2432                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2433         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2434 }
2435
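/*
 * The next two helpers invalidate the vdev's virtual producer port (VPP)
 * mappings for the domain's directed and load-balanced ports by writing 0 to
 * the corresponding VPP_V registers; the virtual ID used in the register
 * offset depends on whether the device is in SR-IOV or Scalable IOV mode.
 */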
2436 static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
2437                                          struct dlb2_hw_domain *domain,
2438                                          unsigned int vdev_id)
2439 {
2440         struct dlb2_list_entry *iter;
2441         struct dlb2_dir_pq_pair *port;
2442         u32 vpp_v = 0;
2443         RTE_SET_USED(iter);
2444
2445         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2446                 unsigned int offs;
2447                 u32 virt_id;
2448
2449                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2450                         virt_id = port->id.virt_id;
2451                 else
2452                         virt_id = port->id.phys_id;
2453
2454                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
2455
2456                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
2457         }
2458 }
2459
2460 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2461                                          struct dlb2_hw_domain *domain,
2462                                          unsigned int vdev_id)
2463 {
2464         struct dlb2_list_entry *iter;
2465         struct dlb2_ldb_port *port;
2466         u32 vpp_v = 0;
2467         int i;
2468         RTE_SET_USED(iter);
2469
2470         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2471                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2472                         unsigned int offs;
2473                         u32 virt_id;
2474
2475                         if (hw->virt_mode == DLB2_VIRT_SRIOV)
2476                                 virt_id = port->id.virt_id;
2477                         else
2478                                 virt_id = port->id.phys_id;
2479
2480                         offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2481
2482                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
2483                 }
2484         }
2485 }
2486
2487 static void
2488 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2489                                         struct dlb2_hw_domain *domain)
2490 {
2491         struct dlb2_list_entry *iter;
2492         struct dlb2_ldb_port *port;
2493         u32 int_en = 0;
2494         u32 wd_en = 0;
2495         int i;
2496         RTE_SET_USED(iter);
2497
2498         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2499                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2500                         DLB2_CSR_WR(hw,
2501                                     DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,
2502                                                        port->id.phys_id),
2503                                     int_en);
2504
2505                         DLB2_CSR_WR(hw,
2506                                     DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,
2507                                                       port->id.phys_id),
2508                                     wd_en);
2509                 }
2510         }
2511 }
2512
2513 static void
2514 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2515                                         struct dlb2_hw_domain *domain)
2516 {
2517         struct dlb2_list_entry *iter;
2518         struct dlb2_dir_pq_pair *port;
2519         u32 int_en = 0;
2520         u32 wd_en = 0;
2521         RTE_SET_USED(iter);
2522
2523         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2524                 DLB2_CSR_WR(hw,
2525                             DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2526                             int_en);
2527
2528                 DLB2_CSR_WR(hw,
2529                             DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
2530                             wd_en);
2531         }
2532 }
2533
2534 static void
2535 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2536                                           struct dlb2_hw_domain *domain)
2537 {
2538         int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2539         struct dlb2_list_entry *iter;
2540         struct dlb2_ldb_queue *queue;
2541         RTE_SET_USED(iter);
2542
2543         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2544                 int idx = domain_offset + queue->id.phys_id;
2545
2546                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);
2547
2548                 if (queue->id.vdev_owned) {
2549                         DLB2_CSR_WR(hw,
2550                                     DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
2551                                     0);
2552
2553                         idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2554                                 queue->id.virt_id;
2555
2556                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);
2557
2558                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
2559                 }
2560         }
2561 }
2562
2563 static void
2564 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2565                                           struct dlb2_hw_domain *domain)
2566 {
2567         struct dlb2_list_entry *iter;
2568         struct dlb2_dir_pq_pair *queue;
2569         unsigned long max_ports;
2570         int domain_offset;
2571         RTE_SET_USED(iter);
2572
2573         max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
2574
2575         domain_offset = domain->id.phys_id * max_ports;
2576
2577         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2578                 int idx = domain_offset + queue->id.phys_id;
2579
2580                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);
2581
2582                 if (queue->id.vdev_owned) {
2583                         idx = queue->id.vdev_id * max_ports + queue->id.virt_id;
2584
2585                         DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);
2586
2587                         DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
2588                 }
2589         }
2590 }
2591
2592 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2593                                                struct dlb2_hw_domain *domain)
2594 {
2595         struct dlb2_list_entry *iter;
2596         struct dlb2_ldb_port *port;
2597         u32 chk_en = 0;
2598         int i;
2599         RTE_SET_USED(iter);
2600
2601         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2602                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2603                         DLB2_CSR_WR(hw,
2604                                     DLB2_CHP_SN_CHK_ENBL(hw->ver,
2605                                                          port->id.phys_id),
2606                                     chk_en);
2607                 }
2608         }
2609 }
2610
2611 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2612                                                  struct dlb2_hw_domain *domain)
2613 {
2614         struct dlb2_list_entry *iter;
2615         struct dlb2_ldb_port *port;
2616         int i;
2617         RTE_SET_USED(iter);
2618
2619         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2620                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2621                         int j;
2622
2623                         for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
2624                                 if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2625                                         break;
2626                         }
2627
2628                         if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2629                                 DLB2_HW_ERR(hw,
2630                                             "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2631                                             __func__, port->id.phys_id);
2632                                 return -EFAULT;
2633                         }
2634                 }
2635         }
2636
2637         return 0;
2638 }
2639
2640 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2641                                         struct dlb2_hw_domain *domain)
2642 {
2643         struct dlb2_list_entry *iter;
2644         struct dlb2_dir_pq_pair *port;
2645         RTE_SET_USED(iter);
2646
2647         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2648                 port->enabled = false;
2649
2650                 dlb2_dir_port_cq_disable(hw, port);
2651         }
2652 }
2653
2654 static void
2655 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2656                                        struct dlb2_hw_domain *domain)
2657 {
2658         struct dlb2_list_entry *iter;
2659         struct dlb2_dir_pq_pair *port;
2660         u32 pp_v = 0;
2661         RTE_SET_USED(iter);
2662
2663         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2664                 DLB2_CSR_WR(hw,
2665                             DLB2_SYS_DIR_PP_V(port->id.phys_id),
2666                             pp_v);
2667         }
2668 }
2669
2670 static void
2671 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2672                                        struct dlb2_hw_domain *domain)
2673 {
2674         struct dlb2_list_entry *iter;
2675         struct dlb2_ldb_port *port;
2676         u32 pp_v = 0;
2677         int i;
2678         RTE_SET_USED(iter);
2679
2680         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2681                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2682                         DLB2_CSR_WR(hw,
2683                                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2684                                     pp_v);
2685                 }
2686         }
2687 }
2688
2689 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2690                                             struct dlb2_hw_domain *domain)
2691 {
2692         struct dlb2_list_entry *iter;
2693         struct dlb2_dir_pq_pair *dir_port;
2694         struct dlb2_ldb_port *ldb_port;
2695         struct dlb2_ldb_queue *queue;
2696         int i;
2697         RTE_SET_USED(iter);
2698
2699         /*
2700          * Confirm that all the domain's queues' inflight counts and AQED
2701          * active counts are 0.
2702          */
2703         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2704                 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2705                         DLB2_HW_ERR(hw,
2706                                     "[%s()] Internal error: failed to empty ldb queue %d\n",
2707                                     __func__, queue->id.phys_id);
2708                         return -EFAULT;
2709                 }
2710         }
2711
2712         /* Confirm that all the domain's CQs' inflight and token counts are 0. */
2713         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2714                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2715                         if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2716                             dlb2_ldb_cq_token_count(hw, ldb_port)) {
2717                                 DLB2_HW_ERR(hw,
2718                                             "[%s()] Internal error: failed to empty ldb port %d\n",
2719                                             __func__, ldb_port->id.phys_id);
2720                                 return -EFAULT;
2721                         }
2722                 }
2723         }
2724
2725         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2726                 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2727                         DLB2_HW_ERR(hw,
2728                                     "[%s()] Internal error: failed to empty dir queue %d\n",
2729                                     __func__, dir_port->id.phys_id);
2730                         return -EFAULT;
2731                 }
2732
2733                 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2734                         DLB2_HW_ERR(hw,
2735                                     "[%s()] Internal error: failed to empty dir port %d\n",
2736                                     __func__, dir_port->id.phys_id);
2737                         return -EFAULT;
2738                 }
2739         }
2740
2741         return 0;
2742 }
2743
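/*
 * Return every per-port CSR (producer port mappings, CQ address/depth,
 * history list bounds and pointers, interrupt thresholds/enables, token and
 * inflight counts/limits, CQ2QID and CQ2PRIOV) to its reset default,
 * including the VF/vdev producer-port mapping when the port is vdev-owned.
 */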
2744 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2745                                                    struct dlb2_ldb_port *port)
2746 {
2747         DLB2_CSR_WR(hw,
2748                     DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2749                     DLB2_SYS_LDB_PP2VAS_RST);
2750
2751         DLB2_CSR_WR(hw,
2752                     DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
2753                     DLB2_CHP_LDB_CQ2VAS_RST);
2754
2755         DLB2_CSR_WR(hw,
2756                     DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2757                     DLB2_SYS_LDB_PP2VDEV_RST);
2758
2759         if (port->id.vdev_owned) {
2760                 unsigned int offs;
2761                 u32 virt_id;
2762
2763                 /*
2764                  * DLB uses producer port address bits 17:12 to determine the
2765                  * producer port ID. In Scalable IOV mode, PP accesses come
2766                  * through the PF MMIO window for the physical producer port,
2767                  * so for translation purposes the virtual and physical port
2768                  * IDs are equal.
2769                  */
2770                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2771                         virt_id = port->id.virt_id;
2772                 else
2773                         virt_id = port->id.phys_id;
2774
2775                 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2776
2777                 DLB2_CSR_WR(hw,
2778                             DLB2_SYS_VF_LDB_VPP2PP(offs),
2779                             DLB2_SYS_VF_LDB_VPP2PP_RST);
2780
2781                 DLB2_CSR_WR(hw,
2782                             DLB2_SYS_VF_LDB_VPP_V(offs),
2783                             DLB2_SYS_VF_LDB_VPP_V_RST);
2784         }
2785
2786         DLB2_CSR_WR(hw,
2787                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2788                     DLB2_SYS_LDB_PP_V_RST);
2789
2790         DLB2_CSR_WR(hw,
2791                     DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
2792                     DLB2_LSP_CQ_LDB_DSBL_RST);
2793
2794         DLB2_CSR_WR(hw,
2795                     DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
2796                     DLB2_CHP_LDB_CQ_DEPTH_RST);
2797
2798         if (hw->ver != DLB2_HW_V2)
2799                 DLB2_CSR_WR(hw,
2800                             DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
2801                             DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
2802
2803         DLB2_CSR_WR(hw,
2804                     DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
2805                     DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2806
2807         DLB2_CSR_WR(hw,
2808                     DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
2809                     DLB2_CHP_HIST_LIST_LIM_RST);
2810
2811         DLB2_CSR_WR(hw,
2812                     DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
2813                     DLB2_CHP_HIST_LIST_BASE_RST);
2814
2815         DLB2_CSR_WR(hw,
2816                     DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
2817                     DLB2_CHP_HIST_LIST_POP_PTR_RST);
2818
2819         DLB2_CSR_WR(hw,
2820                     DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
2821                     DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2822
2823         DLB2_CSR_WR(hw,
2824                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2825                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2826
2827         DLB2_CSR_WR(hw,
2828                     DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2829                     DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2830
2831         DLB2_CSR_WR(hw,
2832                     DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
2833                     DLB2_CHP_LDB_CQ_INT_ENB_RST);
2834
2835         DLB2_CSR_WR(hw,
2836                     DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2837                     DLB2_SYS_LDB_CQ_ISR_RST);
2838
2839         DLB2_CSR_WR(hw,
2840                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2841                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2842
2843         DLB2_CSR_WR(hw,
2844                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2845                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2846
2847         DLB2_CSR_WR(hw,
2848                     DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
2849                     DLB2_CHP_LDB_CQ_WPTR_RST);
2850
2851         DLB2_CSR_WR(hw,
2852                     DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
2853                     DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2854
2855         DLB2_CSR_WR(hw,
2856                     DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2857                     DLB2_SYS_LDB_CQ_ADDR_L_RST);
2858
2859         DLB2_CSR_WR(hw,
2860                     DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2861                     DLB2_SYS_LDB_CQ_ADDR_U_RST);
2862
2863         if (hw->ver == DLB2_HW_V2)
2864                 DLB2_CSR_WR(hw,
2865                             DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2866                             DLB2_SYS_LDB_CQ_AT_RST);
2867
2868         DLB2_CSR_WR(hw,
2869                     DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
2870                     DLB2_SYS_LDB_CQ_PASID_RST);
2871
2872         DLB2_CSR_WR(hw,
2873                     DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2874                     DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2875
2876         DLB2_CSR_WR(hw,
2877                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
2878                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2879
2880         DLB2_CSR_WR(hw,
2881                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
2882                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2883
2884         DLB2_CSR_WR(hw,
2885                     DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
2886                     DLB2_LSP_CQ2QID0_RST);
2887
2888         DLB2_CSR_WR(hw,
2889                     DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
2890                     DLB2_LSP_CQ2QID1_RST);
2891
2892         DLB2_CSR_WR(hw,
2893                     DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
2894                     DLB2_LSP_CQ2PRIOV_RST);
2895 }
2896
2897 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2898                                                  struct dlb2_hw_domain *domain)
2899 {
2900         struct dlb2_list_entry *iter;
2901         struct dlb2_ldb_port *port;
2902         int i;
2903         RTE_SET_USED(iter);
2904
2905         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2906                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2907                         __dlb2_domain_reset_ldb_port_registers(hw, port);
2908         }
2909 }
2910
2911 static void
2912 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2913                                        struct dlb2_dir_pq_pair *port)
2914 {
2915         u32 reg = 0;
2916
2917         DLB2_CSR_WR(hw,
2918                     DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
2919                     DLB2_CHP_DIR_CQ2VAS_RST);
2920
2921         DLB2_CSR_WR(hw,
2922                     DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
2923                     DLB2_LSP_CQ_DIR_DSBL_RST);
2924
2925         DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);
2926
2927         if (hw->ver == DLB2_HW_V2)
2928                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2929         else
2930                 DLB2_CSR_WR(hw,
2931                             DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
2932
2933         DLB2_CSR_WR(hw,
2934                     DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
2935                     DLB2_CHP_DIR_CQ_DEPTH_RST);
2936
2937         DLB2_CSR_WR(hw,
2938                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2939                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2940
2941         DLB2_CSR_WR(hw,
2942                     DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2943                     DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2944
2945         DLB2_CSR_WR(hw,
2946                     DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2947                     DLB2_CHP_DIR_CQ_INT_ENB_RST);
2948
2949         DLB2_CSR_WR(hw,
2950                     DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2951                     DLB2_SYS_DIR_CQ_ISR_RST);
2952
2953         DLB2_CSR_WR(hw,
2954                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
2955                                                       port->id.phys_id),
2956                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2957
2958         DLB2_CSR_WR(hw,
2959                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2960                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2961
2962         DLB2_CSR_WR(hw,
2963                     DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
2964                     DLB2_CHP_DIR_CQ_WPTR_RST);
2965
2966         DLB2_CSR_WR(hw,
2967                     DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
2968                     DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2969
2970         DLB2_CSR_WR(hw,
2971                     DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2972                     DLB2_SYS_DIR_CQ_ADDR_L_RST);
2973
2974         DLB2_CSR_WR(hw,
2975                     DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2976                     DLB2_SYS_DIR_CQ_ADDR_U_RST);
2977
2978         DLB2_CSR_WR(hw,
2979                     DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2980                     DLB2_SYS_DIR_CQ_AT_RST);
2981
2987         DLB2_CSR_WR(hw,
2988                     DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
2989                     DLB2_SYS_DIR_CQ_PASID_RST);
2990
2991         DLB2_CSR_WR(hw,
2992                     DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2993                     DLB2_SYS_DIR_CQ_FMT_RST);
2994
2995         DLB2_CSR_WR(hw,
2996                     DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2997                     DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
2998
2999         DLB2_CSR_WR(hw,
3000                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3001                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
3002
3003         DLB2_CSR_WR(hw,
3004                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3005                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
3006
3007         DLB2_CSR_WR(hw,
3008                     DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
3009                     DLB2_SYS_DIR_PP2VAS_RST);
3010
3011         DLB2_CSR_WR(hw,
3012                     DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
3013                     DLB2_CHP_DIR_CQ2VAS_RST);
3014
3015         DLB2_CSR_WR(hw,
3016                     DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
3017                     DLB2_SYS_DIR_PP2VDEV_RST);
3018
3019         if (port->id.vdev_owned) {
3020                 unsigned int offs;
3021                 u32 virt_id;
3022
3023                 /*
3024                  * DLB uses producer port address bits 17:12 to determine the
3025                  * producer port ID. In Scalable IOV mode, PP accesses come
3026                  * through the PF MMIO window for the physical producer port,
3027                  * so for translation purposes the virtual and physical port
3028                  * IDs are equal.
3029                  */
3030                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3031                         virt_id = port->id.virt_id;
3032                 else
3033                         virt_id = port->id.phys_id;
3034
3035                 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
3036                         virt_id;
3037
3038                 DLB2_CSR_WR(hw,
3039                             DLB2_SYS_VF_DIR_VPP2PP(offs),
3040                             DLB2_SYS_VF_DIR_VPP2PP_RST);
3041
3042                 DLB2_CSR_WR(hw,
3043                             DLB2_SYS_VF_DIR_VPP_V(offs),
3044                             DLB2_SYS_VF_DIR_VPP_V_RST);
3045         }
3046
3047         DLB2_CSR_WR(hw,
3048                     DLB2_SYS_DIR_PP_V(port->id.phys_id),
3049                     DLB2_SYS_DIR_PP_V_RST);
3050 }
3051
3052 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3053                                                  struct dlb2_hw_domain *domain)
3054 {
3055         struct dlb2_list_entry *iter;
3056         struct dlb2_dir_pq_pair *port;
3057         RTE_SET_USED(iter);
3058
3059         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3060                 __dlb2_domain_reset_dir_port_registers(hw, port);
3061 }
3062
3063 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
3064                                                   struct dlb2_hw_domain *domain)
3065 {
3066         struct dlb2_list_entry *iter;
3067         struct dlb2_ldb_queue *queue;
3068         RTE_SET_USED(iter);
3069
3070         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3071                 unsigned int queue_id = queue->id.phys_id;
3072                 int i;
3073
3074                 DLB2_CSR_WR(hw,
3075                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
3076                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3077
3078                 DLB2_CSR_WR(hw,
3079                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
3080                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3081
3082                 DLB2_CSR_WR(hw,
3083                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
3084                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3085
3086                 DLB2_CSR_WR(hw,
3087                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
3088                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3089
3090                 DLB2_CSR_WR(hw,
3091                             DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
3092                             DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3093
3094                 DLB2_CSR_WR(hw,
3095                             DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
3096                             DLB2_LSP_QID_LDB_INFL_LIM_RST);
3097
3098                 DLB2_CSR_WR(hw,
3099                             DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
3100                             DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3101
3102                 DLB2_CSR_WR(hw,
3103                             DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
3104                             DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3105
3106                 DLB2_CSR_WR(hw,
3107                             DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
3108                             DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3109
3110                 DLB2_CSR_WR(hw,
3111                             DLB2_SYS_LDB_QID_ITS(queue_id),
3112                             DLB2_SYS_LDB_QID_ITS_RST);
3113
3114                 DLB2_CSR_WR(hw,
3115                             DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
3116                             DLB2_CHP_ORD_QID_SN_RST);
3117
3118                 DLB2_CSR_WR(hw,
3119                             DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
3120                             DLB2_CHP_ORD_QID_SN_MAP_RST);
3121
3122                 DLB2_CSR_WR(hw,
3123                             DLB2_SYS_LDB_QID_V(queue_id),
3124                             DLB2_SYS_LDB_QID_V_RST);
3125
3126                 DLB2_CSR_WR(hw,
3127                             DLB2_SYS_LDB_QID_CFG_V(queue_id),
3128                             DLB2_SYS_LDB_QID_CFG_V_RST);
3129
3130                 if (queue->sn_cfg_valid) {
3131                         u32 offs[2];
3132
3133                         offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
3134                                                          queue->sn_slot);
3135                         offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
3136                                                          queue->sn_slot);
3137
3138                         DLB2_CSR_WR(hw,
3139                                     offs[queue->sn_group],
3140                                     DLB2_RO_GRP_0_SLT_SHFT_RST);
3141                 }
3142
3143                 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3144                         DLB2_CSR_WR(hw,
3145                                     DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
3146                                     DLB2_LSP_QID2CQIDIX_00_RST);
3147
3148                         DLB2_CSR_WR(hw,
3149                                     DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
3150                                     DLB2_LSP_QID2CQIDIX2_00_RST);
3151
3152                         DLB2_CSR_WR(hw,
3153                                     DLB2_ATM_QID2CQIDIX(queue_id, i),
3154                                     DLB2_ATM_QID2CQIDIX_00_RST);
3155                 }
3156         }
3157 }
3158
3159 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3160                                                   struct dlb2_hw_domain *domain)
3161 {
3162         struct dlb2_list_entry *iter;
3163         struct dlb2_dir_pq_pair *queue;
3164         RTE_SET_USED(iter);
3165
3166         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3167                 DLB2_CSR_WR(hw,
3168                             DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
3169                                                        queue->id.phys_id),
3170                             DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3171
3172                 DLB2_CSR_WR(hw,
3173                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
3174                                                           queue->id.phys_id),
3175                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3176
3177                 DLB2_CSR_WR(hw,
3178                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
3179                                                           queue->id.phys_id),
3180                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3181
3182                 DLB2_CSR_WR(hw,
3183                             DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
3184                                                          queue->id.phys_id),
3185                             DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3186
3187                 DLB2_CSR_WR(hw,
3188                             DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3189                             DLB2_SYS_DIR_QID_ITS_RST);
3190
3191                 DLB2_CSR_WR(hw,
3192                             DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3193                             DLB2_SYS_DIR_QID_V_RST);
3194         }
3195 }
3196
3201 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3202                                         struct dlb2_hw_domain *domain)
3203 {
3204         dlb2_domain_reset_ldb_port_registers(hw, domain);
3205
3206         dlb2_domain_reset_dir_port_registers(hw, domain);
3207
3208         dlb2_domain_reset_ldb_queue_registers(hw, domain);
3209
3210         dlb2_domain_reset_dir_queue_registers(hw, domain);
3211
3212         if (hw->ver == DLB2_HW_V2) {
3213                 DLB2_CSR_WR(hw,
3214                             DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3215                             DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3216
3217                 DLB2_CSR_WR(hw,
3218                             DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3219                             DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3220         } else
3221                 DLB2_CSR_WR(hw,
3222                             DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
3223                             DLB2_CHP_CFG_VAS_CRD_RST);
3224 }
3225
3226 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3227                                             struct dlb2_hw_domain *domain)
3228 {
3229         struct dlb2_dir_pq_pair *tmp_dir_port;
3230         struct dlb2_ldb_queue *tmp_ldb_queue;
3231         struct dlb2_ldb_port *tmp_ldb_port;
3232         struct dlb2_list_entry *iter1;
3233         struct dlb2_list_entry *iter2;
3234         struct dlb2_function_resources *rsrcs;
3235         struct dlb2_dir_pq_pair *dir_port;
3236         struct dlb2_ldb_queue *ldb_queue;
3237         struct dlb2_ldb_port *ldb_port;
3238         struct dlb2_list_head *list;
3239         int ret, i;
3240         RTE_SET_USED(tmp_dir_port);
3241         RTE_SET_USED(tmp_ldb_queue);
3242         RTE_SET_USED(tmp_ldb_port);
3243         RTE_SET_USED(iter1);
3244         RTE_SET_USED(iter2);
3245
3246         rsrcs = domain->parent_func;
3247
3248         /* Move the domain's ldb queues to the function's avail list */
3249         list = &domain->used_ldb_queues;
3250         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3251                 if (ldb_queue->sn_cfg_valid) {
3252                         struct dlb2_sn_group *grp;
3253
3254                         grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3255
3256                         dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3257                         ldb_queue->sn_cfg_valid = false;
3258                 }
3259
3260                 ldb_queue->owned = false;
3261                 ldb_queue->num_mappings = 0;
3262                 ldb_queue->num_pending_additions = 0;
3263
3264                 dlb2_list_del(&domain->used_ldb_queues,
3265                               &ldb_queue->domain_list);
3266                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3267                               &ldb_queue->func_list);
3268                 rsrcs->num_avail_ldb_queues++;
3269         }
3270
3271         list = &domain->avail_ldb_queues;
3272         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3273                 ldb_queue->owned = false;
3274
3275                 dlb2_list_del(&domain->avail_ldb_queues,
3276                               &ldb_queue->domain_list);
3277                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3278                               &ldb_queue->func_list);
3279                 rsrcs->num_avail_ldb_queues++;
3280         }
3281
3282         /* Move the domain's ldb ports to the function's avail list */
3283         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3284                 list = &domain->used_ldb_ports[i];
3285                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3286                                        iter1, iter2) {
3287                         int j;
3288
3289                         ldb_port->owned = false;
3290                         ldb_port->configured = false;
3291                         ldb_port->num_pending_removals = 0;
3292                         ldb_port->num_mappings = 0;
3293                         ldb_port->init_tkn_cnt = 0;
3294                         ldb_port->cq_depth = 0;
3295                         for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3296                                 ldb_port->qid_map[j].state =
3297                                         DLB2_QUEUE_UNMAPPED;
3298
3299                         dlb2_list_del(&domain->used_ldb_ports[i],
3300                                       &ldb_port->domain_list);
3301                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3302                                       &ldb_port->func_list);
3303                         rsrcs->num_avail_ldb_ports[i]++;
3304                 }
3305
3306                 list = &domain->avail_ldb_ports[i];
3307                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3308                                        iter1, iter2) {
3309                         ldb_port->owned = false;
3310
3311                         dlb2_list_del(&domain->avail_ldb_ports[i],
3312                                       &ldb_port->domain_list);
3313                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3314                                       &ldb_port->func_list);
3315                         rsrcs->num_avail_ldb_ports[i]++;
3316                 }
3317         }
3318
3319         /* Move the domain's dir ports to the function's avail list */
3320         list = &domain->used_dir_pq_pairs;
3321         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3322                 dir_port->owned = false;
3323                 dir_port->port_configured = false;
3324                 dir_port->init_tkn_cnt = 0;
3325
3326                 dlb2_list_del(&domain->used_dir_pq_pairs,
3327                               &dir_port->domain_list);
3328
3329                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3330                               &dir_port->func_list);
3331                 rsrcs->num_avail_dir_pq_pairs++;
3332         }
3333
3334         list = &domain->avail_dir_pq_pairs;
3335         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3336                 dir_port->owned = false;
3337
3338                 dlb2_list_del(&domain->avail_dir_pq_pairs,
3339                               &dir_port->domain_list);
3340
3341                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3342                               &dir_port->func_list);
3343                 rsrcs->num_avail_dir_pq_pairs++;
3344         }
3345
3346         /* Return hist list entries to the function */
3347         ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3348                                     domain->hist_list_entry_base,
3349                                     domain->total_hist_list_entries);
3350         if (ret) {
3351                 DLB2_HW_ERR(hw,
3352                             "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
3353                             __func__);
3354                 return ret;
3355         }
3356
3357         domain->total_hist_list_entries = 0;
3358         domain->avail_hist_list_entries = 0;
3359         domain->hist_list_entry_base = 0;
3360         domain->hist_list_entry_offset = 0;
3361
3362         if (hw->ver == DLB2_HW_V2_5) {
3363                 rsrcs->num_avail_entries += domain->num_credits;
3364                 domain->num_credits = 0;
3365         } else {
3366                 rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3367                 domain->num_ldb_credits = 0;
3368
3369                 rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3370                 domain->num_dir_credits = 0;
3371         }
3372         rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3373         rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3374         domain->num_avail_aqed_entries = 0;
3375         domain->num_used_aqed_entries = 0;
3376
3377         domain->num_pending_removals = 0;
3378         domain->num_pending_additions = 0;
3379         domain->configured = false;
3380         domain->started = false;
3381
3382         /*
3383          * Move the domain out of the used_domains list and back to the
3384          * function's avail_domains list.
3385          */
3386         dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3387         dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3388         rsrcs->num_avail_domains++;
3389
3390         return 0;
3391 }
3392
3393 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3394                                             struct dlb2_hw_domain *domain,
3395                                             struct dlb2_ldb_queue *queue)
3396 {
3397         struct dlb2_ldb_port *port = NULL;
3398         int ret, i;
3399
3400         /* If a domain has LDB queues, it must have LDB ports */
3401         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3402                 port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
3403                                           typeof(*port));
3404                 if (port)
3405                         break;
3406         }
3407
3408         if (port == NULL) {
3409                 DLB2_HW_ERR(hw,
3410                             "[%s()] Internal error: No configured LDB ports\n",
3411                             __func__);
3412                 return -EFAULT;
3413         }
3414
3415         /* If necessary, free up a QID slot in this CQ */
3416         if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3417                 struct dlb2_ldb_queue *mapped_queue;
3418
3419                 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3420
3421                 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3422                 if (ret)
3423                         return ret;
3424         }
3425
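        /*
         * Dynamically map the unmapped (but non-empty) queue to the chosen CQ
         * so its pending QEs can be scheduled, then drain all mapped queues.
         */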
3426         ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3427         if (ret)
3428                 return ret;
3429
3430         return dlb2_domain_drain_mapped_queues(hw, domain);
3431 }
3432
3433 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3434                                              struct dlb2_hw_domain *domain)
3435 {
3436         struct dlb2_list_entry *iter;
3437         struct dlb2_ldb_queue *queue;
3438         int ret;
3439         RTE_SET_USED(iter);
3440
3441         /* If the domain hasn't been started, there's no traffic to drain */
3442         if (!domain->started)
3443                 return 0;
3444
3445         /*
3446          * Pre-condition: the unattached queue must not have any outstanding
3447          * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3448          * prior to this in dlb2_domain_drain_mapped_queues().
3449          */
3450         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3451                 if (queue->num_mappings != 0 ||
3452                     dlb2_ldb_queue_is_empty(hw, queue))
3453                         continue;
3454
3455                 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3456                 if (ret)
3457                         return ret;
3458         }
3459
3460         return 0;
3461 }
3462
3463 /**
3464  * dlb2_reset_domain() - reset a scheduling domain
3465  * @hw: dlb2_hw handle for a particular device.
3466  * @domain_id: domain ID.
3467  * @vdev_req: indicates whether this request came from a vdev.
3468  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3469  *
3470  * This function resets and frees a DLB 2.0 scheduling domain and its associated
3471  * resources.
3472  *
3473  * Pre-condition: the driver must ensure software has stopped sending QEs
3474  * through this domain's producer ports before invoking this function, or
3475  * undefined behavior will result.
3476  *
3477  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3478  * device.
3479  *
3480  * Return:
3481  * Returns 0 upon success, < 0 otherwise.
3482  *
3483  * EINVAL - Invalid domain ID, or the domain is not configured.
3484  * EFAULT - Internal error. (Possibly caused if software has not met the
3485  *          pre-condition.)
3486  * ETIMEDOUT - Hardware component didn't reset in the expected time.
3487  */
3488 int dlb2_reset_domain(struct dlb2_hw *hw,
3489                       u32 domain_id,
3490                       bool vdev_req,
3491                       unsigned int vdev_id)
3492 {
3493         struct dlb2_hw_domain *domain;
3494         int ret;
3495
3496         dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3497
3498         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3499
3500         if (domain == NULL || !domain->configured)
3501                 return -EINVAL;
3502
3503         /* Disable VPPs */
3504         if (vdev_req) {
3505                 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3506
3507                 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3508         }
3509
3510         /* Disable CQ interrupts */
3511         dlb2_domain_disable_dir_port_interrupts(hw, domain);
3512
3513         dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3514
3515         /*
3516          * For each queue owned by this domain, disable its write permissions to
3517          * cause any traffic sent to it to be dropped. Well-behaved software
3518          * should not be sending QEs at this point.
3519          */
3520         dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3521
3522         dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3523
3524         /* Turn off completion tracking on all the domain's PPs. */
3525         dlb2_domain_disable_ldb_seq_checks(hw, domain);
3526
3527         /*
3528          * Disable the LDB CQs and drain them in order to complete the map and
3529          * unmap procedures, which require zero CQ inflights and zero QID
3530          * inflights respectively.
3531          */
3532         dlb2_domain_disable_ldb_cqs(hw, domain);
3533
3534         dlb2_domain_drain_ldb_cqs(hw, domain, false);
3535
3536         ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3537         if (ret)
3538                 return ret;
3539
3540         ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3541         if (ret)
3542                 return ret;
3543
3544         ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3545         if (ret)
3546                 return ret;
3547
3548         /* Re-enable the CQs in order to drain the mapped queues. */
3549         dlb2_domain_enable_ldb_cqs(hw, domain);
3550
3551         ret = dlb2_domain_drain_mapped_queues(hw, domain);
3552         if (ret)
3553                 return ret;
3554
3555         ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3556         if (ret)
3557                 return ret;
3558
3559         /* Done draining LDB QEs, so disable the CQs. */
3560         dlb2_domain_disable_ldb_cqs(hw, domain);
3561
3562         dlb2_domain_drain_dir_queues(hw, domain);
3563
3564         /* Done draining DIR QEs, so disable the CQs. */
3565         dlb2_domain_disable_dir_cqs(hw, domain);
3566
3567         /* Disable PPs */
3568         dlb2_domain_disable_dir_producer_ports(hw, domain);
3569
3570         dlb2_domain_disable_ldb_producer_ports(hw, domain);
3571
3572         ret = dlb2_domain_verify_reset_success(hw, domain);
3573         if (ret)
3574                 return ret;
3575
3576         /* Reset the QID and port state. */
3577         dlb2_domain_reset_registers(hw, domain);
3578
3579         /* Hardware reset complete. Reset the domain's software state */
3580         return dlb2_domain_reset_software_state(hw, domain);
3581 }
3582
3583 static void
3584 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3585                                u32 domain_id,
3586                                struct dlb2_create_ldb_queue_args *args,
3587                                bool vdev_req,
3588                                unsigned int vdev_id)
3589 {
3590         DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3591         if (vdev_req)
3592                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3593         DLB2_HW_DBG(hw, "\tDomain ID:                  %d\n",
3594                     domain_id);
3595         DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3596                     args->num_sequence_numbers);
3597         DLB2_HW_DBG(hw, "\tNumber of QID inflights:    %d\n",
3598                     args->num_qid_inflights);
3599         DLB2_HW_DBG(hw, "\tNumber of ATM inflights:    %d\n",
3600                     args->num_atomic_inflights);
3601 }
3602
3603 static int
3604 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3605                                   struct dlb2_ldb_queue *queue,
3606                                   struct dlb2_create_ldb_queue_args *args)
3607 {
3608         int slot = -1;
3609         int i;
3610
3611         queue->sn_cfg_valid = false;
3612
3613         if (args->num_sequence_numbers == 0)
3614                 return 0;
3615
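        /*
         * Search for a sequence number group configured with the requested
         * per-queue allocation that still has a free slot.
         */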
3616         for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3617                 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3618
3619                 if (group->sequence_numbers_per_queue ==
3620                     args->num_sequence_numbers &&
3621                     !dlb2_sn_group_full(group)) {
3622                         slot = dlb2_sn_group_alloc_slot(group);
3623                         if (slot >= 0)
3624                                 break;
3625                 }
3626         }
3627
3628         if (slot == -1) {
3629                 DLB2_HW_ERR(hw,
3630                             "[%s():%d] Internal error: no sequence number slots available\n",
3631                             __func__, __LINE__);
3632                 return -EFAULT;
3633         }
3634
3635         queue->sn_cfg_valid = true;
3636         queue->sn_group = i;
3637         queue->sn_slot = slot;
3638         return 0;
3639 }
3640
3641 static int
3642 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3643                                   u32 domain_id,
3644                                   struct dlb2_create_ldb_queue_args *args,
3645                                   struct dlb2_cmd_response *resp,
3646                                   bool vdev_req,
3647                                   unsigned int vdev_id,
3648                                   struct dlb2_hw_domain **out_domain,
3649                                   struct dlb2_ldb_queue **out_queue)
3650 {
3651         struct dlb2_hw_domain *domain;
3652         struct dlb2_ldb_queue *queue;
3653         int i;
3654
3655         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3656
3657         if (!domain) {
3658                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3659                 return -EINVAL;
3660         }
3661
3662         if (!domain->configured) {
3663                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3664                 return -EINVAL;
3665         }
3666
3667         if (domain->started) {
3668                 resp->status = DLB2_ST_DOMAIN_STARTED;
3669                 return -EINVAL;
3670         }
3671
3672         queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3673         if (!queue) {
3674                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3675                 return -EINVAL;
3676         }
3677
3678         if (args->num_sequence_numbers) {
3679                 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3680                         struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3681
3682                         if (group->sequence_numbers_per_queue ==
3683                             args->num_sequence_numbers &&
3684                             !dlb2_sn_group_full(group))
3685                                 break;
3686                 }
3687
3688                 if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3689                         resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3690                         return -EINVAL;
3691                 }
3692         }
3693
3694         if (args->num_qid_inflights > 4096) {
3695                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3696                 return -EINVAL;
3697         }
3698
3699         /* Inflights must be <= number of sequence numbers if ordered */
3700         if (args->num_sequence_numbers != 0 &&
3701             args->num_qid_inflights > args->num_sequence_numbers) {
3702                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3703                 return -EINVAL;
3704         }
3705
3706         if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3707                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3708                 return -EINVAL;
3709         }
3710
3711         if (args->num_atomic_inflights &&
3712             args->lock_id_comp_level != 0 &&
3713             args->lock_id_comp_level != 64 &&
3714             args->lock_id_comp_level != 128 &&
3715             args->lock_id_comp_level != 256 &&
3716             args->lock_id_comp_level != 512 &&
3717             args->lock_id_comp_level != 1024 &&
3718             args->lock_id_comp_level != 2048 &&
3719             args->lock_id_comp_level != 4096 &&
3720             args->lock_id_comp_level != 65536) {
3721                 resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3722                 return -EINVAL;
3723         }
3724
3725         *out_domain = domain;
3726         *out_queue = queue;
3727
3728         return 0;
3729 }
3730
3731 static int
3732 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
3733                                 struct dlb2_hw_domain *domain,
3734                                 struct dlb2_ldb_queue *queue,
3735                                 struct dlb2_create_ldb_queue_args *args)
3736 {
3737         int ret;
3738         ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
3739         if (ret)
3740                 return ret;
3741
3742         /* Attach QID inflights */
3743         queue->num_qid_inflights = args->num_qid_inflights;
3744
3745         /* Attach atomic inflights */
3746         queue->aqed_limit = args->num_atomic_inflights;
3747
3748         domain->num_avail_aqed_entries -= args->num_atomic_inflights;
3749         domain->num_used_aqed_entries += args->num_atomic_inflights;
3750
3751         return 0;
3752 }
3753
3754 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
3755                                      struct dlb2_hw_domain *domain,
3756                                      struct dlb2_ldb_queue *queue,
3757                                      struct dlb2_create_ldb_queue_args *args,
3758                                      bool vdev_req,
3759                                      unsigned int vdev_id)
3760 {
3761         struct dlb2_sn_group *sn_group;
3762         unsigned int offs;
3763         u32 reg = 0;
3764         u32 alimit;
3765
3766         /* QID write permissions are turned on when the domain is started */
3767         offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.phys_id;
3768
3769         DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), reg);
3770
3771         /*
3772          * Unordered QIDs get 4K inflights, ordered get as many as the number
3773          * of sequence numbers.
3774          */
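        /*
         * For example, an ordered queue created with 64 sequence numbers may
         * request at most 64 QID inflights (enforced in
         * dlb2_verify_create_ldb_queue_args()), while an unordered queue may
         * request up to 4096.
         */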
3775         DLB2_BITS_SET(reg, args->num_qid_inflights,
3776                       DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
3777         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
3778                                                   queue->id.phys_id), reg);
3779
3780         alimit = queue->aqed_limit;
3781
3782         if (alimit > DLB2_MAX_NUM_AQED_ENTRIES)
3783                 alimit = DLB2_MAX_NUM_AQED_ENTRIES;
3784
3785         reg = 0;
3786         DLB2_BITS_SET(reg, alimit, DLB2_LSP_QID_AQED_ACTIVE_LIM_LIMIT);
3787         DLB2_CSR_WR(hw,
3788                     DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver,
3789                                                  queue->id.phys_id), reg);
3790
3791         reg = 0;
3792         switch (args->lock_id_comp_level) {
3793         case 64:
3794                 DLB2_BITS_SET(reg, 1, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3795                 break;
3796         case 128:
3797                 DLB2_BITS_SET(reg, 2, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3798                 break;
3799         case 256:
3800                 DLB2_BITS_SET(reg, 3, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3801                 break;
3802         case 512:
3803                 DLB2_BITS_SET(reg, 4, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3804                 break;
3805         case 1024:
3806                 DLB2_BITS_SET(reg, 5, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3807                 break;
3808         case 2048:
3809                 DLB2_BITS_SET(reg, 6, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3810                 break;
3811         case 4096:
3812                 DLB2_BITS_SET(reg, 7, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3813                 break;
3814         default:
3815                 /* No compression by default */
3816                 break;
3817         }
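        /*
         * The compress code presumably bounds how many distinct lock IDs the
         * AQED distinguishes for this queue: 64 maps to code 1 up through
         * 4096 mapping to code 7, while 0 and 65536 leave lock IDs
         * uncompressed.
         */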
3818
3819         DLB2_CSR_WR(hw, DLB2_AQED_QID_HID_WIDTH(queue->id.phys_id), reg);
3820
3821         reg = 0;
3822         /* Don't timestamp QEs that pass through this queue */
3823         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_ITS(queue->id.phys_id), reg);
3824
3825         DLB2_BITS_SET(reg, args->depth_threshold,
3826                       DLB2_LSP_QID_ATM_DEPTH_THRSH_THRESH);
3827         DLB2_CSR_WR(hw,
3828                     DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver,
3829                                                  queue->id.phys_id), reg);
3830
3831         reg = 0;
3832         DLB2_BITS_SET(reg, args->depth_threshold,
3833                       DLB2_LSP_QID_NALDB_DEPTH_THRSH_THRESH);
3834         DLB2_CSR_WR(hw,
3835                     DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue->id.phys_id),
3836                     reg);
3837
3838         /*
3839          * This register limits the number of inflight flows a queue can have
3840          * at one time.  It has an upper bound of 2048, but can be
3841          * over-subscribed. 512 is chosen so that a single queue does not use
3842          * the entire atomic storage, but can use a substantial portion if
3843          * needed.
3844          */
3845         reg = 0;
3846         DLB2_BITS_SET(reg, 512, DLB2_AQED_QID_FID_LIM_QID_FID_LIMIT);
3847         DLB2_CSR_WR(hw, DLB2_AQED_QID_FID_LIM(queue->id.phys_id), reg);
3848
3849         /* Configure SNs */
3850         reg = 0;
3851         sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
3852         DLB2_BITS_SET(reg, sn_group->mode, DLB2_CHP_ORD_QID_SN_MAP_MODE);
3853         DLB2_BITS_SET(reg, queue->sn_slot, DLB2_CHP_ORD_QID_SN_MAP_SLOT);
3854         DLB2_BITS_SET(reg, sn_group->id, DLB2_CHP_ORD_QID_SN_MAP_GRP);
3855
3856         DLB2_CSR_WR(hw,
3857                     DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue->id.phys_id), reg);
3858
3859         reg = 0;
3860         DLB2_BITS_SET(reg, (args->num_sequence_numbers != 0),
3861                  DLB2_SYS_LDB_QID_CFG_V_SN_CFG_V);
3862         DLB2_BITS_SET(reg, (args->num_atomic_inflights != 0),
3863                  DLB2_SYS_LDB_QID_CFG_V_FID_CFG_V);
3864
3865         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), reg);
3866
3867         if (vdev_req) {
3868                 offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
3869
3870                 reg = 0;
3871                 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VQID_V_VQID_V);
3872                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), reg);
3873
3874                 reg = 0;
3875                 DLB2_BITS_SET(reg, queue->id.phys_id,
3876                               DLB2_SYS_VF_LDB_VQID2QID_QID);
3877                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), reg);
3878
3879                 reg = 0;
3880                 DLB2_BITS_SET(reg, queue->id.virt_id,
3881                               DLB2_SYS_LDB_QID2VQID_VQID);
3882                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID2VQID(queue->id.phys_id), reg);
3883         }
3884
3885         reg = 0;
3886         DLB2_BIT_SET(reg, DLB2_SYS_LDB_QID_V_QID_V);
3887         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), reg);
3888 }
3889
3890 /**
3891  * dlb2_hw_create_ldb_queue() - create a load-balanced queue
3892  * @hw: dlb2_hw handle for a particular device.
3893  * @domain_id: domain ID.
3894  * @args: queue creation arguments.
3895  * @resp: response structure.
3896  * @vdev_req: indicates whether this request came from a vdev.
3897  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3898  *
3899  * This function creates a load-balanced queue.
3900  *
3901  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3902  * device.
3903  *
3904  * Return:
3905  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
3906  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
3907  * contains the queue ID.
3908  *
3909  * resp->id contains a virtual ID if vdev_req is true.
3910  *
3911  * Errors:
3912  * EINVAL - A requested resource is unavailable, the domain is not configured,
3913  *          the domain has already been started, or the requested queue name is
3914  *          already in use.
3915  * EFAULT - Internal error (resp->status not set).
3916  */
3917 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
3918                              u32 domain_id,
3919                              struct dlb2_create_ldb_queue_args *args,
3920                              struct dlb2_cmd_response *resp,
3921                              bool vdev_req,
3922                              unsigned int vdev_id)
3923 {
3924         struct dlb2_hw_domain *domain;
3925         struct dlb2_ldb_queue *queue;
3926         int ret;
3927
3928         dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
3929
3930         /*
3931          * Verify that hardware resources are available before attempting to
3932          * satisfy the request. This simplifies the error unwinding code.
3933          */
3934         ret = dlb2_verify_create_ldb_queue_args(hw,
3935                                                 domain_id,
3936                                                 args,
3937                                                 resp,
3938                                                 vdev_req,
3939                                                 vdev_id,
3940                                                 &domain,
3941                                                 &queue);
3942         if (ret)
3943                 return ret;
3944
3945         ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
3946
3947         if (ret) {
3948                 DLB2_HW_ERR(hw,
3949                             "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
3950                             __func__, __LINE__);
3951                 return ret;
3952         }
3953
3954         dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
3955
3956         queue->num_mappings = 0;
3957
3958         queue->configured = true;
3959
3960         /*
3961          * Configuration succeeded, so move the resource from the 'avail' to
3962          * the 'used' list.
3963          */
3964         dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
3965
3966         dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
3967
3968         resp->status = 0;
3969         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
3970
3971         return 0;
3972 }
3973
3974 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
3975                                        struct dlb2_hw_domain *domain,
3976                                        struct dlb2_ldb_port *port,
3977                                        bool vdev_req,
3978                                        unsigned int vdev_id)
3979 {
3980         u32 reg = 0;
3981
3982         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
3983         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);
3984
3985         if (vdev_req) {
3986                 unsigned int offs;
3987                 u32 virt_id;
3988
3989                 /*
3990                  * DLB uses producer port address bits 17:12 to determine the
3991                  * producer port ID. In Scalable IOV mode, PP accesses come
3992                  * through the PF MMIO window for the physical producer port,
3993                  * so for translation purposes the virtual and physical port
3994                  * IDs are equal.
3995                  */
3996                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3997                         virt_id = port->id.virt_id;
3998                 else
3999                         virt_id = port->id.phys_id;
4000
4001                 reg = 0;
4002                 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
4003                 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4004                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);
4005
4006                 reg = 0;
4007                 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
4008                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);
4009
4010                 reg = 0;
4011                 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
4012                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
4013         }
4014
4015         reg = 0;
4016         DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
4017         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
4018 }
4019
4020 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4021                                       struct dlb2_hw_domain *domain,
4022                                       struct dlb2_ldb_port *port,
4023                                       uintptr_t cq_dma_base,
4024                                       struct dlb2_create_ldb_port_args *args,
4025                                       bool vdev_req,
4026                                       unsigned int vdev_id)
4027 {
4028         u32 hl_base = 0;
4029         u32 reg = 0;
4030         u32 ds = 0;
4031
4032         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4033         DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
4034         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
4035
4036         reg = cq_dma_base >> 32;
4037         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
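        /*
         * For example, a (hypothetical) 64B-aligned cq_dma_base of
         * 0x12345678c0 would program ADDR_L with 0x12345678c0 >> 6 =
         * 0x48d159e3 and ADDR_U with 0x12345678c0 >> 32 = 0x12.
         */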
4038
4039         /*
4040          * 'ro' == relaxed ordering. This setting allows DLB2 to write
4041          * cache lines out-of-order (but QEs within a cache line are always
4042          * updated in-order).
4043          */
4044         reg = 0;
4045         DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
4046         DLB2_BITS_SET(reg,
4047                  !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4048                  DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
4049         DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);
4050
4051         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);
4052
4053         port->cq_depth = args->cq_depth;
4054
4055         if (args->cq_depth <= 8) {
4056                 ds = 1;
4057         } else if (args->cq_depth == 16) {
4058                 ds = 2;
4059         } else if (args->cq_depth == 32) {
4060                 ds = 3;
4061         } else if (args->cq_depth == 64) {
4062                 ds = 4;
4063         } else if (args->cq_depth == 128) {
4064                 ds = 5;
4065         } else if (args->cq_depth == 256) {
4066                 ds = 6;
4067         } else if (args->cq_depth == 512) {
4068                 ds = 7;
4069         } else if (args->cq_depth == 1024) {
4070                 ds = 8;
4071         } else {
4072                 DLB2_HW_ERR(hw,
4073                             "[%s():%d] Internal error: invalid CQ depth\n",
4074                             __func__, __LINE__);
4075                 return -EFAULT;
4076         }
4077
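        /*
         * For depths of 8 and above, ds is log2(cq_depth) - 2 (8 -> 1,
         * 16 -> 2, ..., 1024 -> 8); depths below 8 reuse the 8-entry encoding
         * and are handled via the initial token count programmed below.
         */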
4078         reg = 0;
4079         DLB2_BITS_SET(reg, ds,
4080                       DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4081         DLB2_CSR_WR(hw,
4082                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4083                     reg);
4084
4085         /*
4086          * To support CQs with depth less than 8, program the token count
4087          * register with a non-zero initial value. Operations such as domain
4088          * reset must take this initial value into account when quiescing the
4089          * CQ.
4090          */
4091         port->init_tkn_cnt = 0;
4092
4093         if (args->cq_depth < 8) {
4094                 reg = 0;
4095                 port->init_tkn_cnt = 8 - args->cq_depth;
4096
4097                 DLB2_BITS_SET(reg,
4098                               port->init_tkn_cnt,
4099                               DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
4100                 DLB2_CSR_WR(hw,
4101                             DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4102                             reg);
4103         } else {
4104                 DLB2_CSR_WR(hw,
4105                             DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4106                             DLB2_LSP_CQ_LDB_TKN_CNT_RST);
4107         }
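        /*
         * For example, a CQ depth of 4 yields init_tkn_cnt = 8 - 4 = 4: the
         * CQ uses the 8-entry token depth encoding and its token count is
         * pre-loaded with 4, presumably so the hardware never has more than
         * 4 QEs outstanding in the CQ at once.
         */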
4108
4109         reg = 0;
4110         DLB2_BITS_SET(reg, ds,
4111                       DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
4112         DLB2_CSR_WR(hw,
4113                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4114                     reg);
4115
4116         /* Reset the CQ write pointer */
4117         DLB2_CSR_WR(hw,
4118                     DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
4119                     DLB2_CHP_LDB_CQ_WPTR_RST);
4120
4121         reg = 0;
4122         DLB2_BITS_SET(reg,
4123                       port->hist_list_entry_limit - 1,
4124                       DLB2_CHP_HIST_LIST_LIM_LIMIT);
4125         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);
4126
4127         DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
4128                       DLB2_CHP_HIST_LIST_BASE_BASE);
4129         DLB2_CSR_WR(hw,
4130                     DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
4131                     hl_base);
4132
4133         /*
4134          * The inflight limit sets a cap on the number of QEs for which this CQ
4135          * can owe completions at one time.
4136          */
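        /*
         * The limit is programmed to the CQ's history list size: each
         * inflight QE presumably occupies one history list entry until its
         * completion is returned.
         */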
4137         reg = 0;
4138         DLB2_BITS_SET(reg, args->cq_history_list_size,
4139                       DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
4140         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
4141                     reg);
4142
4143         reg = 0;
4144         DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4145                       DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
4146         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
4147                     reg);
4148
4149         reg = 0;
4150         DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4151                       DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
4152         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
4153                     reg);
4154
4155         /*
4156          * Address translation (AT) settings: 0: untranslated, 2: translated
4157          * (see ATS spec regarding Address Type field for more details)
4158          */
4159
4160         if (hw->ver == DLB2_HW_V2) {
4161                 reg = 0;
4162                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
4163         }
4164
4165         reg = 0;
4166         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4167                 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4168                               DLB2_SYS_LDB_CQ_PASID_PASID);
4169                 DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
4170         }
4171
4172         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);
4173
4174         reg = 0;
4175         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
4176         DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);
4177
4178         /* Disable the port's QID mappings */
4179         reg = 0;
4180         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
4181
4182         return 0;
4183 }
4184
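/* Valid CQ depths are powers of two in the range [1, 1024]. */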
4185 static bool
4186 dlb2_cq_depth_is_valid(u32 depth)
4187 {
4188         if (depth != 1 && depth != 2 &&
4189             depth != 4 && depth != 8 &&
4190             depth != 16 && depth != 32 &&
4191             depth != 64 && depth != 128 &&
4192             depth != 256 && depth != 512 &&
4193             depth != 1024)
4194                 return false;
4195
4196         return true;
4197 }
4198
4199 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4200                                    struct dlb2_hw_domain *domain,
4201                                    struct dlb2_ldb_port *port,
4202                                    uintptr_t cq_dma_base,
4203                                    struct dlb2_create_ldb_port_args *args,
4204                                    bool vdev_req,
4205                                    unsigned int vdev_id)
4206 {
4207         int ret, i;
4208
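        /*
         * Carve this port's history list entries out of the domain's
         * allocation: the port receives a contiguous [base, base + size)
         * slice, and the domain's running offset and available count are
         * adjusted accordingly.
         */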
4209         port->hist_list_entry_base = domain->hist_list_entry_base +
4210                                      domain->hist_list_entry_offset;
4211         port->hist_list_entry_limit = port->hist_list_entry_base +
4212                                       args->cq_history_list_size;
4213
4214         domain->hist_list_entry_offset += args->cq_history_list_size;
4215         domain->avail_hist_list_entries -= args->cq_history_list_size;
4216
4217         ret = dlb2_ldb_port_configure_cq(hw,
4218                                          domain,
4219                                          port,
4220                                          cq_dma_base,
4221                                          args,
4222                                          vdev_req,
4223                                          vdev_id);
4224         if (ret)
4225                 return ret;
4226
4227         dlb2_ldb_port_configure_pp(hw,
4228                                    domain,
4229                                    port,
4230                                    vdev_req,
4231                                    vdev_id);
4232
4233         dlb2_ldb_port_cq_enable(hw, port);
4234
4235         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4236                 port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4237         port->num_mappings = 0;
4238
4239         port->enabled = true;
4240
4241         port->configured = true;
4242
4243         return 0;
4244 }
4245
4246 static void
4247 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4248                               u32 domain_id,
4249                               uintptr_t cq_dma_base,
4250                               struct dlb2_create_ldb_port_args *args,
4251                               bool vdev_req,
4252                               unsigned int vdev_id)
4253 {
4254         DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4255         if (vdev_req)
4256                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4257         DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4258                     domain_id);
4259         DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4260                     args->cq_depth);
4261         DLB2_HW_DBG(hw, "\tCQ hist list size:         %d\n",
4262                     args->cq_history_list_size);
4263         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4264                     cq_dma_base);
4265         DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
4266         DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
4267                     args->cos_strict);
4268 }
4269
4270 static int
4271 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4272                                  u32 domain_id,
4273                                  uintptr_t cq_dma_base,
4274                                  struct dlb2_create_ldb_port_args *args,
4275                                  struct dlb2_cmd_response *resp,
4276                                  bool vdev_req,
4277                                  unsigned int vdev_id,
4278                                  struct dlb2_hw_domain **out_domain,
4279                                  struct dlb2_ldb_port **out_port,
4280                                  int *out_cos_id)
4281 {
4282         struct dlb2_hw_domain *domain;
4283         struct dlb2_ldb_port *port;
4284         int i, id;
4285
4286         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4287
4288         if (!domain) {
4289                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4290                 return -EINVAL;
4291         }
4292
4293         if (!domain->configured) {
4294                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4295                 return -EINVAL;
4296         }
4297
4298         if (domain->started) {
4299                 resp->status = DLB2_ST_DOMAIN_STARTED;
4300                 return -EINVAL;
4301         }
4302
4303         if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
4304                 resp->status = DLB2_ST_INVALID_COS_ID;
4305                 return -EINVAL;
4306         }
4307
4308         if (args->cos_strict) {
4309                 id = args->cos_id;
4310                 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4311                                           typeof(*port));
4312         } else {
4313                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4314                         id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
4315
4316                         port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4317                                                   typeof(*port));
4318                         if (port)
4319                                 break;
4320                 }
4321         }
4322
4323         if (!port) {
4324                 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4325                 return -EINVAL;
4326         }
4327
4328         /* Check cache-line alignment */
4329         if ((cq_dma_base & 0x3F) != 0) {
4330                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4331                 return -EINVAL;
4332         }
4333
4334         if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4335                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4336                 return -EINVAL;
4337         }
4338
4339         /* The history list size must be >= 1 */
4340         if (!args->cq_history_list_size) {
4341                 resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4342                 return -EINVAL;
4343         }
4344
4345         if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4346                 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4347                 return -EINVAL;
4348         }
4349
4350         *out_domain = domain;
4351         *out_port = port;
4352         *out_cos_id = id;
4353
4354         return 0;
4355 }
4356
4357 /**
4358  * dlb2_hw_create_ldb_port() - create a load-balanced port
4359  * @hw: dlb2_hw handle for a particular device.
4360  * @domain_id: domain ID.
4361  * @args: port creation arguments.
4362  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4363  * @resp: response structure.
4364  * @vdev_req: indicates whether this request came from a vdev.
4365  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4366  *
4367  * This function creates a load-balanced port.
4368  *
4369  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4370  * device.
4371  *
4372  * Return:
4373  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4374  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4375  * contains the port ID.
4376  *
4377  * resp->id contains a virtual ID if vdev_req is true.
4378  *
4379  * Errors:
4380  * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4381  *          pointer address is not properly aligned, the domain is not
4382  *          configured, or the domain has already been started.
4383  * EFAULT - Internal error (resp->status not set).
4384  */
4385 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4386                             u32 domain_id,
4387                             struct dlb2_create_ldb_port_args *args,
4388                             uintptr_t cq_dma_base,
4389                             struct dlb2_cmd_response *resp,
4390                             bool vdev_req,
4391                             unsigned int vdev_id)
4392 {
4393         struct dlb2_hw_domain *domain;
4394         struct dlb2_ldb_port *port;
4395         int ret, cos_id;
4396
4397         dlb2_log_create_ldb_port_args(hw,
4398                                       domain_id,
4399                                       cq_dma_base,
4400                                       args,
4401                                       vdev_req,
4402                                       vdev_id);
4403
4404         /*
4405          * Verify that hardware resources are available before attempting to
4406          * satisfy the request. This simplifies the error unwinding code.
4407          */
4408         ret = dlb2_verify_create_ldb_port_args(hw,
4409                                                domain_id,
4410                                                cq_dma_base,
4411                                                args,
4412                                                resp,
4413                                                vdev_req,
4414                                                vdev_id,
4415                                                &domain,
4416                                                &port,
4417                                                &cos_id);
4418         if (ret)
4419                 return ret;
4420
4421         ret = dlb2_configure_ldb_port(hw,
4422                                       domain,
4423                                       port,
4424                                       cq_dma_base,
4425                                       args,
4426                                       vdev_req,
4427                                       vdev_id);
4428         if (ret)
4429                 return ret;
4430
4431         /*
4432          * Configuration succeeded, so move the resource from the 'avail' to
4433          * the 'used' list.
4434          */
4435         dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4436
4437         dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4438
4439         resp->status = 0;
4440         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4441
4442         return 0;
4443 }
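
/*
 * Illustrative call sequence (not part of the driver): a PF-level caller
 * might create a load-balanced port roughly as below. The argument values
 * and the 64B-aligned cq_dma_base are assumptions for this sketch only.
 *
 *      struct dlb2_create_ldb_port_args args = {
 *              .cq_depth = 64,
 *              .cq_history_list_size = 64,
 *              .cos_id = 0,
 *              .cos_strict = 0,
 *      };
 *      struct dlb2_cmd_response resp = {0};
 *      int ret;
 *
 *      ret = dlb2_hw_create_ldb_port(hw, domain_id, &args, cq_dma_base,
 *                                    &resp, false, 0);
 *      if (ret)
 *              return ret;     // resp.status holds the dlb2_error code
 *      // On success resp.id is the physical port ID (virtual for a vdev).
 */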
4444
4445 static void
4446 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
4447                               u32 domain_id,
4448                               uintptr_t cq_dma_base,
4449                               struct dlb2_create_dir_port_args *args,
4450                               bool vdev_req,
4451                               unsigned int vdev_id)
4452 {
4453         DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
4454         if (vdev_req)
4455                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4456         DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4457                     domain_id);
4458         DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4459                     args->cq_depth);
4460         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4461                     cq_dma_base);
4462 }
4463
4464 static struct dlb2_dir_pq_pair *
4465 dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
4466                             u32 id,
4467                             bool vdev_req,
4468                             struct dlb2_hw_domain *domain)
4469 {
4470         struct dlb2_list_entry *iter;
4471         struct dlb2_dir_pq_pair *port;
4472         RTE_SET_USED(iter);
4473
4474         if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
4475                 return NULL;
4476
4477         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
4478                 if ((!vdev_req && port->id.phys_id == id) ||
4479                     (vdev_req && port->id.virt_id == id))
4480                         return port;
4481         }
4482
4483         return NULL;
4484 }
4485
4486 static int
4487 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
4488                                  u32 domain_id,
4489                                  uintptr_t cq_dma_base,
4490                                  struct dlb2_create_dir_port_args *args,
4491                                  struct dlb2_cmd_response *resp,
4492                                  bool vdev_req,
4493                                  unsigned int vdev_id,
4494                                  struct dlb2_hw_domain **out_domain,
4495                                  struct dlb2_dir_pq_pair **out_port)
4496 {
4497         struct dlb2_hw_domain *domain;
4498         struct dlb2_dir_pq_pair *pq;
4499
4500         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4501
4502         if (!domain) {
4503                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4504                 return -EINVAL;
4505         }
4506
4507         if (!domain->configured) {
4508                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4509                 return -EINVAL;
4510         }
4511
4512         if (domain->started) {
4513                 resp->status = DLB2_ST_DOMAIN_STARTED;
4514                 return -EINVAL;
4515         }
4516
4517         if (args->queue_id != -1) {
4518                 /*
4519                  * If the user claims the queue is already configured, validate
4520                  * the queue ID, that it belongs to this domain, and that it has
4521                  * in fact been configured.
4522                  */
4523                 pq = dlb2_get_domain_used_dir_pq(hw,
4524                                                  args->queue_id,
4525                                                  vdev_req,
4526                                                  domain);
4527
4528                 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4529                     !pq->queue_configured) {
4530                         resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
4531                         return -EINVAL;
4532                 }
4533         } else {
4534                 /*
4535                  * If the port's queue is not configured, validate that a free
4536                  * port-queue pair is available.
4537                  */
4538                 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4539                                         typeof(*pq));
4540                 if (!pq) {
4541                         resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
4542                         return -EINVAL;
4543                 }
4544         }
4545
4546         /* Check cache-line alignment */
4547         if ((cq_dma_base & 0x3F) != 0) {
4548                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4549                 return -EINVAL;
4550         }
4551
4552         if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4553                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4554                 return -EINVAL;
4555         }
4556
4557         *out_domain = domain;
4558         *out_port = pq;
4559
4560         return 0;
4561 }
4562
4563 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
4564                                        struct dlb2_hw_domain *domain,
4565                                        struct dlb2_dir_pq_pair *port,
4566                                        bool vdev_req,
4567                                        unsigned int vdev_id)
4568 {
4569         u32 reg = 0;
4570
4571         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_DIR_PP2VAS_VAS);
4572         DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), reg);
4573
4574         if (vdev_req) {
4575                 unsigned int offs;
4576                 u32 virt_id;
4577
4578                 /*
4579                  * DLB uses producer port address bits 17:12 to determine the
4580                  * producer port ID. In Scalable IOV mode, PP accesses come
4581                  * through the PF MMIO window for the physical producer port,
4582                  * so for translation purposes the virtual and physical port
4583                  * IDs are equal.
4584                  */
4585                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4586                         virt_id = port->id.virt_id;
4587                 else
4588                         virt_id = port->id.phys_id;
4589
4590                 reg = 0;
4591                 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_DIR_VPP2PP_PP);
4592                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
4593                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), reg);
4594
4595                 reg = 0;
4596                 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_PP2VDEV_VDEV);
4597                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VDEV(port->id.phys_id), reg);
4598
4599                 reg = 0;
4600                 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VPP_V_VPP_V);
4601                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), reg);
4602         }
4603
4604         reg = 0;
4605         DLB2_BIT_SET(reg, DLB2_SYS_DIR_PP_V_PP_V);
4606         DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP_V(port->id.phys_id), reg);
4607 }
4608
4609 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
4610                                       struct dlb2_hw_domain *domain,
4611                                       struct dlb2_dir_pq_pair *port,
4612                                       uintptr_t cq_dma_base,
4613                                       struct dlb2_create_dir_port_args *args,
4614                                       bool vdev_req,
4615                                       unsigned int vdev_id)
4616 {
4617         u32 reg = 0;
4618         u32 ds = 0;
4619
4620         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4621         DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_DIR_CQ_ADDR_L_ADDR_L);
4622         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), reg);
4623
4624         reg = cq_dma_base >> 32;
4625         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), reg);
4626
4627         /*
4628          * 'ro' == relaxed ordering. This setting allows DLB2 to write
4629          * cache lines out-of-order (but QEs within a cache line are always
4630          * updated in-order).
4631          */
4632         reg = 0;
4633         DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_CQ2VF_PF_RO_VF);
4634         DLB2_BITS_SET(reg, !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4635                  DLB2_SYS_DIR_CQ2VF_PF_RO_IS_PF);
4636         DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ2VF_PF_RO_RO);
4637
4638         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), reg);
4639
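        /*
         * Select the token depth encoding: ds == 1 corresponds to an 8-entry
         * CQ and each doubling of the depth increments ds by one (i.e.
         * ds = log2(cq_depth) - 2 for depths of 8 or more). Depths below 8
         * reuse the 8-entry encoding and are padded via init_tkn_cnt below.
         */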
4640         if (args->cq_depth <= 8) {
4641                 ds = 1;
4642         } else if (args->cq_depth == 16) {
4643                 ds = 2;
4644         } else if (args->cq_depth == 32) {
4645                 ds = 3;
4646         } else if (args->cq_depth == 64) {
4647                 ds = 4;
4648         } else if (args->cq_depth == 128) {
4649                 ds = 5;
4650         } else if (args->cq_depth == 256) {
4651                 ds = 6;
4652         } else if (args->cq_depth == 512) {
4653                 ds = 7;
4654         } else if (args->cq_depth == 1024) {
4655                 ds = 8;
4656         } else {
4657                 DLB2_HW_ERR(hw,
4658                             "[%s():%d] Internal error: invalid CQ depth\n",
4659                             __func__, __LINE__);
4660                 return -EFAULT;
4661         }
4662
4663         reg = 0;
4664         DLB2_BITS_SET(reg, ds,
4665                       DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4666         DLB2_CSR_WR(hw,
4667                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4668                     reg);
4669
4670         /*
4671          * To support CQs with depth less than 8, program the token count
4672          * register with a non-zero initial value. Operations such as domain
4673          * reset must take this initial value into account when quiescing the
4674          * CQ.
4675          */
4676         port->init_tkn_cnt = 0;
4677
4678         if (args->cq_depth < 8) {
4679                 reg = 0;
4680                 port->init_tkn_cnt = 8 - args->cq_depth;
4681
4682                 DLB2_BITS_SET(reg, port->init_tkn_cnt,
4683                               DLB2_LSP_CQ_DIR_TKN_CNT_COUNT);
4684                 DLB2_CSR_WR(hw,
4685                             DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4686                             reg);
4687         } else {
4688                 DLB2_CSR_WR(hw,
4689                             DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4690                             DLB2_LSP_CQ_DIR_TKN_CNT_RST);
4691         }
4692
4693         reg = 0;
4694         DLB2_BITS_SET(reg, ds,
4695                       DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_TOKEN_DEPTH_SELECT_V2);
4696         DLB2_CSR_WR(hw,
4697                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
4698                                                       port->id.phys_id),
4699                     reg);
4700
4701         /* Reset the CQ write pointer */
4702         DLB2_CSR_WR(hw,
4703                     DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
4704                     DLB2_CHP_DIR_CQ_WPTR_RST);
4705
4706         /* Virtualize the PPID */
4707         reg = 0;
4708         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), reg);
4709
4710         /*
4711          * Address translation (AT) settings: 0: untranslated, 2: translated
4712          * (see ATS spec regarding Address Type field for more details)
4713          */
4714         if (hw->ver == DLB2_HW_V2) {
4715                 reg = 0;
4716                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), reg);
4717         }
4718
4719         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4720                 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4721                               DLB2_SYS_DIR_CQ_PASID_PASID);
4722                 DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ_PASID_FMT2);
4723         }
4724
4725         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id), reg);
4726
4727         reg = 0;
4728         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_DIR_CQ2VAS_CQ2VAS);
4729         DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id), reg);
4730
4731         return 0;
4732 }
4733
4734 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
4735                                    struct dlb2_hw_domain *domain,
4736                                    struct dlb2_dir_pq_pair *port,
4737                                    uintptr_t cq_dma_base,
4738                                    struct dlb2_create_dir_port_args *args,
4739                                    bool vdev_req,
4740                                    unsigned int vdev_id)
4741 {
4742         int ret;
4743
4744         ret = dlb2_dir_port_configure_cq(hw,
4745                                          domain,
4746                                          port,
4747                                          cq_dma_base,
4748                                          args,
4749                                          vdev_req,
4750                                          vdev_id);
4751
4752         if (ret)
4753                 return ret;
4754
4755         dlb2_dir_port_configure_pp(hw,
4756                                    domain,
4757                                    port,
4758                                    vdev_req,
4759                                    vdev_id);
4760
4761         dlb2_dir_port_cq_enable(hw, port);
4762
4763         port->enabled = true;
4764
4765         port->port_configured = true;
4766
4767         return 0;
4768 }
4769
4770 /**
4771  * dlb2_hw_create_dir_port() - create a directed port
4772  * @hw: dlb2_hw handle for a particular device.
4773  * @domain_id: domain ID.
4774  * @args: port creation arguments.
4775  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4776  * @resp: response structure.
4777  * @vdev_req: indicates whether this request came from a vdev.
4778  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4779  *
4780  * This function creates a directed port.
4781  *
4782  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4783  * device.
4784  *
4785  * Return:
4786  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4787  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4788  * contains the port ID.
4789  *
4790  * resp->id contains a virtual ID if vdev_req is true.
4791  *
4792  * Errors:
4793  * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4794  *          pointer address is not properly aligned, the domain is not
4795  *          configured, or the domain has already been started.
4796  * EFAULT - Internal error (resp->status not set).
4797  */
4798 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
4799                             u32 domain_id,
4800                             struct dlb2_create_dir_port_args *args,
4801                             uintptr_t cq_dma_base,
4802                             struct dlb2_cmd_response *resp,
4803                             bool vdev_req,
4804                             unsigned int vdev_id)
4805 {
4806         struct dlb2_dir_pq_pair *port;
4807         struct dlb2_hw_domain *domain;
4808         int ret;
4809
4810         dlb2_log_create_dir_port_args(hw,
4811                                       domain_id,
4812                                       cq_dma_base,
4813                                       args,
4814                                       vdev_req,
4815                                       vdev_id);
4816
4817         /*
4818          * Verify that hardware resources are available before attempting to
4819          * satisfy the request. This simplifies the error unwinding code.
4820          */
4821         ret = dlb2_verify_create_dir_port_args(hw,
4822                                                domain_id,
4823                                                cq_dma_base,
4824                                                args,
4825                                                resp,
4826                                                vdev_req,
4827                                                vdev_id,
4828                                                &domain,
4829                                                &port);
4830         if (ret)
4831                 return ret;
4832
4833         ret = dlb2_configure_dir_port(hw,
4834                                       domain,
4835                                       port,
4836                                       cq_dma_base,
4837                                       args,
4838                                       vdev_req,
4839                                       vdev_id);
4840         if (ret)
4841                 return ret;
4842
4843         /*
4844          * Configuration succeeded, so move the resource from the 'avail' to
4845          * the 'used' list (if it's not already there).
4846          */
4847         if (args->queue_id == -1) {
4848                 dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
4849
4850                 dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
4851         }
4852
4853         resp->status = 0;
4854         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4855
4856         return 0;
4857 }
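
/*
 * Illustrative only: a directed port is created either against a directed
 * queue configured earlier (args->queue_id set to that queue's ID) or with
 * args->queue_id == -1, in which case a free port-queue pair is claimed and
 * the queue can be configured later via dlb2_hw_create_dir_queue(). The
 * values below are assumptions for this sketch.
 *
 *      struct dlb2_create_dir_port_args args = {
 *              .cq_depth = 8,
 *              .queue_id = -1,         // no pre-existing directed queue
 *      };
 *      struct dlb2_cmd_response resp = {0};
 *
 *      if (dlb2_hw_create_dir_port(hw, domain_id, &args, cq_dma_base,
 *                                  &resp, false, 0))
 *              return -1;              // resp.status has the detailed error
 */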
4858
4859 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
4860                                      struct dlb2_hw_domain *domain,
4861                                      struct dlb2_dir_pq_pair *queue,
4862                                      struct dlb2_create_dir_queue_args *args,
4863                                      bool vdev_req,
4864                                      unsigned int vdev_id)
4865 {
4866         unsigned int offs;
4867         u32 reg = 0;
4868
4869         /* QID write permissions are turned on when the domain is started */
4870         offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4871                 queue->id.phys_id;
4872
4873         DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), reg);
4874
4875         /* Don't timestamp QEs that pass through this queue */
4876         DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_ITS(queue->id.phys_id), reg);
4877
4878         reg = 0;
4879         DLB2_BITS_SET(reg, args->depth_threshold,
4880                       DLB2_LSP_QID_DIR_DEPTH_THRSH_THRESH);
4881         DLB2_CSR_WR(hw,
4882                     DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver, queue->id.phys_id),
4883                     reg);
4884
4885         if (vdev_req) {
4886                 offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4887                         queue->id.virt_id;
4888
4889                 reg = 0;
4890                 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VQID_V_VQID_V);
4891                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), reg);
4892
4893                 reg = 0;
4894                 DLB2_BITS_SET(reg, queue->id.phys_id,
4895                               DLB2_SYS_VF_DIR_VQID2QID_QID);
4896                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), reg);
4897         }
4898
4899         reg = 0;
4900         DLB2_BIT_SET(reg, DLB2_SYS_DIR_QID_V_QID_V);
4901         DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), reg);
4902
4903         queue->queue_configured = true;
4904 }
4905
4906 static void
4907 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
4908                                u32 domain_id,
4909                                struct dlb2_create_dir_queue_args *args,
4910                                bool vdev_req,
4911                                unsigned int vdev_id)
4912 {
4913         DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
4914         if (vdev_req)
4915                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4916         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
4917         DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
4918 }
4919
4920 static int
4921 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
4922                                   u32 domain_id,
4923                                   struct dlb2_create_dir_queue_args *args,
4924                                   struct dlb2_cmd_response *resp,
4925                                   bool vdev_req,
4926                                   unsigned int vdev_id,
4927                                   struct dlb2_hw_domain **out_domain,
4928                                   struct dlb2_dir_pq_pair **out_queue)
4929 {
4930         struct dlb2_hw_domain *domain;
4931         struct dlb2_dir_pq_pair *pq;
4932
4933         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4934
4935         if (!domain) {
4936                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4937                 return -EINVAL;
4938         }
4939
4940         if (!domain->configured) {
4941                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4942                 return -EINVAL;
4943         }
4944
4945         if (domain->started) {
4946                 resp->status = DLB2_ST_DOMAIN_STARTED;
4947                 return -EINVAL;
4948         }
4949
4950         /*
4951          * If the user claims the port is already configured, validate the port
4952          * ID, that it belongs to this domain, and that it has been configured.
4953          */
4954         if (args->port_id != -1) {
4955                 pq = dlb2_get_domain_used_dir_pq(hw,
4956                                                  args->port_id,
4957                                                  vdev_req,
4958                                                  domain);
4959
4960                 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4961                     !pq->port_configured) {
4962                         resp->status = DLB2_ST_INVALID_PORT_ID;
4963                         return -EINVAL;
4964                 }
4965         } else {
4966                 /*
4967                  * If the queue's port is not configured, validate that a free
4968                  * port-queue pair is available.
4969                  */
4970                 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4971                                         typeof(*pq));
4972                 if (!pq) {
4973                         resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
4974                         return -EINVAL;
4975                 }
4976         }
4977
4978         *out_domain = domain;
4979         *out_queue = pq;
4980
4981         return 0;
4982 }
4983
4984 /**
4985  * dlb2_hw_create_dir_queue() - create a directed queue
4986  * @hw: dlb2_hw handle for a particular device.
4987  * @domain_id: domain ID.
4988  * @args: queue creation arguments.
4989  * @resp: response structure.
4990  * @vdev_req: indicates whether this request came from a vdev.
4991  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4992  *
4993  * This function creates a directed queue.
4994  *
4995  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4996  * device.
4997  *
4998  * Return:
4999  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5000  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5001  * contains the queue ID.
5002  *
5003  * resp->id contains a virtual ID if vdev_req is true.
5004  *
5005  * Errors:
5006  * EINVAL - A requested resource is unavailable, the domain is not configured,
5007  *          or the domain has already been started.
5008  * EFAULT - Internal error (resp->status not set).
5009  */
5010 int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
5011                              u32 domain_id,
5012                              struct dlb2_create_dir_queue_args *args,
5013                              struct dlb2_cmd_response *resp,
5014                              bool vdev_req,
5015                              unsigned int vdev_id)
5016 {
5017         struct dlb2_dir_pq_pair *queue;
5018         struct dlb2_hw_domain *domain;
5019         int ret;
5020
5021         dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
5022
5023         /*
5024          * Verify that hardware resources are available before attempting to
5025          * satisfy the request. This simplifies the error unwinding code.
5026          */
5027         ret = dlb2_verify_create_dir_queue_args(hw,
5028                                                 domain_id,
5029                                                 args,
5030                                                 resp,
5031                                                 vdev_req,
5032                                                 vdev_id,
5033                                                 &domain,
5034                                                 &queue);
5035         if (ret)
5036                 return ret;
5037
5038         dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
5039
5040         /*
5041          * Configuration succeeded, so move the resource from the 'avail' to
5042          * the 'used' list (if it's not already there).
5043          */
5044         if (args->port_id == -1) {
5045                 dlb2_list_del(&domain->avail_dir_pq_pairs,
5046                               &queue->domain_list);
5047
5048                 dlb2_list_add(&domain->used_dir_pq_pairs,
5049                               &queue->domain_list);
5050         }
5051
5052         resp->status = 0;
5053
5054         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
5055
5056         return 0;
5057 }
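
/*
 * Illustrative only: to pair a directed queue with a port created earlier,
 * a caller might pass that port's ID in args->port_id; with port_id == -1
 * the queue claims a free port-queue pair instead. The IDs and threshold
 * below are assumptions for this sketch.
 *
 *      struct dlb2_create_dir_queue_args qargs = {
 *              .port_id = dir_port_id,   // returned by a prior port create
 *              .depth_threshold = 256,
 *      };
 *      struct dlb2_cmd_response resp = {0};
 *
 *      if (dlb2_hw_create_dir_queue(hw, domain_id, &qargs, &resp, false, 0))
 *              return -1;
 */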
5058
5059 static bool
5060 dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
5061                                            struct dlb2_ldb_queue *queue,
5062                                            int *slot)
5063 {
5064         int i;
5065
5066         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
5067                 struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
5068
5069                 if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
5070                     map->pending_qid == queue->id.phys_id)
5071                         break;
5072         }
5073
5074         *slot = i;
5075
5076         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
5077 }
5078
5079 static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
5080                                               struct dlb2_ldb_queue *queue,
5081                                               struct dlb2_cmd_response *resp)
5082 {
5083         enum dlb2_qid_map_state state;
5084         int i;
5085
5086         /* Unused slot available? */
5087         if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
5088                 return 0;
5089
5090         /*
5091          * If the queue is already mapped (from the application's perspective),
5092          * this is simply a priority update.
5093          */
5094         state = DLB2_QUEUE_MAPPED;
5095         if (dlb2_port_find_slot_queue(port, state, queue, &i))
5096                 return 0;
5097
5098         state = DLB2_QUEUE_MAP_IN_PROG;
5099         if (dlb2_port_find_slot_queue(port, state, queue, &i))
5100                 return 0;
5101
5102         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
5103                 return 0;
5104
5105         /*
5106          * If the slot contains an unmap in progress, it's considered
5107          * available.
5108          */
5109         state = DLB2_QUEUE_UNMAP_IN_PROG;
5110         if (dlb2_port_find_slot(port, state, &i))
5111                 return 0;
5112
5113         state = DLB2_QUEUE_UNMAPPED;
5114         if (dlb2_port_find_slot(port, state, &i))
5115                 return 0;
5116
5117         resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
5118         return -EINVAL;
5119 }
5120
5121 static struct dlb2_ldb_queue *
5122 dlb2_get_domain_ldb_queue(u32 id,
5123                           bool vdev_req,
5124                           struct dlb2_hw_domain *domain)
5125 {
5126         struct dlb2_list_entry *iter;
5127         struct dlb2_ldb_queue *queue;
5128         RTE_SET_USED(iter);
5129
5130         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
5131                 return NULL;
5132
5133         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
5134                 if ((!vdev_req && queue->id.phys_id == id) ||
5135                     (vdev_req && queue->id.virt_id == id))
5136                         return queue;
5137         }
5138
5139         return NULL;
5140 }
5141
5142 static struct dlb2_ldb_port *
5143 dlb2_get_domain_used_ldb_port(u32 id,
5144                               bool vdev_req,
5145                               struct dlb2_hw_domain *domain)
5146 {
5147         struct dlb2_list_entry *iter;
5148         struct dlb2_ldb_port *port;
5149         int i;
5150         RTE_SET_USED(iter);
5151
5152         if (id >= DLB2_MAX_NUM_LDB_PORTS)
5153                 return NULL;
5154
5155         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
5156                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
5157                         if ((!vdev_req && port->id.phys_id == id) ||
5158                             (vdev_req && port->id.virt_id == id))
5159                                 return port;
5160                 }
5161
5162                 DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter) {
5163                         if ((!vdev_req && port->id.phys_id == id) ||
5164                             (vdev_req && port->id.virt_id == id))
5165                                 return port;
5166                 }
5167         }
5168
5169         return NULL;
5170 }
5171
5172 static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
5173                                               struct dlb2_ldb_port *port,
5174                                               int slot,
5175                                               struct dlb2_map_qid_args *args)
5176 {
5177         u32 cq2priov;
5178
5179         /* Read-modify-write the priority and valid bit register */
5180         cq2priov = DLB2_CSR_RD(hw,
5181                                DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id));
5182
5183         cq2priov |= (1 << (slot + DLB2_LSP_CQ2PRIOV_V_LOC)) &
5184                     DLB2_LSP_CQ2PRIOV_V;
5185         cq2priov |= ((args->priority & 0x7) << slot * 3) &
5186                     DLB2_LSP_CQ2PRIOV_PRIO;
5187
5188         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), cq2priov);
5189
5190         dlb2_flush_csr(hw);
5191
5192         port->qid_map[slot].priority = args->priority;
5193 }
5194
5195 static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
5196                                     u32 domain_id,
5197                                     struct dlb2_map_qid_args *args,
5198                                     struct dlb2_cmd_response *resp,
5199                                     bool vdev_req,
5200                                     unsigned int vdev_id,
5201                                     struct dlb2_hw_domain **out_domain,
5202                                     struct dlb2_ldb_port **out_port,
5203                                     struct dlb2_ldb_queue **out_queue)
5204 {
5205         struct dlb2_hw_domain *domain;
5206         struct dlb2_ldb_queue *queue;
5207         struct dlb2_ldb_port *port;
5208         int id;
5209
5210         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5211
5212         if (!domain) {
5213                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5214                 return -EINVAL;
5215         }
5216
5217         if (!domain->configured) {
5218                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5219                 return -EINVAL;
5220         }
5221
5222         id = args->port_id;
5223
5224         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5225
5226         if (!port || !port->configured) {
5227                 resp->status = DLB2_ST_INVALID_PORT_ID;
5228                 return -EINVAL;
5229         }
5230
5231         if (args->priority >= DLB2_QID_PRIORITIES) {
5232                 resp->status = DLB2_ST_INVALID_PRIORITY;
5233                 return -EINVAL;
5234         }
5235
5236         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5237
5238         if (!queue || !queue->configured) {
5239                 resp->status = DLB2_ST_INVALID_QID;
5240                 return -EINVAL;
5241         }
5242
5243         if (queue->domain_id.phys_id != domain->id.phys_id) {
5244                 resp->status = DLB2_ST_INVALID_QID;
5245                 return -EINVAL;
5246         }
5247
5248         if (port->domain_id.phys_id != domain->id.phys_id) {
5249                 resp->status = DLB2_ST_INVALID_PORT_ID;
5250                 return -EINVAL;
5251         }
5252
5253         *out_domain = domain;
5254         *out_queue = queue;
5255         *out_port = port;
5256
5257         return 0;
5258 }
5259
5260 static void dlb2_log_map_qid(struct dlb2_hw *hw,
5261                              u32 domain_id,
5262                              struct dlb2_map_qid_args *args,
5263                              bool vdev_req,
5264                              unsigned int vdev_id)
5265 {
5266         DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
5267         if (vdev_req)
5268                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5269         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5270                     domain_id);
5271         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5272                     args->port_id);
5273         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5274                     args->qid);
5275         DLB2_HW_DBG(hw, "\tPriority:  %d\n",
5276                     args->priority);
5277 }
5278
5279 /**
5280  * dlb2_hw_map_qid() - map a load-balanced queue to a load-balanced port
5281  * @hw: dlb2_hw handle for a particular device.
5282  * @domain_id: domain ID.
5283  * @args: map QID arguments.
5284  * @resp: response structure.
5285  * @vdev_req: indicates whether this request came from a vdev.
5286  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5287  *
5288  * This function configures the DLB to schedule QEs from the specified queue
5289  * to the specified port. Each load-balanced port can be mapped to up to 8
5290  * queues; each load-balanced queue can potentially map to all the
5291  * load-balanced ports.
5292  *
5293  * A successful return does not necessarily mean the mapping was configured. If
5294  * this function is unable to immediately map the queue to the port, it will
5295  * add the requested operation to a per-port list of pending map/unmap
5296  * operations, and (if it's not already running) launch a kernel thread that
5297  * periodically attempts to process all pending operations. In a sense, this is
5298  * an asynchronous function.
5299  *
5300  * This asynchronicity creates two views of the state of hardware: the actual
5301  * hardware state and the requested state (as if every request completed
5302  * immediately). If there are any pending map/unmap operations, the requested
5303  * state will differ from the actual state. All validation is performed with
5304  * respect to the pending state; for instance, if there are 8 pending map
5305  * operations for port X, a request for a 9th will fail because a load-balanced
5306  * port can only map up to 8 queues.
5307  *
5308  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5309  * device.
5310  *
5311  * Return:
5312  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5313  * assigned a detailed error code from enum dlb2_error.
5314  *
5315  * Errors:
5316  * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
5317  *          the domain is not configured.
5318  * EFAULT - Internal error (resp->status not set).
5319  */
5320 int dlb2_hw_map_qid(struct dlb2_hw *hw,
5321                     u32 domain_id,
5322                     struct dlb2_map_qid_args *args,
5323                     struct dlb2_cmd_response *resp,
5324                     bool vdev_req,
5325                     unsigned int vdev_id)
5326 {
5327         struct dlb2_hw_domain *domain;
5328         struct dlb2_ldb_queue *queue;
5329         enum dlb2_qid_map_state st;
5330         struct dlb2_ldb_port *port;
5331         int ret, i;
5332         u8 prio;
5333
5334         dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
5335
5336         /*
5337          * Verify that hardware resources are available before attempting to
5338          * satisfy the request. This simplifies the error unwinding code.
5339          */
5340         ret = dlb2_verify_map_qid_args(hw,
5341                                        domain_id,
5342                                        args,
5343                                        resp,
5344                                        vdev_req,
5345                                        vdev_id,
5346                                        &domain,
5347                                        &port,
5348                                        &queue);
5349         if (ret)
5350                 return ret;
5351
5352         prio = args->priority;
5353
5354         /*
5355          * If there are any outstanding detach operations for this port,
5356          * attempt to complete them. This may be necessary to free up a QID
5357          * slot for this requested mapping.
5358          */
5359         if (port->num_pending_removals)
5360                 dlb2_domain_finish_unmap_port(hw, domain, port);
5361
5362         ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
5363         if (ret)
5364                 return ret;
5365
5366         /* Hardware requires disabling the CQ before mapping QIDs. */
5367         if (port->enabled)
5368                 dlb2_ldb_port_cq_disable(hw, port);
5369
5370         /*
5371          * If this is only a priority change, don't perform the full QID->CQ
5372          * mapping procedure.
5373          */
5374         st = DLB2_QUEUE_MAPPED;
5375         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5376                 if (prio != port->qid_map[i].priority) {
5377                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5378                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5379                 }
5380
5381                 st = DLB2_QUEUE_MAPPED;
5382                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5383                 if (ret)
5384                         return ret;
5385
5386                 goto map_qid_done;
5387         }
5388
5389         st = DLB2_QUEUE_UNMAP_IN_PROG;
5390         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5391                 if (prio != port->qid_map[i].priority) {
5392                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5393                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5394                 }
5395
5396                 st = DLB2_QUEUE_MAPPED;
5397                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5398                 if (ret)
5399                         return ret;
5400
5401                 goto map_qid_done;
5402         }
5403
5404         /*
5405          * If this is a priority change on an in-progress mapping, don't
5406          * perform the full QID->CQ mapping procedure.
5407          */
5408         st = DLB2_QUEUE_MAP_IN_PROG;
5409         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5410                 port->qid_map[i].priority = prio;
5411
5412                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5413
5414                 goto map_qid_done;
5415         }
5416
5417         /*
5418          * If this is a priority change on a pending mapping, update the
5419          * pending priority.
5420          */
5421         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5422                 port->qid_map[i].pending_priority = prio;
5423
5424                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5425
5426                 goto map_qid_done;
5427         }
5428
5429         /*
5430          * If all the CQ's slots are in use, then there's an unmap in progress
5431          * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
5432          * mapping to pending_map and return. When the removal is completed for
5433          * the slot's current occupant, this mapping will be performed.
5434          */
5435         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
5436                 if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
5437                         enum dlb2_qid_map_state new_st;
5438
5439                         port->qid_map[i].pending_qid = queue->id.phys_id;
5440                         port->qid_map[i].pending_priority = prio;
5441
5442                         new_st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
5443
5444                         ret = dlb2_port_slot_state_transition(hw, port, queue,
5445                                                               i, new_st);
5446                         if (ret)
5447                                 return ret;
5448
5449                         DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
5450
5451                         goto map_qid_done;
5452                 }
5453         }
5454
5455         /*
5456          * If the domain has started, a special "dynamic" CQ->queue mapping
5457          * procedure is required in order to safely update the CQ<->QID tables.
5458          * The "static" procedure cannot be used when traffic is flowing,
5459          * because the CQ<->QID tables cannot be updated atomically and the
5460          * scheduler won't see the new mapping unless the queue's if_status
5461          * changes, which isn't guaranteed.
5462          */
5463         ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
5464
5465         /* If ret is less than zero, it's due to an internal error */
5466         if (ret < 0)
5467                 return ret;
5468
5469 map_qid_done:
5470         if (port->enabled)
5471                 dlb2_ldb_port_cq_enable(hw, port);
5472
5473         resp->status = 0;
5474
5475         return 0;
5476 }
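
/*
 * Illustrative only: mapping a queue to a port, then changing only its
 * priority, might look as below; the IDs and priorities are assumptions for
 * this sketch. Because the map may complete asynchronously, a 0 return means
 * the request was accepted, not necessarily that the mapping has taken
 * effect in hardware yet.
 *
 *      struct dlb2_map_qid_args map = {
 *              .port_id = ldb_port_id,
 *              .qid = ldb_queue_id,
 *              .priority = 0,          // must be < DLB2_QID_PRIORITIES
 *      };
 *      struct dlb2_cmd_response resp = {0};
 *
 *      if (dlb2_hw_map_qid(hw, domain_id, &map, &resp, false, 0))
 *              return -1;
 *
 *      // Re-issuing with a different priority only updates the priority.
 *      map.priority = 3;
 *      if (dlb2_hw_map_qid(hw, domain_id, &map, &resp, false, 0))
 *              return -1;
 */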
5477
5478 static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
5479                                u32 domain_id,
5480                                struct dlb2_unmap_qid_args *args,
5481                                bool vdev_req,
5482                                unsigned int vdev_id)
5483 {
5484         DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
5485         if (vdev_req)
5486                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5487         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5488                     domain_id);
5489         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5490                     args->port_id);
5491         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5492                     args->qid);
5493         if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
5494                 DLB2_HW_DBG(hw, "\tQueue's num mappings:  %d\n",
5495                             hw->rsrcs.ldb_queues[args->qid].num_mappings);
5496 }
5497
5498 static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
5499                                       u32 domain_id,
5500                                       struct dlb2_unmap_qid_args *args,
5501                                       struct dlb2_cmd_response *resp,
5502                                       bool vdev_req,
5503                                       unsigned int vdev_id,
5504                                       struct dlb2_hw_domain **out_domain,
5505                                       struct dlb2_ldb_port **out_port,
5506                                       struct dlb2_ldb_queue **out_queue)
5507 {
5508         enum dlb2_qid_map_state state;
5509         struct dlb2_hw_domain *domain;
5510         struct dlb2_ldb_queue *queue;
5511         struct dlb2_ldb_port *port;
5512         int slot;
5513         int id;
5514
5515         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5516
5517         if (!domain) {
5518                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5519                 return -EINVAL;
5520         }
5521
5522         if (!domain->configured) {
5523                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5524                 return -EINVAL;
5525         }
5526
5527         id = args->port_id;
5528
5529         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5530
5531         if (!port || !port->configured) {
5532                 resp->status = DLB2_ST_INVALID_PORT_ID;
5533                 return -EINVAL;
5534         }
5535
5536         if (port->domain_id.phys_id != domain->id.phys_id) {
5537                 resp->status = DLB2_ST_INVALID_PORT_ID;
5538                 return -EINVAL;
5539         }
5540
5541         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5542
5543         if (!queue || !queue->configured) {
5544                 DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
5545                             __func__, args->qid);
5546                 resp->status = DLB2_ST_INVALID_QID;
5547                 return -EINVAL;
5548         }
5549
5550         /*
5551          * Verify that the port has the queue mapped. From the application's
5552          * perspective a queue is mapped if it is actually mapped, the map is
5553          * in progress, or the map is blocked pending an unmap.
5554          */
5555         state = DLB2_QUEUE_MAPPED;
5556         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5557                 goto done;
5558
5559         state = DLB2_QUEUE_MAP_IN_PROG;
5560         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5561                 goto done;
5562
5563         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
5564                 goto done;
5565
5566         resp->status = DLB2_ST_INVALID_QID;
5567         return -EINVAL;
5568
5569 done:
5570         *out_domain = domain;
5571         *out_port = port;
5572         *out_queue = queue;
5573
5574         return 0;
5575 }
5576
5577 /**
5578  * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
5579  * @hw: dlb2_hw handle for a particular device.
5580  * @domain_id: domain ID.
5581  * @args: unmap QID arguments.
5582  * @resp: response structure.
5583  * @vdev_req: indicates whether this request came from a vdev.
5584  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5585  *
5586  * This function configures the DLB to stop scheduling QEs from the specified
5587  * queue to the specified port.
5588  *
5589  * A successful return does not necessarily mean the mapping was removed. If
5590  * this function is unable to immediately unmap the queue from the port, it
5591  * will add the requested operation to a per-port list of pending map/unmap
5592  * operations, and (if it's not already running) launch a kernel thread that
5593  * periodically attempts to process all pending operations. See
5594  * dlb2_hw_map_qid() for more details.
5595  *
5596  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5597  * device.
5598  *
5599  * Return:
5600  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5601  * assigned a detailed error code from enum dlb2_error.
5602  *
5603  * Errors:
5604  * EINVAL - A requested resource is unavailable, the port or queue ID is
5605  *          invalid, or the domain is not configured.
5606  * EFAULT - Internal error (resp->status not set).
5607  */
5608 int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
5609                       u32 domain_id,
5610                       struct dlb2_unmap_qid_args *args,
5611                       struct dlb2_cmd_response *resp,
5612                       bool vdev_req,
5613                       unsigned int vdev_id)
5614 {
5615         struct dlb2_hw_domain *domain;
5616         struct dlb2_ldb_queue *queue;
5617         enum dlb2_qid_map_state st;
5618         struct dlb2_ldb_port *port;
5619         bool unmap_complete;
5620         int i, ret;
5621
5622         dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
5623
5624         /*
5625          * Verify that hardware resources are available before attempting to
5626          * satisfy the request. This simplifies the error unwinding code.
5627          */
5628         ret = dlb2_verify_unmap_qid_args(hw,
5629                                          domain_id,
5630                                          args,
5631                                          resp,
5632                                          vdev_req,
5633                                          vdev_id,
5634                                          &domain,
5635                                          &port,
5636                                          &queue);
5637         if (ret)
5638                 return ret;
5639
5640         /*
5641          * If the queue hasn't been mapped yet, we need to update the slot's
5642          * state and re-enable the queue's inflights.
5643          */
5644         st = DLB2_QUEUE_MAP_IN_PROG;
5645         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5646                 /*
5647                  * Since the in-progress map was aborted, re-enable the QID's
5648                  * inflights.
5649                  */
5650                 if (queue->num_pending_additions == 0)
5651                         dlb2_ldb_queue_set_inflight_limit(hw, queue);
5652
5653                 st = DLB2_QUEUE_UNMAPPED;
5654                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5655                 if (ret)
5656                         return ret;
5657
5658                 goto unmap_qid_done;
5659         }
5660
5661         /*
5662          * If the queue mapping is on hold pending an unmap, we simply need to
5663          * update the slot's state.
5664          */
5665         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5666                 st = DLB2_QUEUE_UNMAP_IN_PROG;
5667                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5668                 if (ret)
5669                         return ret;
5670
5671                 goto unmap_qid_done;
5672         }
5673
5674         st = DLB2_QUEUE_MAPPED;
5675         if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
5676                 DLB2_HW_ERR(hw,
5677                             "[%s()] Internal error: no available CQ slots\n",
5678                             __func__);
5679                 return -EFAULT;
5680         }
5681
5682         /*
5683          * QID->CQ mapping removal is an asynchronous procedure. It requires
5684          * stopping the DLB2 from scheduling this CQ, draining all inflights
5685          * from the CQ, then unmapping the queue from the CQ. This function
5686          * simply marks the port as needing the queue unmapped, and (if
5687          * necessary) starts the unmapping worker thread.
5688          */
5689         dlb2_ldb_port_cq_disable(hw, port);
5690
5691         st = DLB2_QUEUE_UNMAP_IN_PROG;
5692         ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5693         if (ret)
5694                 return ret;
5695
5696         /*
5697          * Attempt to finish the unmapping now, in case the port has no
5698          * outstanding inflights. If that's not the case, this will fail and
5699          * the unmapping will be completed at a later time.
5700          */
5701         unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
5702
5703         /*
5704          * If the unmapping couldn't complete immediately, launch the worker
5705          * thread (if it isn't already launched) to finish it later.
5706          */
5707         if (!unmap_complete && !os_worker_active(hw))
5708                 os_schedule_work(hw);
5709
5710 unmap_qid_done:
5711         resp->status = 0;
5712
5713         return 0;
5714 }
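
/*
 * Illustrative sketch (not part of the upstream driver): a minimal caller of
 * dlb2_hw_unmap_qid() from a hypothetical PF request handler. The domain,
 * port and queue IDs are placeholders supplied by the caller. Note that a
 * zero return only means the request was accepted; the unmap itself may
 * complete later via the map/unmap worker.
 */
static int dlb2_example_request_unmap(struct dlb2_hw *hw, u32 domain_id,
                                      u32 port_id, u32 qid)
{
        struct dlb2_unmap_qid_args args = {0};
        struct dlb2_cmd_response resp = {0};
        int ret;

        args.port_id = port_id;
        args.qid = qid;

        /* PF-originated request: vdev_req is false and vdev_id is unused. */
        ret = dlb2_hw_unmap_qid(hw, domain_id, &args, &resp, false, 0);
        if (ret)
                DLB2_HW_ERR(hw, "[%s()] unmap failed: ret=%d status=%u\n",
                            __func__, ret, resp.status);

        return ret;
}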
5715
5716 static void
5717 dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
5718                                   struct dlb2_pending_port_unmaps_args *args,
5719                                   bool vdev_req,
5720                                   unsigned int vdev_id)
5721 {
5722         DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
5723         if (vdev_req)
5724                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
5725         DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
5726 }
5727
5728 /**
5729  * dlb2_hw_pending_port_unmaps() - returns the number of unmap operations in
5730  *      progress.
5731  * @hw: dlb2_hw handle for a particular device.
5732  * @domain_id: domain ID.
5733  * @args: pending port unmaps arguments.
5734  * @resp: response structure.
5735  * @vdev_req: indicates whether this request came from a vdev.
5736  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5737  *
5738  * Return:
5739  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5740  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5741  * contains the number of unmaps in progress.
5742  *
5743  * Errors:
5744  * EINVAL - Invalid domain ID or port ID.
5745  */
5746 int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
5747                                 u32 domain_id,
5748                                 struct dlb2_pending_port_unmaps_args *args,
5749                                 struct dlb2_cmd_response *resp,
5750                                 bool vdev_req,
5751                                 unsigned int vdev_id)
5752 {
5753         struct dlb2_hw_domain *domain;
5754         struct dlb2_ldb_port *port;
5755
5756         dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
5757
5758         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5759
5760         if (!domain) {
5761                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5762                 return -EINVAL;
5763         }
5764
5765         port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
5766         if (!port || !port->configured) {
5767                 resp->status = DLB2_ST_INVALID_PORT_ID;
5768                 return -EINVAL;
5769         }
5770
5771         resp->id = port->num_pending_removals;
5772
5773         return 0;
5774 }
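
/*
 * Illustrative sketch (not part of the upstream driver): polling a port with
 * dlb2_hw_pending_port_unmaps() until its pending unmaps have drained. The
 * retry bound is an arbitrary assumption, and a real caller would delay
 * between polls rather than spin.
 */
static int dlb2_example_wait_for_unmaps(struct dlb2_hw *hw, u32 domain_id,
                                        u32 port_id)
{
        struct dlb2_pending_port_unmaps_args args = {0};
        struct dlb2_cmd_response resp = {0};
        int retries = 1000;

        args.port_id = port_id;

        while (retries-- > 0) {
                if (dlb2_hw_pending_port_unmaps(hw, domain_id, &args, &resp,
                                                false, 0))
                        return -EINVAL;

                /* On success, resp.id holds the number of unmaps in flight. */
                if (resp.id == 0)
                        return 0;
        }

        return -ETIMEDOUT;
}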
5775
5776 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
5777                                          u32 domain_id,
5778                                          struct dlb2_cmd_response *resp,
5779                                          bool vdev_req,
5780                                          unsigned int vdev_id,
5781                                          struct dlb2_hw_domain **out_domain)
5782 {
5783         struct dlb2_hw_domain *domain;
5784
5785         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5786
5787         if (!domain) {
5788                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5789                 return -EINVAL;
5790         }
5791
5792         if (!domain->configured) {
5793                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5794                 return -EINVAL;
5795         }
5796
5797         if (domain->started) {
5798                 resp->status = DLB2_ST_DOMAIN_STARTED;
5799                 return -EINVAL;
5800         }
5801
5802         *out_domain = domain;
5803
5804         return 0;
5805 }
5806
5807 static void dlb2_log_start_domain(struct dlb2_hw *hw,
5808                                   u32 domain_id,
5809                                   bool vdev_req,
5810                                   unsigned int vdev_id)
5811 {
5812         DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
5813         if (vdev_req)
5814                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5815         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5816 }
5817
5818 /**
5819  * dlb2_hw_start_domain() - start a scheduling domain
5820  * @hw: dlb2_hw handle for a particular device.
5821  * @domain_id: domain ID.
5822  * @args: start domain arguments.
5823  * @resp: response structure.
5824  * @vdev_req: indicates whether this request came from a vdev.
5825  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5826  *
5827  * This function starts a scheduling domain, which allows applications to send
5828  * traffic through it. Once a domain is started, its resources can no longer be
5829  * configured (besides QID remapping and port enable/disable).
5830  *
5831  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5832  * device.
5833  *
5834  * Return:
5835  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5836  * assigned a detailed error code from enum dlb2_error.
5837  *
5838  * Errors:
5839  * EINVAL - The domain is not configured, or the domain is already started.
5840  */
5841 int
5842 dlb2_hw_start_domain(struct dlb2_hw *hw,
5843                      u32 domain_id,
5844                      struct dlb2_start_domain_args *args,
5845                      struct dlb2_cmd_response *resp,
5846                      bool vdev_req,
5847                      unsigned int vdev_id)
5848 {
5849         struct dlb2_list_entry *iter;
5850         struct dlb2_dir_pq_pair *dir_queue;
5851         struct dlb2_ldb_queue *ldb_queue;
5852         struct dlb2_hw_domain *domain;
5853         int ret;
5854         RTE_SET_USED(args);
5855         RTE_SET_USED(iter);
5856
5857         dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
5858
5859         ret = dlb2_verify_start_domain_args(hw,
5860                                             domain_id,
5861                                             resp,
5862                                             vdev_req,
5863                                             vdev_id,
5864                                             &domain);
5865         if (ret)
5866                 return ret;
5867
5868         /*
5869          * Enable load-balanced and directed queue write permissions for the
5870          * queues this domain owns. Without this, the DLB2 will drop all
5871          * incoming traffic to those queues.
5872          */
5873         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
5874                 u32 vasqid_v = 0;
5875                 unsigned int offs;
5876
5877                 DLB2_BIT_SET(vasqid_v, DLB2_SYS_LDB_VASQID_V_VASQID_V);
5878
5879                 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
5880                         ldb_queue->id.phys_id;
5881
5882                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), vasqid_v);
5883         }
5884
5885         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
5886                 u32 vasqid_v = 0;
5887                 unsigned int offs;
5888
5889                 DLB2_BIT_SET(vasqid_v, DLB2_SYS_DIR_VASQID_V_VASQID_V);
5890
5891                 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
5892                         dir_queue->id.phys_id;
5893
5894                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), vasqid_v);
5895         }
5896
5897         dlb2_flush_csr(hw);
5898
5899         domain->started = true;
5900
5901         resp->status = 0;
5902
5903         return 0;
5904 }
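
/*
 * Illustrative sketch (not part of the upstream driver): starting a domain
 * once its queues and ports have been configured. dlb2_hw_start_domain()
 * ignores its args parameter (RTE_SET_USED), so a zeroed structure is passed.
 * The domain ID is a placeholder supplied by the caller.
 */
static int dlb2_example_start_domain(struct dlb2_hw *hw, u32 domain_id)
{
        struct dlb2_start_domain_args args = {0};
        struct dlb2_cmd_response resp = {0};
        int ret;

        ret = dlb2_hw_start_domain(hw, domain_id, &args, &resp, false, 0);
        if (ret)
                DLB2_HW_ERR(hw, "[%s()] start domain %u failed: status=%u\n",
                            __func__, domain_id, resp.status);

        return ret;
}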
5905
5906 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
5907                                          u32 domain_id,
5908                                          u32 queue_id,
5909                                          bool vdev_req,
5910                                          unsigned int vf_id)
5911 {
5912         DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
5913         if (vdev_req)
5914                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5915         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5916         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5917 }
5918
5919 /**
5920  * dlb2_hw_get_dir_queue_depth() - returns the depth of a directed queue
5921  * @hw: dlb2_hw handle for a particular device.
5922  * @domain_id: domain ID.
5923  * @args: queue depth arguments.
5924  * @resp: response structure.
5925  * @vdev_req: indicates whether this request came from a vdev.
5926  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5927  *
5928  * This function returns the depth of a directed queue.
5929  *
5930  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5931  * device.
5932  *
5933  * Return:
5934  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5935  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5936  * contains the depth.
5937  *
5938  * Errors:
5939  * EINVAL - Invalid domain ID or queue ID.
5940  */
5941 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
5942                                 u32 domain_id,
5943                                 struct dlb2_get_dir_queue_depth_args *args,
5944                                 struct dlb2_cmd_response *resp,
5945                                 bool vdev_req,
5946                                 unsigned int vdev_id)
5947 {
5948         struct dlb2_dir_pq_pair *queue;
5949         struct dlb2_hw_domain *domain;
5950         int id;
5951
5952         id = domain_id;
5953
5954         dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
5955                                      vdev_req, vdev_id);
5956
5957         domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
5958         if (!domain) {
5959                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5960                 return -EINVAL;
5961         }
5962
5963         id = args->queue_id;
5964
5965         queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
5966         if (!queue) {
5967                 resp->status = DLB2_ST_INVALID_QID;
5968                 return -EINVAL;
5969         }
5970
5971         resp->id = dlb2_dir_queue_depth(hw, queue);
5972
5973         return 0;
5974 }
5975
5976 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
5977                                          u32 domain_id,
5978                                          u32 queue_id,
5979                                          bool vdev_req,
5980                                          unsigned int vf_id)
5981 {
5982         DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
5983         if (vdev_req)
5984                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5985         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5986         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5987 }
5988
5989 /**
5990  * dlb2_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
5991  * @hw: dlb2_hw handle for a particular device.
5992  * @domain_id: domain ID.
5993  * @args: queue depth arguments.
5994  * @resp: response structure.
5995  * @vdev_req: indicates whether this request came from a vdev.
5996  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5997  *
5998  * This function returns the depth of a load-balanced queue.
5999  *
6000  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
6001  * device.
6002  *
6003  * Return:
6004  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
6005  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
6006  * contains the depth.
6007  *
6008  * Errors:
6009  * EINVAL - Invalid domain ID or queue ID.
6010  */
6011 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
6012                                 u32 domain_id,
6013                                 struct dlb2_get_ldb_queue_depth_args *args,
6014                                 struct dlb2_cmd_response *resp,
6015                                 bool vdev_req,
6016                                 unsigned int vdev_id)
6017 {
6018         struct dlb2_hw_domain *domain;
6019         struct dlb2_ldb_queue *queue;
6020
6021         dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
6022                                      vdev_req, vdev_id);
6023
6024         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6025         if (!domain) {
6026                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6027                 return -EINVAL;
6028         }
6029
6030         queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
6031         if (!queue) {
6032                 resp->status = DLB2_ST_INVALID_QID;
6033                 return -EINVAL;
6034         }
6035
6036         resp->id = dlb2_ldb_queue_depth(hw, queue);
6037
6038         return 0;
6039 }
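
/*
 * Illustrative sketch (not part of the upstream driver): checking whether a
 * load-balanced queue has drained by reading its depth; the directed-queue
 * variant (dlb2_hw_get_dir_queue_depth()) is used the same way. The domain
 * and queue IDs are placeholders.
 */
static bool dlb2_example_ldb_queue_is_empty(struct dlb2_hw *hw, u32 domain_id,
                                            u32 queue_id)
{
        struct dlb2_get_ldb_queue_depth_args args = {0};
        struct dlb2_cmd_response resp = {0};

        args.queue_id = queue_id;

        if (dlb2_hw_get_ldb_queue_depth(hw, domain_id, &args, &resp, false, 0))
                return false;

        /* On success, resp.id holds the queue depth. */
        return resp.id == 0;
}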
6040
6041 /**
6042  * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
6043  * @hw: dlb2_hw handle for a particular device.
6044  *
6045  * This function attempts to finish any outstanding unmap procedures.
6046  * This function should be called by the kernel thread responsible for
6047  * finishing map/unmap procedures.
6048  *
6049  * Return:
6050  * Returns the number of procedures that weren't completed.
6051  */
6052 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
6053 {
6054         int i, num = 0;
6055
6056         /* Finish queue unmap jobs for any domain that needs it */
6057         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6058                 struct dlb2_hw_domain *domain = &hw->domains[i];
6059
6060                 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
6061         }
6062
6063         return num;
6064 }
6065
6066 /**
6067  * dlb2_finish_map_qid_procedures() - finish any pending map procedures
6068  * @hw: dlb2_hw handle for a particular device.
6069  *
6070  * This function attempts to finish any outstanding map procedures.
6071  * This function should be called by the kernel thread responsible for
6072  * finishing map/unmap procedures.
6073  *
6074  * Return:
6075  * Returns the number of procedures that weren't completed.
6076  */
6077 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
6078 {
6079         int i, num = 0;
6080
6081         /* Finish queue map jobs for any domain that needs it */
6082         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6083                 struct dlb2_hw_domain *domain = &hw->domains[i];
6084
6085                 num += dlb2_domain_finish_map_qid_procedures(hw, domain);
6086         }
6087
6088         return num;
6089 }
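
/*
 * Illustrative sketch (not part of the upstream driver): the kind of work
 * item that os_schedule_work() would run. It retries all outstanding unmap
 * and map procedures and reports whether anything is still pending, so the
 * OS-dependent layer can decide whether to reschedule itself.
 */
static bool dlb2_example_process_map_unmap_work(struct dlb2_hw *hw)
{
        unsigned int pending = 0;

        pending += dlb2_finish_unmap_qid_procedures(hw);
        pending += dlb2_finish_map_qid_procedures(hw);

        /* Non-zero means some CQs still have inflights to drain. */
        return pending != 0;
}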
6090
6091 /**
6092  * dlb2_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports.
6093  * @hw: dlb2_hw handle for a particular device.
6094  *
6095  * This function must be called prior to configuring scheduling domains.
6096  */
6098 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
6099 {
6100         u32 ctrl;
6101
6102         ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6103
6104         DLB2_BIT_SET(ctrl,
6105                      DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_DIR_CQ_MODE);
6106
6107         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6108 }
6109
6110 /**
6111  * dlb2_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
6112  *      ports.
6113  * @hw: dlb2_hw handle for a particular device.
6114  *
6115  * This function must be called prior to configuring scheduling domains.
6116  */
6117 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
6118 {
6119         u32 ctrl;
6120
6121         ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6122
6123         DLB2_BIT_SET(ctrl,
6124                      DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_LDB_CQ_MODE);
6125
6126         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6127 }
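
/*
 * Illustrative sketch (not part of the upstream driver): enabling sparse CQ
 * mode for both port types during device initialization, before any
 * scheduling domain is configured. Whether sparse mode is wanted is a policy
 * choice of the caller; the "sparse_cq" flag here is an assumption.
 */
static void dlb2_example_init_cq_modes(struct dlb2_hw *hw, bool sparse_cq)
{
        if (!sparse_cq)
                return;

        /* Must be called prior to configuring any scheduling domain. */
        dlb2_hw_enable_sparse_ldb_cq_mode(hw);
        dlb2_hw_enable_sparse_dir_cq_mode(hw);
}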
6128
6129 /**
6130  * dlb2_get_group_sequence_numbers() - return a group's number of SNs per queue
6131  * @hw: dlb2_hw handle for a particular device.
6132  * @group_id: sequence number group ID.
6133  *
6134  * This function returns the configured number of sequence numbers per queue
6135  * for the specified group.
6136  *
6137  * Return:
6138  * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
6139  */
6140 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, u32 group_id)
6141 {
6142         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6143                 return -EINVAL;
6144
6145         return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
6146 }
6147
6148 /**
6149  * dlb2_get_group_sequence_number_occupancy() - return a group's in-use slots
6150  * @hw: dlb2_hw handle for a particular device.
6151  * @group_id: sequence number group ID.
6152  *
6153  * This function returns the group's number of in-use slots (i.e. load-balanced
6154  * queues using the specified group).
6155  *
6156  * Return:
6157  * Returns -EINVAL if group_id is invalid, else the group's occupancy.
6158  */
6159 int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw, u32 group_id)
6160 {
6161         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6162                 return -EINVAL;
6163
6164         return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
6165 }
6166
6167 static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
6168                                                 u32 group_id,
6169                                                 u32 val)
6170 {
6171         DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
6172         DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
6173         DLB2_HW_DBG(hw, "\tValue:    %u\n", val);
6174 }
6175
6176 /**
6177  * dlb2_set_group_sequence_numbers() - assign a group's number of SNs per queue
6178  * @hw: dlb2_hw handle for a particular device.
6179  * @group_id: sequence number group ID.
6180  * @val: requested amount of sequence numbers per queue.
6181  *
6182  * This function configures the group's number of sequence numbers per queue.
6183  * val can be a power of two between 64 and 1024, inclusive. This setting can
6184  * be configured until the first ordered load-balanced queue is configured, at
6185  * which point the configuration is locked.
6186  *
6187  * Return:
6188  * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
6189  * ordered queue is configured.
6190  */
6191 int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
6192                                     u32 group_id,
6193                                     u32 val)
6194 {
6195         const u32 valid_allocations[] = {64, 128, 256, 512, 1024};
6196         struct dlb2_sn_group *group;
6197         u32 sn_mode = 0;
6198         int mode;
6199
6200         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6201                 return -EINVAL;
6202
6203         group = &hw->rsrcs.sn_groups[group_id];
6204
6205         /*
6206          * Once the first load-balanced queue using an SN group is configured,
6207          * the group cannot be changed.
6208          */
6209         if (group->slot_use_bitmap != 0)
6210                 return -EPERM;
6211
6212         for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
6213                 if (val == valid_allocations[mode])
6214                         break;
6215
6216         if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
6217                 return -EINVAL;
6218
6219         group->mode = mode;
6220         group->sequence_numbers_per_queue = val;
6221
6222         DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[0].mode,
6223                  DLB2_RO_GRP_SN_MODE_SN_MODE_0);
6224         DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[1].mode,
6225                  DLB2_RO_GRP_SN_MODE_SN_MODE_1);
6226
6227         DLB2_CSR_WR(hw, DLB2_RO_GRP_SN_MODE(hw->ver), sn_mode);
6228
6229         dlb2_log_set_group_sequence_numbers(hw, group_id, val);
6230
6231         return 0;
6232 }
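
/*
 * Illustrative sketch (not part of the upstream driver): resizing a sequence
 * number group only while it is unused. dlb2_set_group_sequence_numbers()
 * already rejects the request with -EPERM once an ordered queue occupies the
 * group, so the occupancy check below is only an early-out. The requested
 * value must be one of 64, 128, 256, 512 or 1024.
 */
static int dlb2_example_resize_sn_group(struct dlb2_hw *hw, u32 group_id,
                                        u32 sns_per_queue)
{
        int occupancy;

        occupancy = dlb2_get_group_sequence_number_occupancy(hw, group_id);
        if (occupancy < 0)
                return occupancy;
        if (occupancy > 0)
                return -EPERM;

        return dlb2_set_group_sequence_numbers(hw, group_id, sns_per_queue);
}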
6233