drivers/event/dlb2/pf/base/dlb2_resource_new.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #define DLB2_USE_NEW_HEADERS /* TEMPORARY FOR MERGE */
6
7 #include "dlb2_user.h"
8
9 #include "dlb2_hw_types_new.h"
10 #include "dlb2_osdep.h"
11 #include "dlb2_osdep_bitmap.h"
12 #include "dlb2_osdep_types.h"
13 #include "dlb2_regs_new.h"
14 #include "dlb2_resource_new.h" /* TEMP FOR UPSTREAM PATCHES */
15
16 #include "../../dlb2_priv.h"
17 #include "../../dlb2_inline_fns.h"
18
19 #define DLB2_DOM_LIST_HEAD(head, type) \
20         DLB2_LIST_HEAD((head), type, domain_list)
21
22 #define DLB2_FUNC_LIST_HEAD(head, type) \
23         DLB2_LIST_HEAD((head), type, func_list)
24
25 #define DLB2_DOM_LIST_FOR(head, ptr, iter) \
26         DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
27
28 #define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
29         DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
30
31 #define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
32         DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
33
34 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
35         DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
36
37 /*
38  * The PF driver cannot assume that a register write will affect subsequent HCW
39  * writes. To ensure a write completes, the driver must read back a CSR. This
40  * function need only be called for configuration that can occur after the
41  * domain has started; prior to starting, applications can't send HCWs.
42  */
43 static inline void dlb2_flush_csr(struct dlb2_hw *hw)
44 {
45         DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
46 }
47
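/*
 * Initialize the per-domain resource lists that track which queues and ports
 * a scheduling domain owns and which remain available for configuration.
 */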
48 static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
49 {
50         int i;
51
52         dlb2_list_init_head(&domain->used_ldb_queues);
53         dlb2_list_init_head(&domain->used_dir_pq_pairs);
54         dlb2_list_init_head(&domain->avail_ldb_queues);
55         dlb2_list_init_head(&domain->avail_dir_pq_pairs);
56
57         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
58                 dlb2_list_init_head(&domain->used_ldb_ports[i]);
59         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
60                 dlb2_list_init_head(&domain->avail_ldb_ports[i]);
61 }
62
63 static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
64 {
65         int i;
66         dlb2_list_init_head(&rsrc->avail_domains);
67         dlb2_list_init_head(&rsrc->used_domains);
68         dlb2_list_init_head(&rsrc->avail_ldb_queues);
69         dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);
70
71         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
72                 dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
73 }
74
75 /**
76  * dlb2_resource_free() - free device state memory
77  * @hw: dlb2_hw handle for a particular device.
78  *
79  * This function frees software state pointed to by dlb2_hw. This function
80  * should be called when resetting the device or unloading the driver.
81  */
82 void dlb2_resource_free(struct dlb2_hw *hw)
83 {
84         int i;
85
86         if (hw->pf.avail_hist_list_entries)
87                 dlb2_bitmap_free(hw->pf.avail_hist_list_entries);
88
89         for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
90                 if (hw->vdev[i].avail_hist_list_entries)
91                         dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
92         }
93 }
94
95 /**
96  * dlb2_resource_init() - initialize the device
97  * @hw: pointer to struct dlb2_hw.
98  * @ver: device version.
99  *
100  * This function initializes the device's software state (pointed to by the hw
101  * argument) and programs global scheduling QoS registers. This function should
102  * be called during driver initialization, and the dlb2_hw structure should
103  * be zero-initialized before calling the function.
104  *
105  * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
106  * device is reset.
107  *
108  * Return:
109  * Returns 0 upon success, <0 otherwise.
110  */
111 int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
112 {
113         struct dlb2_list_entry *list;
114         unsigned int i;
115         int ret;
116
117         /*
118          * For optimal load-balancing, ports that map to one or more QIDs in
119          * common should not be in numerical sequence. The port->QID mapping is
120          * application dependent, but the driver interleaves port IDs as much
121          * as possible to reduce the likelihood of sequential ports mapping to
122          * the same QID(s). This initial allocation of port IDs maximizes the
123          * average distance between an ID and its immediate neighbors (i.e.
124          * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
125          * 3, etc.).
126          */
127         const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
128                 0,  7,  14,  5, 12,  3, 10,  1,  8, 15,  6, 13,  4, 11,  2,  9,
129                 16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
130                 32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
131                 48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
132         };
133
134         hw->ver = ver;
135
136         dlb2_init_fn_rsrc_lists(&hw->pf);
137
138         for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
139                 dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
140
141         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
142                 dlb2_init_domain_rsrc_lists(&hw->domains[i]);
143                 hw->domains[i].parent_func = &hw->pf;
144         }
145
146         /* Give all resources to the PF driver */
147         hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
148         for (i = 0; i < hw->pf.num_avail_domains; i++) {
149                 list = &hw->domains[i].func_list;
150
151                 dlb2_list_add(&hw->pf.avail_domains, list);
152         }
153
154         hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
155         for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
156                 list = &hw->rsrcs.ldb_queues[i].func_list;
157
158                 dlb2_list_add(&hw->pf.avail_ldb_queues, list);
159         }
160
161         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
162                 hw->pf.num_avail_ldb_ports[i] =
163                         DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;
164
165         for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
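                /*
                 * Note: the shift by DLB2_NUM_COS_DOMAINS (4) equals
                 * log2(DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS), so each
                 * class of service receives a block of 16 of the interleaved
                 * port IDs.
                 */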
166                 int cos_id = i >> DLB2_NUM_COS_DOMAINS;
167                 struct dlb2_ldb_port *port;
168
169                 port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
170
171                 dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
172                               &port->func_list);
173         }
174
175         hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
176         for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
177                 list = &hw->rsrcs.dir_pq_pairs[i].func_list;
178
179                 dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
180         }
181
182         if (hw->ver == DLB2_HW_V2) {
183                 hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
184                 hw->pf.num_avail_dqed_entries =
185                         DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
186         } else {
187                 hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
188         }
189
190         hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
191
192         ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
193                                 DLB2_MAX_NUM_HIST_LIST_ENTRIES);
194         if (ret)
195                 goto unwind;
196
197         ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
198         if (ret)
199                 goto unwind;
200
201         for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
202                 ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
203                                         DLB2_MAX_NUM_HIST_LIST_ENTRIES);
204                 if (ret)
205                         goto unwind;
206
207                 ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
208                 if (ret)
209                         goto unwind;
210         }
211
212         /* Initialize the hardware resource IDs */
213         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
214                 hw->domains[i].id.phys_id = i;
215                 hw->domains[i].id.vdev_owned = false;
216         }
217
218         for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
219                 hw->rsrcs.ldb_queues[i].id.phys_id = i;
220                 hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
221         }
222
223         for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
224                 hw->rsrcs.ldb_ports[i].id.phys_id = i;
225                 hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
226         }
227
228         for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
229                 hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
230                 hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
231         }
232
233         for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
234                 hw->rsrcs.sn_groups[i].id = i;
235                 /* Default mode (0) is 64 sequence numbers per queue */
236                 hw->rsrcs.sn_groups[i].mode = 0;
237                 hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
238                 hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
239         }
240
241         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
242                 hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;
243
244         return 0;
245
246 unwind:
247         dlb2_resource_free(hw);
248
249         return ret;
250 }
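/*
 * Usage sketch (illustrative only): a PF driver typically zero-initializes
 * the device-unique dlb2_hw structure, initializes it once during probe, and
 * frees it on removal or reset, e.g.:
 *
 *	int ret;
 *
 *	memset(hw, 0, sizeof(*hw));
 *	ret = dlb2_resource_init(hw, DLB2_HW_V2_5);
 *	if (ret)
 *		return ret;
 *	...
 *	dlb2_resource_free(hw);
 */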
251
252 /**
253  * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
254  * @hw: dlb2_hw handle for a particular device.
255  * @ver: device version.
256  *
257  * Clearing the PMCSR disable bit must be done at initialization to make the
258  * device fully operational.
259  */
260 void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
261 {
262         u32 pmcsr_dis;
263
264         pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));
265
266         DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);
267
268         DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
269 }
270
271 /**
272  * dlb2_hw_get_num_resources() - query the PCI function's available resources
273  * @hw: dlb2_hw handle for a particular device.
274  * @arg: pointer to resource counts.
275  * @vdev_req: indicates whether this request came from a vdev.
276  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
277  *
278  * This function returns the number of available resources for the PF or for a
279  * vdev.
280  *
281  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
282  * device.
283  *
284  * Return:
285  * Returns 0 upon success, -EINVAL if vdev_req is true and vdev_id is
286  * invalid.
287  */
288 int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
289                               struct dlb2_get_num_resources_args *arg,
290                               bool vdev_req,
291                               unsigned int vdev_id)
292 {
293         struct dlb2_function_resources *rsrcs;
294         struct dlb2_bitmap *map;
295         int i;
296
297         if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
298                 return -EINVAL;
299
300         if (vdev_req)
301                 rsrcs = &hw->vdev[vdev_id];
302         else
303                 rsrcs = &hw->pf;
304
305         arg->num_sched_domains = rsrcs->num_avail_domains;
306
307         arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
308
309         arg->num_ldb_ports = 0;
310         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
311                 arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];
312
313         arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
314         arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
315         arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
316         arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];
317
318         arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
319
320         arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;
321
322         map = rsrcs->avail_hist_list_entries;
323
324         arg->num_hist_list_entries = dlb2_bitmap_count(map);
325
326         arg->max_contiguous_hist_list_entries =
327                 dlb2_bitmap_longest_set_range(map);
328
329         if (hw->ver == DLB2_HW_V2) {
330                 arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
331                 arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
332         } else {
333                 arg->num_credits = rsrcs->num_avail_entries;
334         }
335         return 0;
336 }
337
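/*
 * DLB 2.5 exposes a single combined credit pool per scheduling domain,
 * whereas DLB 2.0 (the _v2 variant below) programs separate load-balanced
 * and directed credit pools.
 */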
338 static void dlb2_configure_domain_credits_v2_5(struct dlb2_hw *hw,
339                                                struct dlb2_hw_domain *domain)
340 {
341         u32 reg = 0;
342
343         DLB2_BITS_SET(reg, domain->num_credits, DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
344         DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id), reg);
345 }
346
347 static void dlb2_configure_domain_credits_v2(struct dlb2_hw *hw,
348                                              struct dlb2_hw_domain *domain)
349 {
350         u32 reg = 0;
351
352         DLB2_BITS_SET(reg, domain->num_ldb_credits,
353                       DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
354         DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), reg);
355
356         reg = 0;
357         DLB2_BITS_SET(reg, domain->num_dir_credits,
358                       DLB2_CHP_CFG_DIR_VAS_CRD_COUNT);
359         DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), reg);
360 }
361
362 static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
363                                           struct dlb2_hw_domain *domain)
364 {
365         if (hw->ver == DLB2_HW_V2)
366                 dlb2_configure_domain_credits_v2(hw, domain);
367         else
368                 dlb2_configure_domain_credits_v2_5(hw, domain);
369 }
370
371 static int dlb2_attach_credits(struct dlb2_function_resources *rsrcs,
372                                struct dlb2_hw_domain *domain,
373                                u32 num_credits,
374                                struct dlb2_cmd_response *resp)
375 {
376         if (rsrcs->num_avail_entries < num_credits) {
377                 resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
378                 return -EINVAL;
379         }
380
381         rsrcs->num_avail_entries -= num_credits;
382         domain->num_credits += num_credits;
383         return 0;
384 }
385
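/*
 * Select the next load-balanced port to allocate to a domain, preferring
 * ports whose physical neighbors are owned by other domains (see the
 * comments below) so that consecutively numbered ports are spread across
 * domains.
 */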
386 static struct dlb2_ldb_port *
387 dlb2_get_next_ldb_port(struct dlb2_hw *hw,
388                        struct dlb2_function_resources *rsrcs,
389                        u32 domain_id,
390                        u32 cos_id)
391 {
392         struct dlb2_list_entry *iter;
393         struct dlb2_ldb_port *port;
394         RTE_SET_USED(iter);
395
396         /*
397          * To reduce the odds of consecutive load-balanced ports mapping to the
398          * same queue(s), the driver attempts to allocate ports whose neighbors
399          * are owned by a different domain.
400          */
401         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
402                 u32 next, prev;
403                 u32 phys_id;
404
405                 phys_id = port->id.phys_id;
406                 next = phys_id + 1;
407                 prev = phys_id - 1;
408
409                 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
410                         next = 0;
411                 if (phys_id == 0)
412                         prev = DLB2_MAX_NUM_LDB_PORTS - 1;
413
414                 if (!hw->rsrcs.ldb_ports[next].owned ||
415                     hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
416                         continue;
417
418                 if (!hw->rsrcs.ldb_ports[prev].owned ||
419                     hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
420                         continue;
421
422                 return port;
423         }
424
425         /*
426          * Failing that, the driver looks for a port with one neighbor owned by
427          * a different domain and the other unallocated.
428          */
429         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
430                 u32 next, prev;
431                 u32 phys_id;
432
433                 phys_id = port->id.phys_id;
434                 next = phys_id + 1;
435                 prev = phys_id - 1;
436
437                 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
438                         next = 0;
439                 if (phys_id == 0)
440                         prev = DLB2_MAX_NUM_LDB_PORTS - 1;
441
442                 if (!hw->rsrcs.ldb_ports[prev].owned &&
443                     hw->rsrcs.ldb_ports[next].owned &&
444                     hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
445                         return port;
446
447                 if (!hw->rsrcs.ldb_ports[next].owned &&
448                     hw->rsrcs.ldb_ports[prev].owned &&
449                     hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
450                         return port;
451         }
452
453         /*
454          * Failing that, the driver looks for a port with both neighbors
455          * unallocated.
456          */
457         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
458                 u32 next, prev;
459                 u32 phys_id;
460
461                 phys_id = port->id.phys_id;
462                 next = phys_id + 1;
463                 prev = phys_id - 1;
464
465                 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
466                         next = 0;
467                 if (phys_id == 0)
468                         prev = DLB2_MAX_NUM_LDB_PORTS - 1;
469
470                 if (!hw->rsrcs.ldb_ports[prev].owned &&
471                     !hw->rsrcs.ldb_ports[next].owned)
472                         return port;
473         }
474
475         /* If all else fails, the driver returns the next available port. */
476         return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
477                                    typeof(*port));
478 }
479
480 static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
481                                    struct dlb2_function_resources *rsrcs,
482                                    struct dlb2_hw_domain *domain,
483                                    u32 num_ports,
484                                    u32 cos_id,
485                                    struct dlb2_cmd_response *resp)
486 {
487         unsigned int i;
488
489         if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
490                 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
491                 return -EINVAL;
492         }
493
494         for (i = 0; i < num_ports; i++) {
495                 struct dlb2_ldb_port *port;
496
497                 port = dlb2_get_next_ldb_port(hw, rsrcs,
498                                               domain->id.phys_id, cos_id);
499                 if (port == NULL) {
500                         DLB2_HW_ERR(hw,
501                                     "[%s()] Internal error: domain validation failed\n",
502                                     __func__);
503                         return -EFAULT;
504                 }
505
506                 dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
507                               &port->func_list);
508
509                 port->domain_id = domain->id;
510                 port->owned = true;
511
512                 dlb2_list_add(&domain->avail_ldb_ports[cos_id],
513                               &port->domain_list);
514         }
515
516         rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;
517
518         return 0;
519 }
520
521
522 static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
523                                  struct dlb2_function_resources *rsrcs,
524                                  struct dlb2_hw_domain *domain,
525                                  struct dlb2_create_sched_domain_args *args,
526                                  struct dlb2_cmd_response *resp)
527 {
528         unsigned int i, j;
529         int ret;
530
531         if (args->cos_strict) {
532                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
533                         u32 num = args->num_cos_ldb_ports[i];
534
535                         /* Allocate ports from specific classes-of-service */
536                         ret = __dlb2_attach_ldb_ports(hw,
537                                                       rsrcs,
538                                                       domain,
539                                                       num,
540                                                       i,
541                                                       resp);
542                         if (ret)
543                                 return ret;
544                 }
545         } else {
546                 unsigned int k;
547                 u32 cos_id;
548
549                 /*
550                  * Attempt to allocate from the specific class-of-service, but
551                  * fall back to the other classes if that fails.
552                  */
553                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
554                         for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
555                                 for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
556                                         cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;
557
558                                         ret = __dlb2_attach_ldb_ports(hw,
559                                                                       rsrcs,
560                                                                       domain,
561                                                                       1,
562                                                                       cos_id,
563                                                                       resp);
564                                         if (ret == 0)
565                                                 break;
566                                 }
567
568                                 if (ret)
569                                         return ret;
570                         }
571                 }
572         }
573
574         /* Allocate num_ldb_ports from any class-of-service */
575         for (i = 0; i < args->num_ldb_ports; i++) {
576                 for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
577                         ret = __dlb2_attach_ldb_ports(hw,
578                                                       rsrcs,
579                                                       domain,
580                                                       1,
581                                                       j,
582                                                       resp);
583                         if (ret == 0)
584                                 break;
585                 }
586
587                 if (ret)
588                         return ret;
589         }
590
591         return 0;
592 }
593
594 static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
595                                  struct dlb2_function_resources *rsrcs,
596                                  struct dlb2_hw_domain *domain,
597                                  u32 num_ports,
598                                  struct dlb2_cmd_response *resp)
599 {
600         unsigned int i;
601
602         if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
603                 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
604                 return -EINVAL;
605         }
606
607         for (i = 0; i < num_ports; i++) {
608                 struct dlb2_dir_pq_pair *port;
609
610                 port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
611                                            typeof(*port));
612                 if (port == NULL) {
613                         DLB2_HW_ERR(hw,
614                                     "[%s()] Internal error: domain validation failed\n",
615                                     __func__);
616                         return -EFAULT;
617                 }
618
619                 dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
620
621                 port->domain_id = domain->id;
622                 port->owned = true;
623
624                 dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
625         }
626
627         rsrcs->num_avail_dir_pq_pairs -= num_ports;
628
629         return 0;
630 }
631
632 static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
633                                    struct dlb2_hw_domain *domain,
634                                    u32 num_credits,
635                                    struct dlb2_cmd_response *resp)
636 {
637         if (rsrcs->num_avail_qed_entries < num_credits) {
638                 resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
639                 return -EINVAL;
640         }
641
642         rsrcs->num_avail_qed_entries -= num_credits;
643         domain->num_ldb_credits += num_credits;
644         return 0;
645 }
646
647 static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
648                                    struct dlb2_hw_domain *domain,
649                                    u32 num_credits,
650                                    struct dlb2_cmd_response *resp)
651 {
652         if (rsrcs->num_avail_dqed_entries < num_credits) {
653                 resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
654                 return -EINVAL;
655         }
656
657         rsrcs->num_avail_dqed_entries -= num_credits;
658         domain->num_dir_credits += num_credits;
659         return 0;
660 }
661
662
663 static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
664                                         struct dlb2_hw_domain *domain,
665                                         u32 num_atomic_inflights,
666                                         struct dlb2_cmd_response *resp)
667 {
668         if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
669                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
670                 return -EINVAL;
671         }
672
673         rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
674         domain->num_avail_aqed_entries += num_atomic_inflights;
675         return 0;
676 }
677
678 static int
679 dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
680                                      struct dlb2_hw_domain *domain,
681                                      u32 num_hist_list_entries,
682                                      struct dlb2_cmd_response *resp)
683 {
684         struct dlb2_bitmap *bitmap;
685         int base;
686
687         if (num_hist_list_entries) {
688                 bitmap = rsrcs->avail_hist_list_entries;
689
690                 base = dlb2_bitmap_find_set_bit_range(bitmap,
691                                                       num_hist_list_entries);
692                 if (base < 0)
693                         goto error;
694
695                 domain->total_hist_list_entries = num_hist_list_entries;
696                 domain->avail_hist_list_entries = num_hist_list_entries;
697                 domain->hist_list_entry_base = base;
698                 domain->hist_list_entry_offset = 0;
699
700                 dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
701         }
702         return 0;
703
704 error:
705         resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
706         return -EINVAL;
707 }
708
709 static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
710                                   struct dlb2_function_resources *rsrcs,
711                                   struct dlb2_hw_domain *domain,
712                                   u32 num_queues,
713                                   struct dlb2_cmd_response *resp)
714 {
715         unsigned int i;
716
717         if (rsrcs->num_avail_ldb_queues < num_queues) {
718                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
719                 return -EINVAL;
720         }
721
722         for (i = 0; i < num_queues; i++) {
723                 struct dlb2_ldb_queue *queue;
724
725                 queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
726                                             typeof(*queue));
727                 if (queue == NULL) {
728                         DLB2_HW_ERR(hw,
729                                     "[%s()] Internal error: domain validation failed\n",
730                                     __func__);
731                         return -EFAULT;
732                 }
733
734                 dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
735
736                 queue->domain_id = domain->id;
737                 queue->owned = true;
738
739                 dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
740         }
741
742         rsrcs->num_avail_ldb_queues -= num_queues;
743
744         return 0;
745 }
746
747 static int
748 dlb2_domain_attach_resources(struct dlb2_hw *hw,
749                              struct dlb2_function_resources *rsrcs,
750                              struct dlb2_hw_domain *domain,
751                              struct dlb2_create_sched_domain_args *args,
752                              struct dlb2_cmd_response *resp)
753 {
754         int ret;
755
756         ret = dlb2_attach_ldb_queues(hw,
757                                      rsrcs,
758                                      domain,
759                                      args->num_ldb_queues,
760                                      resp);
761         if (ret)
762                 return ret;
763
764         ret = dlb2_attach_ldb_ports(hw,
765                                     rsrcs,
766                                     domain,
767                                     args,
768                                     resp);
769         if (ret)
770                 return ret;
771
772         ret = dlb2_attach_dir_ports(hw,
773                                     rsrcs,
774                                     domain,
775                                     args->num_dir_ports,
776                                     resp);
777         if (ret)
778                 return ret;
779
780         if (hw->ver == DLB2_HW_V2) {
781                 ret = dlb2_attach_ldb_credits(rsrcs,
782                                               domain,
783                                               args->num_ldb_credits,
784                                               resp);
785                 if (ret)
786                         return ret;
787
788                 ret = dlb2_attach_dir_credits(rsrcs,
789                                               domain,
790                                               args->num_dir_credits,
791                                               resp);
792                 if (ret)
793                         return ret;
794         } else {  /* DLB 2.5 */
795                 ret = dlb2_attach_credits(rsrcs,
796                                           domain,
797                                           args->num_credits,
798                                           resp);
799                 if (ret)
800                         return ret;
801         }
802
803         ret = dlb2_attach_domain_hist_list_entries(rsrcs,
804                                                    domain,
805                                                    args->num_hist_list_entries,
806                                                    resp);
807         if (ret)
808                 return ret;
809
810         ret = dlb2_attach_atomic_inflights(rsrcs,
811                                            domain,
812                                            args->num_atomic_inflights,
813                                            resp);
814         if (ret)
815                 return ret;
816
817         dlb2_configure_domain_credits(hw, domain);
818
819         domain->configured = true;
820
821         domain->started = false;
822
823         rsrcs->num_avail_domains--;
824
825         return 0;
826 }
827
828 static int
829 dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
830                                   struct dlb2_create_sched_domain_args *args,
831                                   struct dlb2_cmd_response *resp,
832                                   struct dlb2_hw *hw,
833                                   struct dlb2_hw_domain **out_domain)
834 {
835         u32 num_avail_ldb_ports, req_ldb_ports;
836         struct dlb2_bitmap *avail_hl_entries;
837         unsigned int max_contig_hl_range;
838         struct dlb2_hw_domain *domain;
839         int i;
840
841         avail_hl_entries = rsrcs->avail_hist_list_entries;
842
843         max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);
844
845         num_avail_ldb_ports = 0;
846         req_ldb_ports = 0;
847         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
848                 num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];
849
850                 req_ldb_ports += args->num_cos_ldb_ports[i];
851         }
852
853         req_ldb_ports += args->num_ldb_ports;
854
855         if (rsrcs->num_avail_domains < 1) {
856                 resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
857                 return -EINVAL;
858         }
859
860         domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
861         if (domain == NULL) {
862                 resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
863                 return -EFAULT;
864         }
865
866         if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
867                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
868                 return -EINVAL;
869         }
870
871         if (req_ldb_ports > num_avail_ldb_ports) {
872                 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
873                 return -EINVAL;
874         }
875
876         for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
877                 if (args->num_cos_ldb_ports[i] >
878                     rsrcs->num_avail_ldb_ports[i]) {
879                         resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
880                         return -EINVAL;
881                 }
882         }
883
884         if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
885                 resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
886                 return -EINVAL;
887         }
888
889         if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
890                 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
891                 return -EINVAL;
892         }
893         if (hw->ver == DLB2_HW_V2_5) {
894                 if (rsrcs->num_avail_entries < args->num_credits) {
895                         resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
896                         return -EINVAL;
897                 }
898         } else {
899                 if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
900                         resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
901                         return -EINVAL;
902                 }
903                 if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
904                         resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
905                         return -EINVAL;
906                 }
907         }
908
909         if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
910                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
911                 return -EINVAL;
912         }
913
914         if (max_contig_hl_range < args->num_hist_list_entries) {
915                 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
916                 return -EINVAL;
917         }
918
919         *out_domain = domain;
920
921         return 0;
922 }
923
924 static void
925 dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
926                                   struct dlb2_create_sched_domain_args *args,
927                                   bool vdev_req,
928                                   unsigned int vdev_id)
929 {
930         DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
931         if (vdev_req)
932                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
933         DLB2_HW_DBG(hw, "\tNumber of LDB queues:          %d\n",
934                     args->num_ldb_queues);
935         DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
936                     args->num_ldb_ports);
937         DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0):   %d\n",
938                     args->num_cos_ldb_ports[0]);
939         DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1):   %d\n",
940                     args->num_cos_ldb_ports[1]);
941         DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2):   %d\n",
942                     args->num_cos_ldb_ports[2]);
943         DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3):   %d\n",
944                     args->num_cos_ldb_ports[3]);
945         DLB2_HW_DBG(hw, "\tStrict CoS allocation:         %d\n",
946                     args->cos_strict);
947         DLB2_HW_DBG(hw, "\tNumber of DIR ports:           %d\n",
948                     args->num_dir_ports);
949         DLB2_HW_DBG(hw, "\tNumber of ATM inflights:       %d\n",
950                     args->num_atomic_inflights);
951         DLB2_HW_DBG(hw, "\tNumber of hist list entries:   %d\n",
952                     args->num_hist_list_entries);
953         if (hw->ver == DLB2_HW_V2) {
954                 DLB2_HW_DBG(hw, "\tNumber of LDB credits:         %d\n",
955                             args->num_ldb_credits);
956                 DLB2_HW_DBG(hw, "\tNumber of DIR credits:         %d\n",
957                             args->num_dir_credits);
958         } else {
959                 DLB2_HW_DBG(hw, "\tNumber of credits:         %d\n",
960                             args->num_credits);
961         }
962 }
963
964 /**
965  * dlb2_hw_create_sched_domain() - create a scheduling domain
966  * @hw: dlb2_hw handle for a particular device.
967  * @args: scheduling domain creation arguments.
968  * @resp: response structure.
969  * @vdev_req: indicates whether this request came from a vdev.
970  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
971  *
972  * This function creates a scheduling domain containing the resources specified
973  * in args. The individual resources (queues, ports, credits) can be configured
974  * after creating a scheduling domain.
975  *
976  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
977  * device.
978  *
979  * Return:
980  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
981  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
982  * contains the domain ID.
983  *
984  * resp->id contains a virtual ID if vdev_req is true.
985  *
986  * Errors:
987  * EINVAL - A requested resource is unavailable, or the arguments are
988  *          otherwise invalid.
989  * EFAULT - Internal error (resp->status not set).
990  */
991 int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
992                                 struct dlb2_create_sched_domain_args *args,
993                                 struct dlb2_cmd_response *resp,
994                                 bool vdev_req,
995                                 unsigned int vdev_id)
996 {
997         struct dlb2_function_resources *rsrcs;
998         struct dlb2_hw_domain *domain;
999         int ret;
1000
1001         rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1002
1003         dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);
1004
1005         /*
1006          * Verify that hardware resources are available before attempting to
1007          * satisfy the request. This simplifies the error unwinding code.
1008          */
1009         ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp, hw, &domain);
1010         if (ret)
1011                 return ret;
1012
1013         dlb2_init_domain_rsrc_lists(domain);
1014
1015         ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
1016         if (ret) {
1017                 DLB2_HW_ERR(hw,
1018                             "[%s()] Internal error: failed to verify args.\n",
1019                             __func__);
1020
1021                 return ret;
1022         }
1023
1024         dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);
1025
1026         dlb2_list_add(&rsrcs->used_domains, &domain->func_list);
1027
1028         resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
1029         resp->status = 0;
1030
1031         return 0;
1032 }
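/*
 * Usage sketch (illustrative only; the field values are arbitrary and
 * handle_error()/domain_id are placeholders): a caller fills in the argument
 * structure, then reads resp.id on success or resp.status on failure. On
 * DLB 2.0 the num_ldb_credits/num_dir_credits fields apply; on DLB 2.5,
 * num_credits is used instead.
 *
 *	struct dlb2_create_sched_domain_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	args.num_ldb_queues = 2;
 *	args.num_ldb_ports = 4;
 *	args.num_dir_ports = 1;
 *	args.num_atomic_inflights = 64;
 *	args.num_hist_list_entries = 256;
 *
 *	if (dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0))
 *		handle_error(resp.status);
 *	else
 *		domain_id = resp.id;
 */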
1033
1034 static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
1035                                      struct dlb2_dir_pq_pair *port)
1036 {
1037         u32 reg = 0;
1038
1039         DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
1040         DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
1041
1042         dlb2_flush_csr(hw);
1043 }
1044
1045 static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
1046                                    struct dlb2_dir_pq_pair *port)
1047 {
1048         u32 cnt;
1049
1050         cnt = DLB2_CSR_RD(hw,
1051                           DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));
1052
1053         /*
1054          * Account for the initial token count, which is used to provide a CQ
1055          * with depth less than 8.
1056          */
1057
1058         return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
1059                port->init_tkn_cnt;
1060 }
1061
1062 static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
1063                               struct dlb2_dir_pq_pair *port)
1064 {
1065         unsigned int port_id = port->id.phys_id;
1066         u32 cnt;
1067
1068         /* Return any outstanding tokens */
1069         cnt = dlb2_dir_cq_token_count(hw, port);
1070
1071         if (cnt != 0) {
1072                 struct dlb2_hcw hcw_mem[8], *hcw;
1073                 void __iomem *pp_addr;
1074
1075                 pp_addr = os_map_producer_port(hw, port_id, false);
1076
1077                 /* Point hcw to a 64B-aligned location */
1078                 hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1079
1080                 /*
1081                  * Program the first HCW for a batch token return and
1082                  * the rest as NOOPS
1083                  */
1084                 memset(hcw, 0, 4 * sizeof(*hcw));
1085                 hcw->cq_token = 1;
1086                 hcw->lock_id = cnt - 1;
1087
1088                 dlb2_movdir64b(pp_addr, hcw);
1089
1090                 os_fence_hcw(hw, pp_addr);
1091
1092                 os_unmap_producer_port(hw, pp_addr);
1093         }
1094 }
1095
1096 static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
1097                                     struct dlb2_dir_pq_pair *port)
1098 {
1099         u32 reg = 0;
1100
1101         DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
1102
1103         dlb2_flush_csr(hw);
1104 }
1105
1106 static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
1107                                      struct dlb2_hw_domain *domain,
1108                                      bool toggle_port)
1109 {
1110         struct dlb2_list_entry *iter;
1111         struct dlb2_dir_pq_pair *port;
1112         RTE_SET_USED(iter);
1113
1114         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
1115                 /*
1116                  * Can't drain a port if it's not configured, and there's
1117                  * nothing to drain if its queue is unconfigured.
1118                  */
1119                 if (!port->port_configured || !port->queue_configured)
1120                         continue;
1121
1122                 if (toggle_port)
1123                         dlb2_dir_port_cq_disable(hw, port);
1124
1125                 dlb2_drain_dir_cq(hw, port);
1126
1127                 if (toggle_port)
1128                         dlb2_dir_port_cq_enable(hw, port);
1129         }
1130
1131         return 0;
1132 }
1133
1134 static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
1135                                 struct dlb2_dir_pq_pair *queue)
1136 {
1137         u32 cnt;
1138
1139         cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
1140                                                       queue->id.phys_id));
1141
1142         return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
1143 }
1144
1145 static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
1146                                     struct dlb2_dir_pq_pair *queue)
1147 {
1148         return dlb2_dir_queue_depth(hw, queue) == 0;
1149 }
1150
1151 static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
1152                                          struct dlb2_hw_domain *domain)
1153 {
1154         struct dlb2_list_entry *iter;
1155         struct dlb2_dir_pq_pair *queue;
1156         RTE_SET_USED(iter);
1157
1158         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
1159                 if (!dlb2_dir_queue_is_empty(hw, queue))
1160                         return false;
1161         }
1162
1163         return true;
1164 }
1165 static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
1166                                         struct dlb2_hw_domain *domain)
1167 {
1168         int i;
1169
1170         /* If the domain hasn't been started, there's no traffic to drain */
1171         if (!domain->started)
1172                 return 0;
1173
1174         for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1175                 dlb2_domain_drain_dir_cqs(hw, domain, true);
1176
1177                 if (dlb2_domain_dir_queues_empty(hw, domain))
1178                         break;
1179         }
1180
1181         if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1182                 DLB2_HW_ERR(hw,
1183                             "[%s()] Internal error: failed to empty queues\n",
1184                             __func__);
1185                 return -EFAULT;
1186         }
1187
1188         /*
1189          * Drain the CQs one more time, since the queues drained above went
1190          * empty by scheduling their remaining QEs to the CQs.
1191          */
1192         dlb2_domain_drain_dir_cqs(hw, domain, true);
1193
1194         return 0;
1195 }
1196
1197 static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
1198                                     struct dlb2_ldb_port *port)
1199 {
1200         u32 reg = 0;
1201
1202         /*
1203          * Don't re-enable the port if a removal is pending. The caller should
1204          * mark this port as enabled (if it isn't already), and when the
1205          * removal completes the port will be enabled.
1206          */
1207         if (port->num_pending_removals)
1208                 return;
1209
1210         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
1211
1212         dlb2_flush_csr(hw);
1213 }
1214
1215 static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
1216                                      struct dlb2_ldb_port *port)
1217 {
1218         u32 reg = 0;
1219
1220         DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
1221         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
1222
1223         dlb2_flush_csr(hw);
1224 }
1225
1226 static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
1227                                       struct dlb2_ldb_port *port)
1228 {
1229         u32 cnt;
1230
1231         cnt = DLB2_CSR_RD(hw,
1232                           DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));
1233
1234         return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
1235 }
1236
1237 static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
1238                                    struct dlb2_ldb_port *port)
1239 {
1240         u32 cnt;
1241
1242         cnt = DLB2_CSR_RD(hw,
1243                           DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));
1244
1245         /*
1246          * Account for the initial token count, which is used to provide a CQ
1247          * with depth less than 8.
1248          */
1249
1250         return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
1251                 port->init_tkn_cnt;
1252 }
1253
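/*
 * Drain a load-balanced CQ by issuing a completion for each in-flight QE and
 * a token return for all outstanding tokens, written as HCWs directly to the
 * port's producer port.
 */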
1254 static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
1255 {
1256         u32 infl_cnt, tkn_cnt;
1257         unsigned int i;
1258
1259         infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
1260         tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
1261
1262         if (infl_cnt || tkn_cnt) {
1263                 struct dlb2_hcw hcw_mem[8], *hcw;
1264                 void __iomem *pp_addr;
1265
1266                 pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
1267
1268                 /* Point hcw to a 64B-aligned location */
1269                 hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1270
1271                 /*
1272                  * Program the first HCW for a completion and token return and
1273                  * the other HCWs as NOOPS
1274                  */
1275
1276                 memset(hcw, 0, 4 * sizeof(*hcw));
1277                 hcw->qe_comp = (infl_cnt > 0);
1278                 hcw->cq_token = (tkn_cnt > 0);
1279                 hcw->lock_id = tkn_cnt - 1;
1280
1281                 /* Return tokens in the first HCW */
1282                 dlb2_movdir64b(pp_addr, hcw);
1283
1284                 hcw->cq_token = 0;
1285
1286                 /* Issue remaining completions (if any) */
1287                 for (i = 1; i < infl_cnt; i++)
1288                         dlb2_movdir64b(pp_addr, hcw);
1289
1290                 os_fence_hcw(hw, pp_addr);
1291
1292                 os_unmap_producer_port(hw, pp_addr);
1293         }
1294 }
1295
1296 static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
1297                                       struct dlb2_hw_domain *domain,
1298                                       bool toggle_port)
1299 {
1300         struct dlb2_list_entry *iter;
1301         struct dlb2_ldb_port *port;
1302         int i;
1303         RTE_SET_USED(iter);
1304
1305         /* If the domain hasn't been started, there's no traffic to drain */
1306         if (!domain->started)
1307                 return;
1308
1309         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1310                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1311                         if (toggle_port)
1312                                 dlb2_ldb_port_cq_disable(hw, port);
1313
1314                         dlb2_drain_ldb_cq(hw, port);
1315
1316                         if (toggle_port)
1317                                 dlb2_ldb_port_cq_enable(hw, port);
1318                 }
1319         }
1320 }
1321
1322 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
1323                                 struct dlb2_ldb_queue *queue)
1324 {
1325         u32 aqed, ldb, atm;
1326
1327         aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
1328                                                        queue->id.phys_id));
1329         ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
1330                                                       queue->id.phys_id));
1331         atm = DLB2_CSR_RD(hw,
1332                           DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));
1333
1334         return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
1335                + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
1336                + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
1337 }
1338
1339 static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
1340                                     struct dlb2_ldb_queue *queue)
1341 {
1342         return dlb2_ldb_queue_depth(hw, queue) == 0;
1343 }
1344
1345 static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
1346                                             struct dlb2_hw_domain *domain)
1347 {
1348         struct dlb2_list_entry *iter;
1349         struct dlb2_ldb_queue *queue;
1350         RTE_SET_USED(iter);
1351
1352         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
1353                 if (queue->num_mappings == 0)
1354                         continue;
1355
1356                 if (!dlb2_ldb_queue_is_empty(hw, queue))
1357                         return false;
1358         }
1359
1360         return true;
1361 }
1362
1363 static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
1364                                            struct dlb2_hw_domain *domain)
1365 {
1366         int i;
1367
1368         /* If the domain hasn't been started, there's no traffic to drain */
1369         if (!domain->started)
1370                 return 0;
1371
1372         if (domain->num_pending_removals > 0) {
1373                 DLB2_HW_ERR(hw,
1374                             "[%s()] Internal error: failed to unmap domain queues\n",
1375                             __func__);
1376                 return -EFAULT;
1377         }
1378
1379         for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1380                 dlb2_domain_drain_ldb_cqs(hw, domain, true);
1381
1382                 if (dlb2_domain_mapped_queues_empty(hw, domain))
1383                         break;
1384         }
1385
1386         if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1387                 DLB2_HW_ERR(hw,
1388                             "[%s()] Internal error: failed to empty queues\n",
1389                             __func__);
1390                 return -EFAULT;
1391         }
1392
1393         /*
1394          * Drain the CQs one more time, since the queues drained above went
1395          * empty by scheduling their remaining QEs to the CQs.
1396          */
1397         dlb2_domain_drain_ldb_cqs(hw, domain, true);
1398
1399         return 0;
1400 }
1401
1402 static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
1403                                        struct dlb2_hw_domain *domain)
1404 {
1405         struct dlb2_list_entry *iter;
1406         struct dlb2_ldb_port *port;
1407         int i;
1408         RTE_SET_USED(iter);
1409
1410         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1411                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1412                         port->enabled = true;
1413
1414                         dlb2_ldb_port_cq_enable(hw, port);
1415                 }
1416         }
1417 }
1418
1419 static struct dlb2_ldb_queue *
1420 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
1421                            u32 id,
1422                            bool vdev_req,
1423                            unsigned int vdev_id)
1424 {
1425         struct dlb2_list_entry *iter1;
1426         struct dlb2_list_entry *iter2;
1427         struct dlb2_function_resources *rsrcs;
1428         struct dlb2_hw_domain *domain;
1429         struct dlb2_ldb_queue *queue;
1430         RTE_SET_USED(iter1);
1431         RTE_SET_USED(iter2);
1432
1433         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1434                 return NULL;
1435
1436         rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1437
1438         if (!vdev_req)
1439                 return &hw->rsrcs.ldb_queues[id];
1440
1441         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
1442                 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
1443                         if (queue->id.virt_id == id)
1444                                 return queue;
1445                 }
1446         }
1447
1448         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
1449                 if (queue->id.virt_id == id)
1450                         return queue;
1451         }
1452
1453         return NULL;
1454 }
1455
1456 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
1457                                                       u32 id,
1458                                                       bool vdev_req,
1459                                                       unsigned int vdev_id)
1460 {
1461         struct dlb2_list_entry *iteration;
1462         struct dlb2_function_resources *rsrcs;
1463         struct dlb2_hw_domain *domain;
1464         RTE_SET_USED(iteration);
1465
1466         if (id >= DLB2_MAX_NUM_DOMAINS)
1467                 return NULL;
1468
1469         if (!vdev_req)
1470                 return &hw->domains[id];
1471
1472         rsrcs = &hw->vdev[vdev_id];
1473
1474         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
1475                 if (domain->id.virt_id == id)
1476                         return domain;
1477         }
1478
1479         return NULL;
1480 }
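
/*
 * Illustrative note (added commentary): the two lookup helpers above implement
 * the PF/vdev ID translation used throughout this file. A PF-originated
 * request carries physical IDs, which index hw->domains / hw->rsrcs directly;
 * a request forwarded on behalf of a vdev carries virtual IDs, which are
 * matched against id.virt_id within that vdev's resources. A hypothetical
 * caller looks like:
 *
 *	struct dlb2_hw_domain *domain;
 *
 *	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
 *	if (domain == NULL || !domain->configured)
 *		return -EINVAL;
 */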
1481
1482 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
1483                                            struct dlb2_ldb_port *port,
1484                                            struct dlb2_ldb_queue *queue,
1485                                            int slot,
1486                                            enum dlb2_qid_map_state new_state)
1487 {
1488         enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
1489         struct dlb2_hw_domain *domain;
1490         int domain_id;
1491
1492         domain_id = port->domain_id.phys_id;
1493
1494         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1495         if (domain == NULL) {
1496                 DLB2_HW_ERR(hw,
1497                             "[%s()] Internal error: unable to find domain %d\n",
1498                             __func__, domain_id);
1499                 return -EINVAL;
1500         }
1501
1502         switch (curr_state) {
1503         case DLB2_QUEUE_UNMAPPED:
1504                 switch (new_state) {
1505                 case DLB2_QUEUE_MAPPED:
1506                         queue->num_mappings++;
1507                         port->num_mappings++;
1508                         break;
1509                 case DLB2_QUEUE_MAP_IN_PROG:
1510                         queue->num_pending_additions++;
1511                         domain->num_pending_additions++;
1512                         break;
1513                 default:
1514                         goto error;
1515                 }
1516                 break;
1517         case DLB2_QUEUE_MAPPED:
1518                 switch (new_state) {
1519                 case DLB2_QUEUE_UNMAPPED:
1520                         queue->num_mappings--;
1521                         port->num_mappings--;
1522                         break;
1523                 case DLB2_QUEUE_UNMAP_IN_PROG:
1524                         port->num_pending_removals++;
1525                         domain->num_pending_removals++;
1526                         break;
1527                 case DLB2_QUEUE_MAPPED:
1528                         /* Priority change, nothing to update */
1529                         break;
1530                 default:
1531                         goto error;
1532                 }
1533                 break;
1534         case DLB2_QUEUE_MAP_IN_PROG:
1535                 switch (new_state) {
1536                 case DLB2_QUEUE_UNMAPPED:
1537                         queue->num_pending_additions--;
1538                         domain->num_pending_additions--;
1539                         break;
1540                 case DLB2_QUEUE_MAPPED:
1541                         queue->num_mappings++;
1542                         port->num_mappings++;
1543                         queue->num_pending_additions--;
1544                         domain->num_pending_additions--;
1545                         break;
1546                 default:
1547                         goto error;
1548                 }
1549                 break;
1550         case DLB2_QUEUE_UNMAP_IN_PROG:
1551                 switch (new_state) {
1552                 case DLB2_QUEUE_UNMAPPED:
1553                         port->num_pending_removals--;
1554                         domain->num_pending_removals--;
1555                         queue->num_mappings--;
1556                         port->num_mappings--;
1557                         break;
1558                 case DLB2_QUEUE_MAPPED:
1559                         port->num_pending_removals--;
1560                         domain->num_pending_removals--;
1561                         break;
1562                 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1563                         /* Nothing to update */
1564                         break;
1565                 default:
1566                         goto error;
1567                 }
1568                 break;
1569         case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1570                 switch (new_state) {
1571                 case DLB2_QUEUE_UNMAP_IN_PROG:
1572                         /* Nothing to update */
1573                         break;
1574                 case DLB2_QUEUE_UNMAPPED:
1575                         /*
1576                          * An UNMAP_IN_PROG_PENDING_MAP slot briefly
1577                          * becomes UNMAPPED before it transitions to
1578                          * MAP_IN_PROG.
1579                          */
1580                         queue->num_mappings--;
1581                         port->num_mappings--;
1582                         port->num_pending_removals--;
1583                         domain->num_pending_removals--;
1584                         break;
1585                 default:
1586                         goto error;
1587                 }
1588                 break;
1589         default:
1590                 goto error;
1591         }
1592
1593         port->qid_map[slot].state = new_state;
1594
1595         DLB2_HW_DBG(hw,
1596                     "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
1597                     __func__, queue->id.phys_id, port->id.phys_id,
1598                     curr_state, new_state);
1599         return 0;
1600
1601 error:
1602         DLB2_HW_ERR(hw,
1603                     "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
1604                     __func__, queue->id.phys_id, port->id.phys_id,
1605                     curr_state, new_state);
1606         return -EFAULT;
1607 }
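
/*
 * Summary of the slot transitions accepted above (derived from the switch
 * statement; the DLB2_QUEUE_ prefix is omitted and any other transition
 * returns -EFAULT):
 *
 *	UNMAPPED                  -> MAPPED, MAP_IN_PROG
 *	MAPPED                    -> UNMAPPED, UNMAP_IN_PROG, MAPPED (prio change)
 *	MAP_IN_PROG               -> UNMAPPED, MAPPED
 *	UNMAP_IN_PROG             -> UNMAPPED, MAPPED, UNMAP_IN_PROG_PENDING_MAP
 *	UNMAP_IN_PROG_PENDING_MAP -> UNMAP_IN_PROG, UNMAPPED
 *
 * Each transition also adjusts the queue/port mapping counts and the
 * queue/domain pending-addition or port/domain pending-removal counts.
 */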
1608
1609 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
1610                                 enum dlb2_qid_map_state state,
1611                                 int *slot)
1612 {
1613         int i;
1614
1615         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1616                 if (port->qid_map[i].state == state)
1617                         break;
1618         }
1619
1620         *slot = i;
1621
1622         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1623 }
1624
1625 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
1626                                       enum dlb2_qid_map_state state,
1627                                       struct dlb2_ldb_queue *queue,
1628                                       int *slot)
1629 {
1630         int i;
1631
1632         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1633                 if (port->qid_map[i].state == state &&
1634                     port->qid_map[i].qid == queue->id.phys_id)
1635                         break;
1636         }
1637
1638         *slot = i;
1639
1640         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1641 }
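
/*
 * Usage sketch (illustrative only): the find helpers above return a CQ slot
 * index that is then used to program per-slot registers and to drive
 * dlb2_port_slot_state_transition(). Claiming a free slot for a new mapping,
 * as done by dlb2_ldb_port_map_qid_dynamic() below, looks roughly like:
 *
 *	int slot, ret;
 *
 *	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot))
 *		return -EFAULT;
 *
 *	port->qid_map[slot].qid = queue->id.phys_id;
 *	port->qid_map[slot].priority = priority;
 *
 *	ret = dlb2_port_slot_state_transition(hw, port, queue, slot,
 *					      DLB2_QUEUE_MAP_IN_PROG);
 */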
1642
1643 /*
1644  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
1645  * their names imply: they only touch enabled CQs whose slot for the queue
1646  * is MAPPED, and should only be called by the dynamic CQ mapping code.
1647  */
1648 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
1649                                               struct dlb2_hw_domain *domain,
1650                                               struct dlb2_ldb_queue *queue)
1651 {
1652         struct dlb2_list_entry *iter;
1653         struct dlb2_ldb_port *port;
1654         int slot, i;
1655         RTE_SET_USED(iter);
1656
1657         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1658                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1659                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1660
1661                         if (!dlb2_port_find_slot_queue(port, state,
1662                                                        queue, &slot))
1663                                 continue;
1664
1665                         if (port->enabled)
1666                                 dlb2_ldb_port_cq_disable(hw, port);
1667                 }
1668         }
1669 }
1670
1671 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
1672                                              struct dlb2_hw_domain *domain,
1673                                              struct dlb2_ldb_queue *queue)
1674 {
1675         struct dlb2_list_entry *iter;
1676         struct dlb2_ldb_port *port;
1677         int slot, i;
1678         RTE_SET_USED(iter);
1679
1680         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1681                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1682                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1683
1684                         if (!dlb2_port_find_slot_queue(port, state,
1685                                                        queue, &slot))
1686                                 continue;
1687
1688                         if (port->enabled)
1689                                 dlb2_ldb_port_cq_enable(hw, port);
1690                 }
1691         }
1692 }
1693
1694 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
1695                                                 struct dlb2_ldb_port *port,
1696                                                 int slot)
1697 {
1698         u32 ctrl = 0;
1699
1700         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1701         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1702         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1703
1704         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1705
1706         dlb2_flush_csr(hw);
1707 }
1708
1709 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
1710                                               struct dlb2_ldb_port *port,
1711                                               int slot)
1712 {
1713         u32 ctrl = 0;
1714
1715         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1716         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1717         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1718         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1719
1720         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1721
1722         dlb2_flush_csr(hw);
1723 }
1724
1725 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
1726                                         struct dlb2_ldb_port *p,
1727                                         struct dlb2_ldb_queue *q,
1728                                         u8 priority)
1729 {
1730         enum dlb2_qid_map_state state;
1731         u32 lsp_qid2cq2;
1732         u32 lsp_qid2cq;
1733         u32 atm_qid2cq;
1734         u32 cq2priov;
1735         u32 cq2qid;
1736         int i;
1737
1738         /* Look for a pending or already mapped slot, else an unused slot */
1739         if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
1740             !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
1741             !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
1742                 DLB2_HW_ERR(hw,
1743                             "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
1744                             __func__, __LINE__);
1745                 return -EFAULT;
1746         }
1747
1748         /* Read-modify-write the priority and valid bit register */
1749         cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));
1750
1751         cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
1752         cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
1753                     & DLB2_LSP_CQ2PRIOV_PRIO;
1754
1755         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);
1756
1757         /* Read-modify-write the QID map register */
1758         if (i < 4)
1759                 cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
1760                                                           p->id.phys_id));
1761         else
1762                 cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
1763                                                           p->id.phys_id));
1764
1765         if (i == 0 || i == 4)
1766                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
1767         if (i == 1 || i == 5)
1768                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
1769         if (i == 2 || i == 6)
1770                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
1771         if (i == 3 || i == 7)
1772                 DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);
1773
1774         if (i < 4)
1775                 DLB2_CSR_WR(hw,
1776                             DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
1777         else
1778                 DLB2_CSR_WR(hw,
1779                             DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);
1780
1781         atm_qid2cq = DLB2_CSR_RD(hw,
1782                                  DLB2_ATM_QID2CQIDIX(q->id.phys_id,
1783                                                 p->id.phys_id / 4));
1784
1785         lsp_qid2cq = DLB2_CSR_RD(hw,
1786                                  DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
1787                                                 p->id.phys_id / 4));
1788
1789         lsp_qid2cq2 = DLB2_CSR_RD(hw,
1790                                   DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
1791                                                   p->id.phys_id / 4));
1792
1793         switch (p->id.phys_id % 4) {
1794         case 0:
1795                 DLB2_BIT_SET(atm_qid2cq,
1796                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
1797                 DLB2_BIT_SET(lsp_qid2cq,
1798                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
1799                 DLB2_BIT_SET(lsp_qid2cq2,
1800                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
1801                 break;
1802
1803         case 1:
1804                 DLB2_BIT_SET(atm_qid2cq,
1805                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
1806                 DLB2_BIT_SET(lsp_qid2cq,
1807                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
1808                 DLB2_BIT_SET(lsp_qid2cq2,
1809                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
1810                 break;
1811
1812         case 2:
1813                 DLB2_BIT_SET(atm_qid2cq,
1814                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
1815                 DLB2_BIT_SET(lsp_qid2cq,
1816                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
1817                 DLB2_BIT_SET(lsp_qid2cq2,
1818                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
1819                 break;
1820
1821         case 3:
1822                 DLB2_BIT_SET(atm_qid2cq,
1823                              1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
1824                 DLB2_BIT_SET(lsp_qid2cq,
1825                              1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
1826                 DLB2_BIT_SET(lsp_qid2cq2,
1827                              1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
1828                 break;
1829         }
1830
1831         DLB2_CSR_WR(hw,
1832                     DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1833                     atm_qid2cq);
1834
1835         DLB2_CSR_WR(hw,
1836                     DLB2_LSP_QID2CQIDIX(hw->ver,
1837                                         q->id.phys_id, p->id.phys_id / 4),
1838                     lsp_qid2cq);
1839
1840         DLB2_CSR_WR(hw,
1841                     DLB2_LSP_QID2CQIDIX2(hw->ver,
1842                                          q->id.phys_id, p->id.phys_id / 4),
1843                     lsp_qid2cq2);
1844
1845         dlb2_flush_csr(hw);
1846
1847         p->qid_map[i].qid = q->id.phys_id;
1848         p->qid_map[i].priority = priority;
1849
1850         state = DLB2_QUEUE_MAPPED;
1851
1852         return dlb2_port_slot_state_transition(hw, p, q, i, state);
1853 }
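
/*
 * Note on the register indexing above (inferred from this code, not from a
 * datasheet): each load-balanced CQ has eight QID mapping slots. Slots 0-3
 * are held in LSP_CQ2QID0 and slots 4-7 in LSP_CQ2QID1, so the slot index
 * selects both the register and the QID_P0..QID_P3 field:
 *
 *	reg   = (i < 4) ? CQ2QID0 : CQ2QID1;
 *	field = QID_P(i % 4);
 *
 * The ATM_QID2CQIDIX and LSP_QID2CQIDIX{,2} registers are likewise indexed by
 * port_id / 4, with port_id % 4 selecting the CQ_P0..CQ_P3 bit group and the
 * slot index selecting the bit within that group.
 */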
1854
1855 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
1856                                            struct dlb2_ldb_port *port,
1857                                            struct dlb2_ldb_queue *queue,
1858                                            int slot)
1859 {
1860         u32 ctrl = 0;
1861         u32 active;
1862         u32 enq;
1863
1864         /* Set the atomic scheduling haswork bit */
1865         active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
1866                                                          queue->id.phys_id));
1867
1868         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1869         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1870         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1871         DLB2_BITS_SET(ctrl,
1872                       DLB2_BITS_GET(active,
1873                                     DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
1874                                     DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
1875
1876         /* Set the non-atomic scheduling haswork bit */
1877         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1878
1879         enq = DLB2_CSR_RD(hw,
1880                           DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
1881                                                        queue->id.phys_id));
1882
1883         memset(&ctrl, 0, sizeof(ctrl));
1884
1885         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1886         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1887         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1888         DLB2_BITS_SET(ctrl,
1889                       DLB2_BITS_GET(enq,
1890                                     DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
1891                       DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
1892
1893         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1894
1895         dlb2_flush_csr(hw);
1896
1897         return 0;
1898 }
1899
1900 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
1901                                               struct dlb2_ldb_port *port,
1902                                               u8 slot)
1903 {
1904         u32 ctrl = 0;
1905
1906         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1907         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1908         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
1909
1910         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1911
1912         memset(&ctrl, 0, sizeof(ctrl));
1913
1914         DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1915         DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1916         DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
1917
1918         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1919
1920         dlb2_flush_csr(hw);
1921 }
1922
1924 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
1925                                               struct dlb2_ldb_queue *queue)
1926 {
1927         u32 infl_lim = 0;
1928
1929         DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
1930                  DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
1931
1932         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
1933                     infl_lim);
1934 }
1935
1936 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
1937                                                 struct dlb2_ldb_queue *queue)
1938 {
1939         DLB2_CSR_WR(hw,
1940                     DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
1941                     DLB2_LSP_QID_LDB_INFL_LIM_RST);
1942 }
1943
1944 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
1945                                                 struct dlb2_hw_domain *domain,
1946                                                 struct dlb2_ldb_port *port,
1947                                                 struct dlb2_ldb_queue *queue)
1948 {
1949         struct dlb2_list_entry *iter;
1950         enum dlb2_qid_map_state state;
1951         int slot, ret, i;
1952         u32 infl_cnt;
1953         u8 prio;
1954         RTE_SET_USED(iter);
1955
1956         infl_cnt = DLB2_CSR_RD(hw,
1957                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
1958                                                     queue->id.phys_id));
1959
1960         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
1961                 DLB2_HW_ERR(hw,
1962                             "[%s()] Internal error: non-zero QID inflight count\n",
1963                             __func__);
1964                 return -EINVAL;
1965         }
1966
1967         /*
1968          * Statically map the queue to the port and set its has_work bits.
1969          */
1970         state = DLB2_QUEUE_MAP_IN_PROG;
1971         if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
1972                 return -EINVAL;
1973
1974         prio = port->qid_map[slot].priority;
1975
1976         /*
1977          * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
1978          * the port's qid_map state.
1979          */
1980         ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
1981         if (ret)
1982                 return ret;
1983
1984         ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
1985         if (ret)
1986                 return ret;
1987
1988         /*
1989          * Ensure IF_status(cq,qid) is 0 before enabling the port to
1990          * prevent spurious schedules from causing the queue's inflight
1991          * count to increase.
1992          */
1993         dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
1994
1995         /* Restore IF_status for each {CQ, slot} mapped to this queue */
1996         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1997                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1998                         state = DLB2_QUEUE_MAPPED;
1999                         if (!dlb2_port_find_slot_queue(port, state,
2000                                                        queue, &slot))
2001                                 continue;
2002
2003                         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2004                 }
2005         }
2006
2007         dlb2_ldb_queue_set_inflight_limit(hw, queue);
2008
2009         /* Re-enable CQs mapped to this queue */
2010         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2011
2012         /* If this queue has other mappings pending, clear its inflight limit */
2013         if (queue->num_pending_additions > 0)
2014                 dlb2_ldb_queue_clear_inflight_limit(hw, queue);
2015
2016         return 0;
2017 }
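
/*
 * Summary of the dynamic-map completion sequence above (added for clarity,
 * derived from the code):
 *
 *	1. Verify the queue's inflight count has drained to zero.
 *	2. Program the static mapping registers and move the slot to MAPPED.
 *	3. Set the atomic/non-atomic has_work bits so queued QEs are visible.
 *	4. Clear IF_status for the newly mapped {CQ, slot}, then set it for
 *	   every {CQ, slot} currently in the MAPPED state for this queue.
 *	5. Restore the queue's inflight limit and re-enable its mapped CQs.
 *	6. If more map operations are still pending for this queue, clear the
 *	   inflight limit again so they can complete the same way.
 */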
2018
2019 /**
2020  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
2021  * @hw: dlb2_hw handle for a particular device.
2022  * @port: load-balanced port
2023  * @queue: load-balanced queue
2024  * @priority: queue servicing priority
2025  *
2026  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
2027  * at a later point, and <0 if an error occurred.
2028  */
2029 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
2030                                          struct dlb2_ldb_port *port,
2031                                          struct dlb2_ldb_queue *queue,
2032                                          u8 priority)
2033 {
2034         enum dlb2_qid_map_state state;
2035         struct dlb2_hw_domain *domain;
2036         int domain_id, slot, ret;
2037         u32 infl_cnt;
2038
2039         domain_id = port->domain_id.phys_id;
2040
2041         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
2042         if (domain == NULL) {
2043                 DLB2_HW_ERR(hw,
2044                             "[%s()] Internal error: unable to find domain %d\n",
2045                             __func__, port->domain_id.phys_id);
2046                 return -EINVAL;
2047         }
2048
2049         /*
2050          * Set the QID inflight limit to 0 to prevent further scheduling of the
2051          * queue.
2052          */
2053         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
2054                                                   queue->id.phys_id), 0);
2055
2056         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
2057                 DLB2_HW_ERR(hw,
2058                             "Internal error: No available unmapped slots\n");
2059                 return -EFAULT;
2060         }
2061
2062         port->qid_map[slot].qid = queue->id.phys_id;
2063         port->qid_map[slot].priority = priority;
2064
2065         state = DLB2_QUEUE_MAP_IN_PROG;
2066         ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
2067         if (ret)
2068                 return ret;
2069
2070         infl_cnt = DLB2_CSR_RD(hw,
2071                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2072                                                     queue->id.phys_id));
2073
2074         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2075                 /*
2076                  * The queue is owed completions so it's not safe to map it
2077                  * yet. Schedule a kernel thread to complete the mapping later,
2078                  * once software has completed all the queue's inflight events.
2079                  */
2080                 if (!os_worker_active(hw))
2081                         os_schedule_work(hw);
2082
2083                 return 1;
2084         }
2085
2086         /*
2087          * Disable the affected CQ, and the CQs already mapped to the QID,
2088          * before reading the QID's inflight count a second time. There is an
2089          * unlikely race in which the QID may schedule one more QE after we
2090          * read an inflight count of 0, and disabling the CQs guarantees that
2091          * the race will not occur after a re-read of the inflight count
2092          * register.
2093          */
2094         if (port->enabled)
2095                 dlb2_ldb_port_cq_disable(hw, port);
2096
2097         dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2098
2099         infl_cnt = DLB2_CSR_RD(hw,
2100                                DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2101                                                     queue->id.phys_id));
2102
2103         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2104                 if (port->enabled)
2105                         dlb2_ldb_port_cq_enable(hw, port);
2106
2107                 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2108
2109                 /*
2110                  * The queue is owed completions so it's not safe to map it
2111                  * yet. Schedule a kernel thread to complete the mapping later,
2112                  * once software has completed all the queue's inflight events.
2113                  */
2114                 if (!os_worker_active(hw))
2115                         os_schedule_work(hw);
2116
2117                 return 1;
2118         }
2119
2120         return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2121 }
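
/*
 * Illustrative caller sketch (hypothetical, not part of the driver): the
 * tri-state return value is typically consumed as follows.
 *
 *	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
 *	if (ret < 0)
 *		return ret;	hard failure, nothing was mapped
 *	if (ret == 1)
 *		return 0;	deferred: the slot stays in MAP_IN_PROG and the
 *				scheduled worker completes the map later
 *	return 0;		mapped immediately
 */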
2122
2123 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
2124                                         struct dlb2_hw_domain *domain,
2125                                         struct dlb2_ldb_port *port)
2126 {
2127         int i;
2128
2129         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2130                 u32 infl_cnt;
2131                 struct dlb2_ldb_queue *queue;
2132                 int qid;
2133
2134                 if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
2135                         continue;
2136
2137                 qid = port->qid_map[i].qid;
2138
2139                 queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
2140
2141                 if (queue == NULL) {
2142                         DLB2_HW_ERR(hw,
2143                                     "[%s()] Internal error: unable to find queue %d\n",
2144                                     __func__, qid);
2145                         continue;
2146                 }
2147
2148                 infl_cnt = DLB2_CSR_RD(hw,
2149                                        DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2150
2151                 if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
2152                         continue;
2153
2154                 /*
2155                  * Disable the affected CQ, and the CQs already mapped to the
2156                  * QID, before reading the QID's inflight count a second time.
2157                  * There is an unlikely race in which the QID may schedule one
2158                  * more QE after we read an inflight count of 0, and disabling
2159                  * the CQs guarantees that the race will not occur after a
2160                  * re-read of the inflight count register.
2161                  */
2162                 if (port->enabled)
2163                         dlb2_ldb_port_cq_disable(hw, port);
2164
2165                 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2166
2167                 infl_cnt = DLB2_CSR_RD(hw,
2168                                        DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2169
2170                 if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2171                         if (port->enabled)
2172                                 dlb2_ldb_port_cq_enable(hw, port);
2173
2174                         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2175
2176                         continue;
2177                 }
2178
2179                 dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2180         }
2181 }
2182
2183 static unsigned int
2184 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
2185                                       struct dlb2_hw_domain *domain)
2186 {
2187         struct dlb2_list_entry *iter;
2188         struct dlb2_ldb_port *port;
2189         int i;
2190         RTE_SET_USED(iter);
2191
2192         if (!domain->configured || domain->num_pending_additions == 0)
2193                 return 0;
2194
2195         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2196                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2197                         dlb2_domain_finish_map_port(hw, domain, port);
2198         }
2199
2200         return domain->num_pending_additions;
2201 }
2202
2203 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
2204                                    struct dlb2_ldb_port *port,
2205                                    struct dlb2_ldb_queue *queue)
2206 {
2207         enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
2208         u32 lsp_qid2cq2;
2209         u32 lsp_qid2cq;
2210         u32 atm_qid2cq;
2211         u32 cq2priov;
2212         u32 queue_id;
2213         u32 port_id;
2214         int i;
2215
2216         /* Find the queue's slot */
2217         mapped = DLB2_QUEUE_MAPPED;
2218         in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
2219         pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
2220
2221         if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
2222             !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
2223             !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
2224                 DLB2_HW_ERR(hw,
2225                             "[%s():%d] Internal error: QID %d isn't mapped\n",
2226                             __func__, __LINE__, queue->id.phys_id);
2227                 return -EFAULT;
2228         }
2229
2230         port_id = port->id.phys_id;
2231         queue_id = queue->id.phys_id;
2232
2233         /* Read-modify-write the priority and valid bit register */
2234         cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));
2235
2236         cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));
2237
2238         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);
2239
2240         atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
2241                                                          port_id / 4));
2242
2243         lsp_qid2cq = DLB2_CSR_RD(hw,
2244                                  DLB2_LSP_QID2CQIDIX(hw->ver,
2245                                                 queue_id, port_id / 4));
2246
2247         lsp_qid2cq2 = DLB2_CSR_RD(hw,
2248                                   DLB2_LSP_QID2CQIDIX2(hw->ver,
2249                                                   queue_id, port_id / 4));
2250
2251         switch (port_id % 4) {
2252         case 0:
2253                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
2254                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
2255                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
2256                 break;
2257
2258         case 1:
2259                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
2260                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
2261                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
2262                 break;
2263
2264         case 2:
2265                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
2266                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
2267                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
2268                 break;
2269
2270         case 3:
2271                 atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
2272                 lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
2273                 lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
2274                 break;
2275         }
2276
2277         DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);
2278
2279         DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
2280                     lsp_qid2cq);
2281
2282         DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
2283                     lsp_qid2cq2);
2284
2285         dlb2_flush_csr(hw);
2286
2287         unmapped = DLB2_QUEUE_UNMAPPED;
2288
2289         return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
2290 }
2291
2292 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
2293                                  struct dlb2_hw_domain *domain,
2294                                  struct dlb2_ldb_port *port,
2295                                  struct dlb2_ldb_queue *queue,
2296                                  u8 prio)
2297 {
2298         if (domain->started)
2299                 return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
2300         else
2301                 return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2302 }
2303
2304 static void
2305 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
2306                                    struct dlb2_hw_domain *domain,
2307                                    struct dlb2_ldb_port *port,
2308                                    int slot)
2309 {
2310         enum dlb2_qid_map_state state;
2311         struct dlb2_ldb_queue *queue;
2312
2313         queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
2314
2315         state = port->qid_map[slot].state;
2316
2317         /* Update the QID2CQIDX and CQ2QID vectors */
2318         dlb2_ldb_port_unmap_qid(hw, port, queue);
2319
2320         /*
2321          * Ensure the QID will not be serviced by this {CQ, slot} by clearing
2322          * the has_work bits
2323          */
2324         dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
2325
2326         /* Reset the {CQ, slot} to its default state */
2327         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2328
2329         /* Re-enable the CQ if it was not manually disabled by the user */
2330         if (port->enabled)
2331                 dlb2_ldb_port_cq_enable(hw, port);
2332
2333         /*
2334          * If another mapping is waiting on this slot's removal, perform that
2335          * mapping now.
2336          */
2337         if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
2338                 struct dlb2_ldb_port_qid_map *map;
2339                 struct dlb2_ldb_queue *map_queue;
2340                 u8 prio;
2341
2342                 map = &port->qid_map[slot];
2343
2344                 map->qid = map->pending_qid;
2345                 map->priority = map->pending_priority;
2346
2347                 map_queue = &hw->rsrcs.ldb_queues[map->qid];
2348                 prio = map->priority;
2349
2350                 dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
2351         }
2352 }
2353
2355 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
2356                                           struct dlb2_hw_domain *domain,
2357                                           struct dlb2_ldb_port *port)
2358 {
2359         u32 infl_cnt;
2360         int i;
2361
2362         if (port->num_pending_removals == 0)
2363                 return false;
2364
2365         /*
2366          * The unmap requires all the CQ's outstanding inflights to be
2367          * completed.
2368          */
2369         infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
2370                                                        port->id.phys_id));
2371         if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
2372                 return false;
2373
2374         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2375                 struct dlb2_ldb_port_qid_map *map;
2376
2377                 map = &port->qid_map[i];
2378
2379                 if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
2380                     map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
2381                         continue;
2382
2383                 dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
2384         }
2385
2386         return true;
2387 }
2388
2389 static unsigned int
2390 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
2391                                         struct dlb2_hw_domain *domain)
2392 {
2393         struct dlb2_list_entry *iter;
2394         struct dlb2_ldb_port *port;
2395         int i;
2396         RTE_SET_USED(iter);
2397
2398         if (!domain->configured || domain->num_pending_removals == 0)
2399                 return 0;
2400
2401         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2402                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2403                         dlb2_domain_finish_unmap_port(hw, domain, port);
2404         }
2405
2406         return domain->num_pending_removals;
2407 }
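
/*
 * Illustrative sketch (an assumption about the surrounding driver, not shown
 * in this file): the two *_finish_*_qid_procedures() helpers are meant to be
 * polled from the context scheduled via os_schedule_work() until nothing is
 * pending, e.g.:
 *
 *	unsigned int pending;
 *
 *	pending  = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
 *	pending += dlb2_domain_finish_map_qid_procedures(hw, domain);
 *
 *	if (pending != 0)
 *		os_schedule_work(hw);	(re-poll later)
 */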
2408
2409 static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
2410                                         struct dlb2_hw_domain *domain)
2411 {
2412         struct dlb2_list_entry *iter;
2413         struct dlb2_ldb_port *port;
2414         int i;
2415         RTE_SET_USED(iter);
2416
2417         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2418                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2419                         port->enabled = false;
2420
2421                         dlb2_ldb_port_cq_disable(hw, port);
2422                 }
2423         }
2424 }
2425
2427 static void dlb2_log_reset_domain(struct dlb2_hw *hw,
2428                                   u32 domain_id,
2429                                   bool vdev_req,
2430                                   unsigned int vdev_id)
2431 {
2432         DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
2433         if (vdev_req)
2434                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2435         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2436 }
2437
2438 static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
2439                                          struct dlb2_hw_domain *domain,
2440                                          unsigned int vdev_id)
2441 {
2442         struct dlb2_list_entry *iter;
2443         struct dlb2_dir_pq_pair *port;
2444         u32 vpp_v = 0;
2445         RTE_SET_USED(iter);
2446
2447         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2448                 unsigned int offs;
2449                 u32 virt_id;
2450
2451                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2452                         virt_id = port->id.virt_id;
2453                 else
2454                         virt_id = port->id.phys_id;
2455
2456                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
2457
2458                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
2459         }
2460 }
2461
2462 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2463                                          struct dlb2_hw_domain *domain,
2464                                          unsigned int vdev_id)
2465 {
2466         struct dlb2_list_entry *iter;
2467         struct dlb2_ldb_port *port;
2468         u32 vpp_v = 0;
2469         int i;
2470         RTE_SET_USED(iter);
2471
2472         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2473                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2474                         unsigned int offs;
2475                         u32 virt_id;
2476
2477                         if (hw->virt_mode == DLB2_VIRT_SRIOV)
2478                                 virt_id = port->id.virt_id;
2479                         else
2480                                 virt_id = port->id.phys_id;
2481
2482                         offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2483
2484                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
2485                 }
2486         }
2487 }
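
/*
 * Note on the VPP indexing above (derived from the code): the per-vdev
 * virtual producer-port valid registers form flat arrays, indexed as
 *
 *	virt_id = (hw->virt_mode == DLB2_VIRT_SRIOV) ? port->id.virt_id
 *						     : port->id.phys_id;
 *	offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;		(LDB)
 *	offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;	(DIR)
 *
 * In Scalable IOV mode the physical ID is used because producer-port accesses
 * go through the PF MMIO window (see the comment in
 * __dlb2_domain_reset_ldb_port_registers() below).
 */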
2488
2489 static void
2490 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2491                                         struct dlb2_hw_domain *domain)
2492 {
2493         struct dlb2_list_entry *iter;
2494         struct dlb2_ldb_port *port;
2495         u32 int_en = 0;
2496         u32 wd_en = 0;
2497         int i;
2498         RTE_SET_USED(iter);
2499
2500         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2501                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2502                         DLB2_CSR_WR(hw,
2503                                     DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,
2504                                                        port->id.phys_id),
2505                                     int_en);
2506
2507                         DLB2_CSR_WR(hw,
2508                                     DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,
2509                                                       port->id.phys_id),
2510                                     wd_en);
2511                 }
2512         }
2513 }
2514
2515 static void
2516 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2517                                         struct dlb2_hw_domain *domain)
2518 {
2519         struct dlb2_list_entry *iter;
2520         struct dlb2_dir_pq_pair *port;
2521         u32 int_en = 0;
2522         u32 wd_en = 0;
2523         RTE_SET_USED(iter);
2524
2525         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2526                 DLB2_CSR_WR(hw,
2527                             DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2528                             int_en);
2529
2530                 DLB2_CSR_WR(hw,
2531                             DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
2532                             wd_en);
2533         }
2534 }
2535
2536 static void
2537 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2538                                           struct dlb2_hw_domain *domain)
2539 {
2540         int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2541         struct dlb2_list_entry *iter;
2542         struct dlb2_ldb_queue *queue;
2543         RTE_SET_USED(iter);
2544
2545         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2546                 int idx = domain_offset + queue->id.phys_id;
2547
2548                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);
2549
2550                 if (queue->id.vdev_owned) {
2551                         DLB2_CSR_WR(hw,
2552                                     DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
2553                                     0);
2554
2555                         idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2556                                 queue->id.virt_id;
2557
2558                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);
2559
2560                         DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
2561                 }
2562         }
2563 }
2564
2565 static void
2566 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2567                                           struct dlb2_hw_domain *domain)
2568 {
2569         struct dlb2_list_entry *iter;
2570         struct dlb2_dir_pq_pair *queue;
2571         unsigned long max_ports;
2572         int domain_offset;
2573         RTE_SET_USED(iter);
2574
2575         max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
2576
2577         domain_offset = domain->id.phys_id * max_ports;
2578
2579         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2580                 int idx = domain_offset + queue->id.phys_id;
2581
2582                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);
2583
2584                 if (queue->id.vdev_owned) {
2585                         idx = queue->id.vdev_id * max_ports + queue->id.virt_id;
2586
2587                         DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);
2588
2589                         DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
2590                 }
2591         }
2592 }
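
/*
 * Note on the write-permission indexing above (derived from the code): the
 * SYS_{LDB,DIR}_VASQID_V tables are indexed per {VAS, queue} pair and the
 * per-vdev VQID tables per {vdev, virtual queue} pair:
 *
 *	idx  = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + qid	   (LDB)
 *	idx  = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + qid (DIR)
 *	vidx = queue->id.vdev_id * (per-type maximum) + queue->id.virt_id
 *
 * Writing 0 revokes the domain's (and, when vdev-owned, the vdev's) right to
 * enqueue to the queues being torn down.
 */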
2593
2594 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2595                                                struct dlb2_hw_domain *domain)
2596 {
2597         struct dlb2_list_entry *iter;
2598         struct dlb2_ldb_port *port;
2599         u32 chk_en = 0;
2600         int i;
2601         RTE_SET_USED(iter);
2602
2603         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2604                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2605                         DLB2_CSR_WR(hw,
2606                                     DLB2_CHP_SN_CHK_ENBL(hw->ver,
2607                                                          port->id.phys_id),
2608                                     chk_en);
2609                 }
2610         }
2611 }
2612
2613 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2614                                                  struct dlb2_hw_domain *domain)
2615 {
2616         struct dlb2_list_entry *iter;
2617         struct dlb2_ldb_port *port;
2618         int i;
2619         RTE_SET_USED(iter);
2620
2621         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2622                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2623                         int j;
2624
2625                         for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
2626                                 if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2627                                         break;
2628                         }
2629
2630                         if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2631                                 DLB2_HW_ERR(hw,
2632                                             "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2633                                             __func__, port->id.phys_id);
2634                                 return -EFAULT;
2635                         }
2636                 }
2637         }
2638
2639         return 0;
2640 }
2641
2642 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2643                                         struct dlb2_hw_domain *domain)
2644 {
2645         struct dlb2_list_entry *iter;
2646         struct dlb2_dir_pq_pair *port;
2647         RTE_SET_USED(iter);
2648
2649         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2650                 port->enabled = false;
2651
2652                 dlb2_dir_port_cq_disable(hw, port);
2653         }
2654 }
2655
2656 static void
2657 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2658                                        struct dlb2_hw_domain *domain)
2659 {
2660         struct dlb2_list_entry *iter;
2661         struct dlb2_dir_pq_pair *port;
2662         u32 pp_v = 0;
2663         RTE_SET_USED(iter);
2664
2665         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2666                 DLB2_CSR_WR(hw,
2667                             DLB2_SYS_DIR_PP_V(port->id.phys_id),
2668                             pp_v);
2669         }
2670 }
2671
2672 static void
2673 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2674                                        struct dlb2_hw_domain *domain)
2675 {
2676         struct dlb2_list_entry *iter;
2677         struct dlb2_ldb_port *port;
2678         u32 pp_v = 0;
2679         int i;
2680         RTE_SET_USED(iter);
2681
2682         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2683                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2684                         DLB2_CSR_WR(hw,
2685                                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2686                                     pp_v);
2687                 }
2688         }
2689 }
2690
2691 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2692                                             struct dlb2_hw_domain *domain)
2693 {
2694         struct dlb2_list_entry *iter;
2695         struct dlb2_dir_pq_pair *dir_port;
2696         struct dlb2_ldb_port *ldb_port;
2697         struct dlb2_ldb_queue *queue;
2698         int i;
2699         RTE_SET_USED(iter);
2700
2701         /*
2702          * Confirm that all the domain's queues' inflight counts and AQED
2703          * active counts are 0.
2704          */
2705         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2706                 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2707                         DLB2_HW_ERR(hw,
2708                                     "[%s()] Internal error: failed to empty ldb queue %d\n",
2709                                     __func__, queue->id.phys_id);
2710                         return -EFAULT;
2711                 }
2712         }
2713
2714         /* Confirm that all the domain's CQs' inflight and token counts are 0. */
2715         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2716                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2717                         if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2718                             dlb2_ldb_cq_token_count(hw, ldb_port)) {
2719                                 DLB2_HW_ERR(hw,
2720                                             "[%s()] Internal error: failed to empty ldb port %d\n",
2721                                             __func__, ldb_port->id.phys_id);
2722                                 return -EFAULT;
2723                         }
2724                 }
2725         }
2726
2727         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2728                 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2729                         DLB2_HW_ERR(hw,
2730                                     "[%s()] Internal error: failed to empty dir queue %d\n",
2731                                     __func__, dir_port->id.phys_id);
2732                         return -EFAULT;
2733                 }
2734
2735                 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2736                         DLB2_HW_ERR(hw,
2737                                     "[%s()] Internal error: failed to empty dir port %d\n",
2738                                     __func__, dir_port->id.phys_id);
2739                         return -EFAULT;
2740                 }
2741         }
2742
2743         return 0;
2744 }
2745
2746 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2747                                                    struct dlb2_ldb_port *port)
2748 {
2749         DLB2_CSR_WR(hw,
2750                     DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2751                     DLB2_SYS_LDB_PP2VAS_RST);
2752
2753         DLB2_CSR_WR(hw,
2754                     DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
2755                     DLB2_CHP_LDB_CQ2VAS_RST);
2756
2757         DLB2_CSR_WR(hw,
2758                     DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2759                     DLB2_SYS_LDB_PP2VDEV_RST);
2760
2761         if (port->id.vdev_owned) {
2762                 unsigned int offs;
2763                 u32 virt_id;
2764
2765                 /*
2766                  * DLB uses producer port address bits 17:12 to determine the
2767                  * producer port ID. In Scalable IOV mode, PP accesses come
2768                  * through the PF MMIO window for the physical producer port,
2769                  * so for translation purposes the virtual and physical port
2770                  * IDs are equal.
2771                  */
2772                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2773                         virt_id = port->id.virt_id;
2774                 else
2775                         virt_id = port->id.phys_id;
2776
2777                 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2778
2779                 DLB2_CSR_WR(hw,
2780                             DLB2_SYS_VF_LDB_VPP2PP(offs),
2781                             DLB2_SYS_VF_LDB_VPP2PP_RST);
2782
2783                 DLB2_CSR_WR(hw,
2784                             DLB2_SYS_VF_LDB_VPP_V(offs),
2785                             DLB2_SYS_VF_LDB_VPP_V_RST);
2786         }
2787
2788         DLB2_CSR_WR(hw,
2789                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
2790                     DLB2_SYS_LDB_PP_V_RST);
2791
2792         DLB2_CSR_WR(hw,
2793                     DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
2794                     DLB2_LSP_CQ_LDB_DSBL_RST);
2795
2796         DLB2_CSR_WR(hw,
2797                     DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
2798                     DLB2_CHP_LDB_CQ_DEPTH_RST);
2799
2800         if (hw->ver != DLB2_HW_V2)
2801                 DLB2_CSR_WR(hw,
2802                             DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
2803                             DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
2804
2805         DLB2_CSR_WR(hw,
2806                     DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
2807                     DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2808
2809         DLB2_CSR_WR(hw,
2810                     DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
2811                     DLB2_CHP_HIST_LIST_LIM_RST);
2812
2813         DLB2_CSR_WR(hw,
2814                     DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
2815                     DLB2_CHP_HIST_LIST_BASE_RST);
2816
2817         DLB2_CSR_WR(hw,
2818                     DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
2819                     DLB2_CHP_HIST_LIST_POP_PTR_RST);
2820
2821         DLB2_CSR_WR(hw,
2822                     DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
2823                     DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2824
2825         DLB2_CSR_WR(hw,
2826                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2827                     DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2828
2829         DLB2_CSR_WR(hw,
2830                     DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2831                     DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2832
2833         DLB2_CSR_WR(hw,
2834                     DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
2835                     DLB2_CHP_LDB_CQ_INT_ENB_RST);
2836
2837         DLB2_CSR_WR(hw,
2838                     DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2839                     DLB2_SYS_LDB_CQ_ISR_RST);
2840
2841         DLB2_CSR_WR(hw,
2842                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2843                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2844
2845         DLB2_CSR_WR(hw,
2846                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2847                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2848
2849         DLB2_CSR_WR(hw,
2850                     DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
2851                     DLB2_CHP_LDB_CQ_WPTR_RST);
2852
2853         DLB2_CSR_WR(hw,
2854                     DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
2855                     DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2856
2857         DLB2_CSR_WR(hw,
2858                     DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2859                     DLB2_SYS_LDB_CQ_ADDR_L_RST);
2860
2861         DLB2_CSR_WR(hw,
2862                     DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2863                     DLB2_SYS_LDB_CQ_ADDR_U_RST);
2864
2865         if (hw->ver == DLB2_HW_V2)
2866                 DLB2_CSR_WR(hw,
2867                             DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2868                             DLB2_SYS_LDB_CQ_AT_RST);
2869
2870         DLB2_CSR_WR(hw,
2871                     DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
2872                     DLB2_SYS_LDB_CQ_PASID_RST);
2873
2874         DLB2_CSR_WR(hw,
2875                     DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2876                     DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2877
2878         DLB2_CSR_WR(hw,
2879                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
2880                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2881
2882         DLB2_CSR_WR(hw,
2883                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
2884                     DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2885
2886         DLB2_CSR_WR(hw,
2887                     DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
2888                     DLB2_LSP_CQ2QID0_RST);
2889
2890         DLB2_CSR_WR(hw,
2891                     DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
2892                     DLB2_LSP_CQ2QID1_RST);
2893
2894         DLB2_CSR_WR(hw,
2895                     DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
2896                     DLB2_LSP_CQ2PRIOV_RST);
2897 }
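
/*
 * Illustrative sketch only (not called by the driver): how the per-vdev VPP
 * register index used above is formed for a vdev-owned load-balanced port.
 * Each vdev owns a block of DLB2_MAX_NUM_LDB_PORTS consecutive VPP entries,
 * and the index within that block is the port ID encoded in producer port
 * address bits 17:12 (the virtual ID under SR-IOV, the physical ID under
 * Scalable IOV). The helper name is hypothetical.
 */
static inline unsigned int
dlb2_example_ldb_vpp_offset(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
        u32 virt_id = (hw->virt_mode == DLB2_VIRT_SRIOV) ?
                      port->id.virt_id : port->id.phys_id;

        return port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
}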
2898
2899 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2900                                                  struct dlb2_hw_domain *domain)
2901 {
2902         struct dlb2_list_entry *iter;
2903         struct dlb2_ldb_port *port;
2904         int i;
2905         RTE_SET_USED(iter);
2906
2907         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2908                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2909                         __dlb2_domain_reset_ldb_port_registers(hw, port);
2910         }
2911 }
2912
2913 static void
2914 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2915                                        struct dlb2_dir_pq_pair *port)
2916 {
2917         u32 reg = 0;
2918
2919         DLB2_CSR_WR(hw,
2920                     DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
2921                     DLB2_CHP_DIR_CQ2VAS_RST);
2922
2923         DLB2_CSR_WR(hw,
2924                     DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
2925                     DLB2_LSP_CQ_DIR_DSBL_RST);
2926
2927         DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);
2928
2929         if (hw->ver == DLB2_HW_V2)
2930                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2931         else
2932                 DLB2_CSR_WR(hw,
2933                             DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
2934
2935         DLB2_CSR_WR(hw,
2936                     DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
2937                     DLB2_CHP_DIR_CQ_DEPTH_RST);
2938
2939         DLB2_CSR_WR(hw,
2940                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2941                     DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2942
2943         DLB2_CSR_WR(hw,
2944                     DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2945                     DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2946
2947         DLB2_CSR_WR(hw,
2948                     DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2949                     DLB2_CHP_DIR_CQ_INT_ENB_RST);
2950
2951         DLB2_CSR_WR(hw,
2952                     DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2953                     DLB2_SYS_DIR_CQ_ISR_RST);
2954
2955         DLB2_CSR_WR(hw,
2956                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
2957                                                       port->id.phys_id),
2958                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2959
2960         DLB2_CSR_WR(hw,
2961                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2962                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2963
2964         DLB2_CSR_WR(hw,
2965                     DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
2966                     DLB2_CHP_DIR_CQ_WPTR_RST);
2967
2968         DLB2_CSR_WR(hw,
2969                     DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
2970                     DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2971
2972         DLB2_CSR_WR(hw,
2973                     DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2974                     DLB2_SYS_DIR_CQ_ADDR_L_RST);
2975
2976         DLB2_CSR_WR(hw,
2977                     DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2978                     DLB2_SYS_DIR_CQ_ADDR_U_RST);
2979
2980         DLB2_CSR_WR(hw,
2981                     DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2982                     DLB2_SYS_DIR_CQ_AT_RST);
2983
2989         DLB2_CSR_WR(hw,
2990                     DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
2991                     DLB2_SYS_DIR_CQ_PASID_RST);
2992
2993         DLB2_CSR_WR(hw,
2994                     DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2995                     DLB2_SYS_DIR_CQ_FMT_RST);
2996
2997         DLB2_CSR_WR(hw,
2998                     DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2999                     DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
3000
3001         DLB2_CSR_WR(hw,
3002                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3003                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
3004
3005         DLB2_CSR_WR(hw,
3006                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3007                     DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
3008
3009         DLB2_CSR_WR(hw,
3010                     DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
3011                     DLB2_SYS_DIR_PP2VAS_RST);
3012
3013         DLB2_CSR_WR(hw,
3014                     DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
3015                     DLB2_CHP_DIR_CQ2VAS_RST);
3016
3017         DLB2_CSR_WR(hw,
3018                     DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
3019                     DLB2_SYS_DIR_PP2VDEV_RST);
3020
3021         if (port->id.vdev_owned) {
3022                 unsigned int offs;
3023                 u32 virt_id;
3024
3025                 /*
3026                  * DLB uses producer port address bits 17:12 to determine the
3027                  * producer port ID. In Scalable IOV mode, PP accesses come
3028                  * through the PF MMIO window for the physical producer port,
3029                  * so for translation purposes the virtual and physical port
3030                  * IDs are equal.
3031                  */
3032                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3033                         virt_id = port->id.virt_id;
3034                 else
3035                         virt_id = port->id.phys_id;
3036
3037                 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
3038                         virt_id;
3039
3040                 DLB2_CSR_WR(hw,
3041                             DLB2_SYS_VF_DIR_VPP2PP(offs),
3042                             DLB2_SYS_VF_DIR_VPP2PP_RST);
3043
3044                 DLB2_CSR_WR(hw,
3045                             DLB2_SYS_VF_DIR_VPP_V(offs),
3046                             DLB2_SYS_VF_DIR_VPP_V_RST);
3047         }
3048
3049         DLB2_CSR_WR(hw,
3050                     DLB2_SYS_DIR_PP_V(port->id.phys_id),
3051                     DLB2_SYS_DIR_PP_V_RST);
3052 }
3053
3054 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3055                                                  struct dlb2_hw_domain *domain)
3056 {
3057         struct dlb2_list_entry *iter;
3058         struct dlb2_dir_pq_pair *port;
3059         RTE_SET_USED(iter);
3060
3061         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3062                 __dlb2_domain_reset_dir_port_registers(hw, port);
3063 }
3064
3065 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
3066                                                   struct dlb2_hw_domain *domain)
3067 {
3068         struct dlb2_list_entry *iter;
3069         struct dlb2_ldb_queue *queue;
3070         RTE_SET_USED(iter);
3071
3072         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3073                 unsigned int queue_id = queue->id.phys_id;
3074                 int i;
3075
3076                 DLB2_CSR_WR(hw,
3077                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
3078                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3079
3080                 DLB2_CSR_WR(hw,
3081                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
3082                             DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3083
3084                 DLB2_CSR_WR(hw,
3085                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
3086                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3087
3088                 DLB2_CSR_WR(hw,
3089                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
3090                             DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3091
3092                 DLB2_CSR_WR(hw,
3093                             DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
3094                             DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3095
3096                 DLB2_CSR_WR(hw,
3097                             DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
3098                             DLB2_LSP_QID_LDB_INFL_LIM_RST);
3099
3100                 DLB2_CSR_WR(hw,
3101                             DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
3102                             DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3103
3104                 DLB2_CSR_WR(hw,
3105                             DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
3106                             DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3107
3108                 DLB2_CSR_WR(hw,
3109                             DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
3110                             DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3111
3112                 DLB2_CSR_WR(hw,
3113                             DLB2_SYS_LDB_QID_ITS(queue_id),
3114                             DLB2_SYS_LDB_QID_ITS_RST);
3115
3116                 DLB2_CSR_WR(hw,
3117                             DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
3118                             DLB2_CHP_ORD_QID_SN_RST);
3119
3120                 DLB2_CSR_WR(hw,
3121                             DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
3122                             DLB2_CHP_ORD_QID_SN_MAP_RST);
3123
3124                 DLB2_CSR_WR(hw,
3125                             DLB2_SYS_LDB_QID_V(queue_id),
3126                             DLB2_SYS_LDB_QID_V_RST);
3127
3128                 DLB2_CSR_WR(hw,
3129                             DLB2_SYS_LDB_QID_CFG_V(queue_id),
3130                             DLB2_SYS_LDB_QID_CFG_V_RST);
3131
3132                 if (queue->sn_cfg_valid) {
3133                         u32 offs[2];
3134
3135                         offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
3136                                                          queue->sn_slot);
3137                         offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
3138                                                          queue->sn_slot);
3139
3140                         DLB2_CSR_WR(hw,
3141                                     offs[queue->sn_group],
3142                                     DLB2_RO_GRP_0_SLT_SHFT_RST);
3143                 }
3144
3145                 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3146                         DLB2_CSR_WR(hw,
3147                                     DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
3148                                     DLB2_LSP_QID2CQIDIX_00_RST);
3149
3150                         DLB2_CSR_WR(hw,
3151                                     DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
3152                                     DLB2_LSP_QID2CQIDIX2_00_RST);
3153
3154                         DLB2_CSR_WR(hw,
3155                                     DLB2_ATM_QID2CQIDIX(queue_id, i),
3156                                     DLB2_ATM_QID2CQIDIX_00_RST);
3157                 }
3158         }
3159 }
3160
3161 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3162                                                   struct dlb2_hw_domain *domain)
3163 {
3164         struct dlb2_list_entry *iter;
3165         struct dlb2_dir_pq_pair *queue;
3166         RTE_SET_USED(iter);
3167
3168         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3169                 DLB2_CSR_WR(hw,
3170                             DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
3171                                                        queue->id.phys_id),
3172                             DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3173
3174                 DLB2_CSR_WR(hw,
3175                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
3176                                                           queue->id.phys_id),
3177                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3178
3179                 DLB2_CSR_WR(hw,
3180                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
3181                                                           queue->id.phys_id),
3182                             DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3183
3184                 DLB2_CSR_WR(hw,
3185                             DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
3186                                                          queue->id.phys_id),
3187                             DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3188
3189                 DLB2_CSR_WR(hw,
3190                             DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3191                             DLB2_SYS_DIR_QID_ITS_RST);
3192
3193                 DLB2_CSR_WR(hw,
3194                             DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3195                             DLB2_SYS_DIR_QID_V_RST);
3196         }
3197 }
3198
3203 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3204                                         struct dlb2_hw_domain *domain)
3205 {
3206         dlb2_domain_reset_ldb_port_registers(hw, domain);
3207
3208         dlb2_domain_reset_dir_port_registers(hw, domain);
3209
3210         dlb2_domain_reset_ldb_queue_registers(hw, domain);
3211
3212         dlb2_domain_reset_dir_queue_registers(hw, domain);
3213
3214         if (hw->ver == DLB2_HW_V2) {
3215                 DLB2_CSR_WR(hw,
3216                             DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3217                             DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3218
3219                 DLB2_CSR_WR(hw,
3220                             DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3221                             DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3222         } else {
3223                 DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
3224                             DLB2_CHP_CFG_VAS_CRD_RST);
3225         }
3226 }
3227
3228 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3229                                             struct dlb2_hw_domain *domain)
3230 {
3231         struct dlb2_dir_pq_pair *tmp_dir_port;
3232         struct dlb2_ldb_queue *tmp_ldb_queue;
3233         struct dlb2_ldb_port *tmp_ldb_port;
3234         struct dlb2_list_entry *iter1;
3235         struct dlb2_list_entry *iter2;
3236         struct dlb2_function_resources *rsrcs;
3237         struct dlb2_dir_pq_pair *dir_port;
3238         struct dlb2_ldb_queue *ldb_queue;
3239         struct dlb2_ldb_port *ldb_port;
3240         struct dlb2_list_head *list;
3241         int ret, i;
3242         RTE_SET_USED(tmp_dir_port);
3243         RTE_SET_USED(tmp_ldb_queue);
3244         RTE_SET_USED(tmp_ldb_port);
3245         RTE_SET_USED(iter1);
3246         RTE_SET_USED(iter2);
3247
3248         rsrcs = domain->parent_func;
3249
3250         /* Move the domain's ldb queues to the function's avail list */
3251         list = &domain->used_ldb_queues;
3252         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3253                 if (ldb_queue->sn_cfg_valid) {
3254                         struct dlb2_sn_group *grp;
3255
3256                         grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3257
3258                         dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3259                         ldb_queue->sn_cfg_valid = false;
3260                 }
3261
3262                 ldb_queue->owned = false;
3263                 ldb_queue->num_mappings = 0;
3264                 ldb_queue->num_pending_additions = 0;
3265
3266                 dlb2_list_del(&domain->used_ldb_queues,
3267                               &ldb_queue->domain_list);
3268                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3269                               &ldb_queue->func_list);
3270                 rsrcs->num_avail_ldb_queues++;
3271         }
3272
3273         list = &domain->avail_ldb_queues;
3274         DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3275                 ldb_queue->owned = false;
3276
3277                 dlb2_list_del(&domain->avail_ldb_queues,
3278                               &ldb_queue->domain_list);
3279                 dlb2_list_add(&rsrcs->avail_ldb_queues,
3280                               &ldb_queue->func_list);
3281                 rsrcs->num_avail_ldb_queues++;
3282         }
3283
3284         /* Move the domain's ldb ports to the function's avail list */
3285         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3286                 list = &domain->used_ldb_ports[i];
3287                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3288                                        iter1, iter2) {
3289                         int j;
3290
3291                         ldb_port->owned = false;
3292                         ldb_port->configured = false;
3293                         ldb_port->num_pending_removals = 0;
3294                         ldb_port->num_mappings = 0;
3295                         ldb_port->init_tkn_cnt = 0;
3296                         ldb_port->cq_depth = 0;
3297                         for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3298                                 ldb_port->qid_map[j].state =
3299                                         DLB2_QUEUE_UNMAPPED;
3300
3301                         dlb2_list_del(&domain->used_ldb_ports[i],
3302                                       &ldb_port->domain_list);
3303                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3304                                       &ldb_port->func_list);
3305                         rsrcs->num_avail_ldb_ports[i]++;
3306                 }
3307
3308                 list = &domain->avail_ldb_ports[i];
3309                 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3310                                        iter1, iter2) {
3311                         ldb_port->owned = false;
3312
3313                         dlb2_list_del(&domain->avail_ldb_ports[i],
3314                                       &ldb_port->domain_list);
3315                         dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3316                                       &ldb_port->func_list);
3317                         rsrcs->num_avail_ldb_ports[i]++;
3318                 }
3319         }
3320
3321         /* Move the domain's dir ports to the function's avail list */
3322         list = &domain->used_dir_pq_pairs;
3323         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3324                 dir_port->owned = false;
3325                 dir_port->port_configured = false;
3326                 dir_port->init_tkn_cnt = 0;
3327
3328                 dlb2_list_del(&domain->used_dir_pq_pairs,
3329                               &dir_port->domain_list);
3330
3331                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3332                               &dir_port->func_list);
3333                 rsrcs->num_avail_dir_pq_pairs++;
3334         }
3335
3336         list = &domain->avail_dir_pq_pairs;
3337         DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3338                 dir_port->owned = false;
3339
3340                 dlb2_list_del(&domain->avail_dir_pq_pairs,
3341                               &dir_port->domain_list);
3342
3343                 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3344                               &dir_port->func_list);
3345                 rsrcs->num_avail_dir_pq_pairs++;
3346         }
3347
3348         /* Return hist list entries to the function */
3349         ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3350                                     domain->hist_list_entry_base,
3351                                     domain->total_hist_list_entries);
3352         if (ret) {
3353                 DLB2_HW_ERR(hw,
3354                             "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
3355                             __func__);
3356                 return ret;
3357         }
3358
3359         domain->total_hist_list_entries = 0;
3360         domain->avail_hist_list_entries = 0;
3361         domain->hist_list_entry_base = 0;
3362         domain->hist_list_entry_offset = 0;
3363
3364         if (hw->ver == DLB2_HW_V2_5) {
3365                 rsrcs->num_avail_entries += domain->num_credits;
3366                 domain->num_credits = 0;
3367         } else {
3368                 rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3369                 domain->num_ldb_credits = 0;
3370
3371                 rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3372                 domain->num_dir_credits = 0;
3373         }
3374         rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3375         rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3376         domain->num_avail_aqed_entries = 0;
3377         domain->num_used_aqed_entries = 0;
3378
3379         domain->num_pending_removals = 0;
3380         domain->num_pending_additions = 0;
3381         domain->configured = false;
3382         domain->started = false;
3383
3384         /*
3385          * Move the domain out of the used_domains list and back to the
3386          * function's avail_domains list.
3387          */
3388         dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3389         dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3390         rsrcs->num_avail_domains++;
3391
3392         return 0;
3393 }
3394
3395 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3396                                             struct dlb2_hw_domain *domain,
3397                                             struct dlb2_ldb_queue *queue)
3398 {
3399         struct dlb2_ldb_port *port = NULL;
3400         int ret, i;
3401
3402         /* If a domain has LDB queues, it must have LDB ports */
3403         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3404                 port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
3405                                           typeof(*port));
3406                 if (port)
3407                         break;
3408         }
3409
3410         if (port == NULL) {
3411                 DLB2_HW_ERR(hw,
3412                             "[%s()] Internal error: No configured LDB ports\n",
3413                             __func__);
3414                 return -EFAULT;
3415         }
3416
3417         /* If necessary, free up a QID slot in this CQ */
3418         if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3419                 struct dlb2_ldb_queue *mapped_queue;
3420
3421                 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3422
3423                 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3424                 if (ret)
3425                         return ret;
3426         }
3427
3428         ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3429         if (ret)
3430                 return ret;
3431
3432         return dlb2_domain_drain_mapped_queues(hw, domain);
3433 }
3434
3435 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3436                                              struct dlb2_hw_domain *domain)
3437 {
3438         struct dlb2_list_entry *iter;
3439         struct dlb2_ldb_queue *queue;
3440         int ret;
3441         RTE_SET_USED(iter);
3442
3443         /* If the domain hasn't been started, there's no traffic to drain */
3444         if (!domain->started)
3445                 return 0;
3446
3447         /*
3448          * Pre-condition: the unattached queue must not have any outstanding
3449          * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3450          * prior to this in dlb2_domain_drain_mapped_queues().
3451          */
3452         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3453                 if (queue->num_mappings != 0 ||
3454                     dlb2_ldb_queue_is_empty(hw, queue))
3455                         continue;
3456
3457                 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3458                 if (ret)
3459                         return ret;
3460         }
3461
3462         return 0;
3463 }
3464
3465 /**
3466  * dlb2_reset_domain() - reset a scheduling domain
3467  * @hw: dlb2_hw handle for a particular device.
3468  * @domain_id: domain ID.
3469  * @vdev_req: indicates whether this request came from a vdev.
3470  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3471  *
3472  * This function resets and frees a DLB 2.0 scheduling domain and its associated
3473  * resources.
3474  *
3475  * Pre-condition: the driver must ensure software has stopped sending QEs
3476  * through this domain's producer ports before invoking this function, or
3477  * undefined behavior will result.
3478  *
3479  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3480  * device.
3481  *
3482  * Return:
3483  * Returns 0 upon success, < 0 otherwise.
3484  *
3485  * EINVAL - Invalid domain ID, or the domain is not configured.
3486  * EFAULT - Internal error. (Possibly caused if the pre-condition above is
3487  *          not met.)
3488  * ETIMEDOUT - Hardware component didn't reset in the expected time.
3489  */
3490 int dlb2_reset_domain(struct dlb2_hw *hw,
3491                       u32 domain_id,
3492                       bool vdev_req,
3493                       unsigned int vdev_id)
3494 {
3495         struct dlb2_hw_domain *domain;
3496         int ret;
3497
3498         dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3499
3500         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3501
3502         if (domain == NULL || !domain->configured)
3503                 return -EINVAL;
3504
3505         /* Disable VPPs */
3506         if (vdev_req) {
3507                 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3508
3509                 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3510         }
3511
3512         /* Disable CQ interrupts */
3513         dlb2_domain_disable_dir_port_interrupts(hw, domain);
3514
3515         dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3516
3517         /*
3518          * For each queue owned by this domain, disable its write permissions to
3519          * cause any traffic sent to it to be dropped. Well-behaved software
3520          * should not be sending QEs at this point.
3521          */
3522         dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3523
3524         dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3525
3526         /* Turn off completion tracking on all the domain's PPs. */
3527         dlb2_domain_disable_ldb_seq_checks(hw, domain);
3528
3529         /*
3530          * Disable the LDB CQs and drain them in order to complete the map and
3531          * unmap procedures, which require zero CQ inflights and zero QID
3532          * inflights respectively.
3533          */
3534         dlb2_domain_disable_ldb_cqs(hw, domain);
3535
3536         dlb2_domain_drain_ldb_cqs(hw, domain, false);
3537
3538         ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3539         if (ret)
3540                 return ret;
3541
3542         ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3543         if (ret)
3544                 return ret;
3545
3546         ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3547         if (ret)
3548                 return ret;
3549
3550         /* Re-enable the CQs in order to drain the mapped queues. */
3551         dlb2_domain_enable_ldb_cqs(hw, domain);
3552
3553         ret = dlb2_domain_drain_mapped_queues(hw, domain);
3554         if (ret)
3555                 return ret;
3556
3557         ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3558         if (ret)
3559                 return ret;
3560
3561         /* Done draining LDB QEs, so disable the CQs. */
3562         dlb2_domain_disable_ldb_cqs(hw, domain);
3563
3564         dlb2_domain_drain_dir_queues(hw, domain);
3565
3566         /* Done draining DIR QEs, so disable the CQs. */
3567         dlb2_domain_disable_dir_cqs(hw, domain);
3568
3569         /* Disable PPs */
3570         dlb2_domain_disable_dir_producer_ports(hw, domain);
3571
3572         dlb2_domain_disable_ldb_producer_ports(hw, domain);
3573
3574         ret = dlb2_domain_verify_reset_success(hw, domain);
3575         if (ret)
3576                 return ret;
3577
3578         /* Reset the QID and port state. */
3579         dlb2_domain_reset_registers(hw, domain);
3580
3581         /* Hardware reset complete. Reset the domain's software state */
3582         return dlb2_domain_reset_software_state(hw, domain);
3583 }
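
/*
 * Illustrative usage sketch, not part of the driver flow: resetting a
 * PF-owned domain. The quiesce step is only described, since how software
 * stops enqueueing is application specific; the call and its error handling
 * mirror the function above. The wrapper name is hypothetical.
 */
static inline int dlb2_example_reset_pf_domain(struct dlb2_hw *hw,
                                               u32 domain_id)
{
        int ret;

        /*
         * Pre-condition: the caller has already stopped all software from
         * sending QEs through this domain's producer ports.
         */
        ret = dlb2_reset_domain(hw, domain_id, false, 0);
        if (ret)
                DLB2_HW_ERR(hw, "[%s()] domain %u reset failed: %d\n",
                            __func__, domain_id, ret);

        return ret;
}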
3584
3585 static void
3586 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3587                                u32 domain_id,
3588                                struct dlb2_create_ldb_queue_args *args,
3589                                bool vdev_req,
3590                                unsigned int vdev_id)
3591 {
3592         DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3593         if (vdev_req)
3594                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3595         DLB2_HW_DBG(hw, "\tDomain ID:                  %d\n",
3596                     domain_id);
3597         DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3598                     args->num_sequence_numbers);
3599         DLB2_HW_DBG(hw, "\tNumber of QID inflights:    %d\n",
3600                     args->num_qid_inflights);
3601         DLB2_HW_DBG(hw, "\tNumber of ATM inflights:    %d\n",
3602                     args->num_atomic_inflights);
3603 }
3604
3605 static int
3606 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3607                                   struct dlb2_ldb_queue *queue,
3608                                   struct dlb2_create_ldb_queue_args *args)
3609 {
3610         int slot = -1;
3611         int i;
3612
3613         queue->sn_cfg_valid = false;
3614
3615         if (args->num_sequence_numbers == 0)
3616                 return 0;
3617
3618         for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3619                 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3620
3621                 if (group->sequence_numbers_per_queue ==
3622                     args->num_sequence_numbers &&
3623                     !dlb2_sn_group_full(group)) {
3624                         slot = dlb2_sn_group_alloc_slot(group);
3625                         if (slot >= 0)
3626                                 break;
3627                 }
3628         }
3629
3630         if (slot == -1) {
3631                 DLB2_HW_ERR(hw,
3632                             "[%s():%d] Internal error: no sequence number slots available\n",
3633                             __func__, __LINE__);
3634                 return -EFAULT;
3635         }
3636
3637         queue->sn_cfg_valid = true;
3638         queue->sn_group = i;
3639         queue->sn_slot = slot;
3640         return 0;
3641 }
3642
3643 static int
3644 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3645                                   u32 domain_id,
3646                                   struct dlb2_create_ldb_queue_args *args,
3647                                   struct dlb2_cmd_response *resp,
3648                                   bool vdev_req,
3649                                   unsigned int vdev_id,
3650                                   struct dlb2_hw_domain **out_domain,
3651                                   struct dlb2_ldb_queue **out_queue)
3652 {
3653         struct dlb2_hw_domain *domain;
3654         struct dlb2_ldb_queue *queue;
3655         int i;
3656
3657         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3658
3659         if (!domain) {
3660                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3661                 return -EINVAL;
3662         }
3663
3664         if (!domain->configured) {
3665                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3666                 return -EINVAL;
3667         }
3668
3669         if (domain->started) {
3670                 resp->status = DLB2_ST_DOMAIN_STARTED;
3671                 return -EINVAL;
3672         }
3673
3674         queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3675         if (!queue) {
3676                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3677                 return -EINVAL;
3678         }
3679
3680         if (args->num_sequence_numbers) {
3681                 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3682                         struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3683
3684                         if (group->sequence_numbers_per_queue ==
3685                             args->num_sequence_numbers &&
3686                             !dlb2_sn_group_full(group))
3687                                 break;
3688                 }
3689
3690                 if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3691                         resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3692                         return -EINVAL;
3693                 }
3694         }
3695
3696         if (args->num_qid_inflights > 4096) {
3697                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3698                 return -EINVAL;
3699         }
3700
3701         /* Inflights must be <= number of sequence numbers if ordered */
3702         if (args->num_sequence_numbers != 0 &&
3703             args->num_qid_inflights > args->num_sequence_numbers) {
3704                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3705                 return -EINVAL;
3706         }
3707
3708         if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3709                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3710                 return -EINVAL;
3711         }
3712
3713         if (args->num_atomic_inflights &&
3714             args->lock_id_comp_level != 0 &&
3715             args->lock_id_comp_level != 64 &&
3716             args->lock_id_comp_level != 128 &&
3717             args->lock_id_comp_level != 256 &&
3718             args->lock_id_comp_level != 512 &&
3719             args->lock_id_comp_level != 1024 &&
3720             args->lock_id_comp_level != 2048 &&
3721             args->lock_id_comp_level != 4096 &&
3722             args->lock_id_comp_level != 65536) {
3723                 resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3724                 return -EINVAL;
3725         }
3726
3727         *out_domain = domain;
3728         *out_queue = queue;
3729
3730         return 0;
3731 }
3732
3733 static int
3734 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
3735                                 struct dlb2_hw_domain *domain,
3736                                 struct dlb2_ldb_queue *queue,
3737                                 struct dlb2_create_ldb_queue_args *args)
3738 {
3739         int ret;
3740         ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
3741         if (ret)
3742                 return ret;
3743
3744         /* Attach QID inflights */
3745         queue->num_qid_inflights = args->num_qid_inflights;
3746
3747         /* Attach atomic inflights */
3748         queue->aqed_limit = args->num_atomic_inflights;
3749
3750         domain->num_avail_aqed_entries -= args->num_atomic_inflights;
3751         domain->num_used_aqed_entries += args->num_atomic_inflights;
3752
3753         return 0;
3754 }
3755
3756 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
3757                                      struct dlb2_hw_domain *domain,
3758                                      struct dlb2_ldb_queue *queue,
3759                                      struct dlb2_create_ldb_queue_args *args,
3760                                      bool vdev_req,
3761                                      unsigned int vdev_id)
3762 {
3763         struct dlb2_sn_group *sn_group;
3764         unsigned int offs;
3765         u32 reg = 0;
3766         u32 alimit;
3767
3768         /* QID write permissions are turned on when the domain is started */
3769         offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.phys_id;
3770
3771         DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), reg);
3772
3773         /*
3774          * Unordered QIDs get 4K inflights, ordered get as many as the number
3775          * of sequence numbers.
3776          */
3777         DLB2_BITS_SET(reg, args->num_qid_inflights,
3778                       DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
3779         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
3780                                                   queue->id.phys_id), reg);
3781
3782         alimit = queue->aqed_limit;
3783
3784         if (alimit > DLB2_MAX_NUM_AQED_ENTRIES)
3785                 alimit = DLB2_MAX_NUM_AQED_ENTRIES;
3786
3787         reg = 0;
3788         DLB2_BITS_SET(reg, alimit, DLB2_LSP_QID_AQED_ACTIVE_LIM_LIMIT);
3789         DLB2_CSR_WR(hw,
3790                     DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver,
3791                                                  queue->id.phys_id), reg);
3792
3793         reg = 0;
3794         switch (args->lock_id_comp_level) {
3795         case 64:
3796                 DLB2_BITS_SET(reg, 1, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3797                 break;
3798         case 128:
3799                 DLB2_BITS_SET(reg, 2, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3800                 break;
3801         case 256:
3802                 DLB2_BITS_SET(reg, 3, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3803                 break;
3804         case 512:
3805                 DLB2_BITS_SET(reg, 4, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3806                 break;
3807         case 1024:
3808                 DLB2_BITS_SET(reg, 5, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3809                 break;
3810         case 2048:
3811                 DLB2_BITS_SET(reg, 6, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3812                 break;
3813         case 4096:
3814                 DLB2_BITS_SET(reg, 7, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3815                 break;
3816         default:
3817                 /* No compression by default */
3818                 break;
3819         }
3820
3821         DLB2_CSR_WR(hw, DLB2_AQED_QID_HID_WIDTH(queue->id.phys_id), reg);
3822
3823         reg = 0;
3824         /* Don't timestamp QEs that pass through this queue */
3825         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_ITS(queue->id.phys_id), reg);
3826
3827         DLB2_BITS_SET(reg, args->depth_threshold,
3828                       DLB2_LSP_QID_ATM_DEPTH_THRSH_THRESH);
3829         DLB2_CSR_WR(hw,
3830                     DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver,
3831                                                  queue->id.phys_id), reg);
3832
3833         reg = 0;
3834         DLB2_BITS_SET(reg, args->depth_threshold,
3835                       DLB2_LSP_QID_NALDB_DEPTH_THRSH_THRESH);
3836         DLB2_CSR_WR(hw,
3837                     DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue->id.phys_id),
3838                     reg);
3839
3840         /*
3841          * This register limits the number of inflight flows a queue can have
3842          * at one time.  It has an upper bound of 2048, but can be
3843          * over-subscribed. 512 is chosen so that a single queue does not use
3844          * the entire atomic storage, but can use a substantial portion if
3845          * needed.
3846          */
3847         reg = 0;
3848         DLB2_BITS_SET(reg, 512, DLB2_AQED_QID_FID_LIM_QID_FID_LIMIT);
3849         DLB2_CSR_WR(hw, DLB2_AQED_QID_FID_LIM(queue->id.phys_id), reg);
3850
3851         /* Configure SNs */
3852         reg = 0;
3853         sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
3854         DLB2_BITS_SET(reg, sn_group->mode, DLB2_CHP_ORD_QID_SN_MAP_MODE);
3855         DLB2_BITS_SET(reg, queue->sn_slot, DLB2_CHP_ORD_QID_SN_MAP_SLOT);
3856         DLB2_BITS_SET(reg, sn_group->id, DLB2_CHP_ORD_QID_SN_MAP_GRP);
3857
3858         DLB2_CSR_WR(hw,
3859                     DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue->id.phys_id), reg);
3860
3861         reg = 0;
3862         DLB2_BITS_SET(reg, (args->num_sequence_numbers != 0),
3863                  DLB2_SYS_LDB_QID_CFG_V_SN_CFG_V);
3864         DLB2_BITS_SET(reg, (args->num_atomic_inflights != 0),
3865                  DLB2_SYS_LDB_QID_CFG_V_FID_CFG_V);
3866
3867         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), reg);
3868
3869         if (vdev_req) {
3870                 offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
3871
3872                 reg = 0;
3873                 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VQID_V_VQID_V);
3874                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), reg);
3875
3876                 reg = 0;
3877                 DLB2_BITS_SET(reg, queue->id.phys_id,
3878                               DLB2_SYS_VF_LDB_VQID2QID_QID);
3879                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), reg);
3880
3881                 reg = 0;
3882                 DLB2_BITS_SET(reg, queue->id.virt_id,
3883                               DLB2_SYS_LDB_QID2VQID_VQID);
3884                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID2VQID(queue->id.phys_id), reg);
3885         }
3886
3887         reg = 0;
3888         DLB2_BIT_SET(reg, DLB2_SYS_LDB_QID_V_QID_V);
3889         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), reg);
3890 }
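
/*
 * Illustrative equivalent, not used by the driver: the lock ID compression
 * switch above maps the supported levels (powers of two from 64 to 4096) to
 * AQED compress codes 1 through 7, i.e. log2(level) - 5, and everything else
 * (including 0 and 65536) to code 0, meaning no compression. The helper name
 * is hypothetical.
 */
static inline u32 dlb2_example_lock_id_comp_code(u32 level)
{
        u32 code;

        if (level < 64 || level > 4096 || (level & (level - 1)) != 0)
                return 0; /* no compression: full 64K lock ID space */

        for (code = 1; (64u << (code - 1)) != level; code++)
                ;

        return code;
}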
3891
3892 /**
3893  * dlb2_hw_create_ldb_queue() - create a load-balanced queue
3894  * @hw: dlb2_hw handle for a particular device.
3895  * @domain_id: domain ID.
3896  * @args: queue creation arguments.
3897  * @resp: response structure.
3898  * @vdev_req: indicates whether this request came from a vdev.
3899  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3900  *
3901  * This function creates a load-balanced queue.
3902  *
3903  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3904  * device.
3905  *
3906  * Return:
3907  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
3908  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
3909  * contains the queue ID.
3910  *
3911  * resp->id contains a virtual ID if vdev_req is true.
3912  *
3913  * Errors:
3914  * EINVAL - A requested resource is unavailable, the domain is not configured,
3915  *          the domain has already been started, or the requested queue name is
3916  *          already in use.
3917  * EFAULT - Internal error (resp->status not set).
3918  */
3919 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
3920                              u32 domain_id,
3921                              struct dlb2_create_ldb_queue_args *args,
3922                              struct dlb2_cmd_response *resp,
3923                              bool vdev_req,
3924                              unsigned int vdev_id)
3925 {
3926         struct dlb2_hw_domain *domain;
3927         struct dlb2_ldb_queue *queue;
3928         int ret;
3929
3930         dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
3931
3932         /*
3933          * Verify that hardware resources are available before attempting to
3934          * satisfy the request. This simplifies the error unwinding code.
3935          */
3936         ret = dlb2_verify_create_ldb_queue_args(hw,
3937                                                 domain_id,
3938                                                 args,
3939                                                 resp,
3940                                                 vdev_req,
3941                                                 vdev_id,
3942                                                 &domain,
3943                                                 &queue);
3944         if (ret)
3945                 return ret;
3946
3947         ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
3948
3949         if (ret) {
3950                 DLB2_HW_ERR(hw,
3951                             "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
3952                             __func__, __LINE__);
3953                 return ret;
3954         }
3955
3956         dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
3957
3958         queue->num_mappings = 0;
3959
3960         queue->configured = true;
3961
3962         /*
3963          * Configuration succeeded, so move the resource from the 'avail' to
3964          * the 'used' list.
3965          */
3966         dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
3967
3968         dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
3969
3970         resp->status = 0;
3971         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
3972
3973         return 0;
3974 }
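
/*
 * Illustrative usage sketch, not part of the driver flow: creating an
 * ordered load-balanced queue on behalf of the PF. The argument values are
 * examples only and assume a sequence number group has been configured for
 * 64 SNs per queue and that the domain has enough atomic inflights; the
 * structures and the call itself are as used above. The wrapper name is
 * hypothetical.
 */
static inline int dlb2_example_create_ordered_queue(struct dlb2_hw *hw,
                                                    u32 domain_id)
{
        struct dlb2_create_ldb_queue_args args = {0};
        struct dlb2_cmd_response resp = {0};
        int ret;

        args.num_sequence_numbers = 64; /* ordered queue with 64 SNs */
        args.num_qid_inflights = 64;    /* must not exceed the SN count */
        args.num_atomic_inflights = 64;
        args.lock_id_comp_level = 0;    /* no lock ID compression */

        ret = dlb2_hw_create_ldb_queue(hw, domain_id, &args, &resp,
                                       false, 0);
        if (ret)
                return ret;

        return resp.id; /* physical queue ID, since this is a PF request */
}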
3975
3976 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
3977                                        struct dlb2_hw_domain *domain,
3978                                        struct dlb2_ldb_port *port,
3979                                        bool vdev_req,
3980                                        unsigned int vdev_id)
3981 {
3982         u32 reg = 0;
3983
3984         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
3985         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);
3986
3987         if (vdev_req) {
3988                 unsigned int offs;
3989                 u32 virt_id;
3990
3991                 /*
3992                  * DLB uses producer port address bits 17:12 to determine the
3993                  * producer port ID. In Scalable IOV mode, PP accesses come
3994                  * through the PF MMIO window for the physical producer port,
3995                  * so for translation purposes the virtual and physical port
3996                  * IDs are equal.
3997                  */
3998                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3999                         virt_id = port->id.virt_id;
4000                 else
4001                         virt_id = port->id.phys_id;
4002
4003                 reg = 0;
4004                 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
4005                 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4006                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);
4007
4008                 reg = 0;
4009                 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
4010                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);
4011
4012                 reg = 0;
4013                 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
4014                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
4015         }
4016
4017         reg = 0;
4018         DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
4019         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
4020 }
4021
4022 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4023                                       struct dlb2_hw_domain *domain,
4024                                       struct dlb2_ldb_port *port,
4025                                       uintptr_t cq_dma_base,
4026                                       struct dlb2_create_ldb_port_args *args,
4027                                       bool vdev_req,
4028                                       unsigned int vdev_id)
4029 {
4030         u32 hl_base = 0;
4031         u32 reg = 0;
4032         u32 ds = 0;
4033
4034         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4035         DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
4036         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
4037
4038         reg = cq_dma_base >> 32;
4039         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
4040
4041         /*
4042          * 'ro' == relaxed ordering. This setting allows DLB2 to write
4043          * cache lines out-of-order (but QEs within a cache line are always
4044          * updated in-order).
4045          */
4046         reg = 0;
4047         DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
4048         DLB2_BITS_SET(reg,
4049                  !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4050                  DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
4051         DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);
4052
4053         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);
4054
4055         port->cq_depth = args->cq_depth;
4056
4057         if (args->cq_depth <= 8) {
4058                 ds = 1;
4059         } else if (args->cq_depth == 16) {
4060                 ds = 2;
4061         } else if (args->cq_depth == 32) {
4062                 ds = 3;
4063         } else if (args->cq_depth == 64) {
4064                 ds = 4;
4065         } else if (args->cq_depth == 128) {
4066                 ds = 5;
4067         } else if (args->cq_depth == 256) {
4068                 ds = 6;
4069         } else if (args->cq_depth == 512) {
4070                 ds = 7;
4071         } else if (args->cq_depth == 1024) {
4072                 ds = 8;
4073         } else {
4074                 DLB2_HW_ERR(hw,
4075                             "[%s():%d] Internal error: invalid CQ depth\n",
4076                             __func__, __LINE__);
4077                 return -EFAULT;
4078         }
4079
4080         reg = 0;
4081         DLB2_BITS_SET(reg, ds,
4082                       DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4083         DLB2_CSR_WR(hw,
4084                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4085                     reg);
4086
4087         /*
4088          * To support CQs with depth less than 8, program the token count
4089          * register with a non-zero initial value. Operations such as domain
4090          * reset must take this initial value into account when quiescing the
4091          * CQ.
4092          */
4093         port->init_tkn_cnt = 0;
4094
4095         if (args->cq_depth < 8) {
4096                 reg = 0;
4097                 port->init_tkn_cnt = 8 - args->cq_depth;
4098
4099                 DLB2_BITS_SET(reg,
4100                               port->init_tkn_cnt,
4101                               DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
4102                 DLB2_CSR_WR(hw,
4103                             DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4104                             reg);
4105         } else {
4106                 DLB2_CSR_WR(hw,
4107                             DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4108                             DLB2_LSP_CQ_LDB_TKN_CNT_RST);
4109         }
4110
4111         reg = 0;
4112         DLB2_BITS_SET(reg, ds,
4113                       DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
4114         DLB2_CSR_WR(hw,
4115                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4116                     reg);
4117
4118         /* Reset the CQ write pointer */
4119         DLB2_CSR_WR(hw,
4120                     DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
4121                     DLB2_CHP_LDB_CQ_WPTR_RST);
4122
4123         reg = 0;
4124         DLB2_BITS_SET(reg,
4125                       port->hist_list_entry_limit - 1,
4126                       DLB2_CHP_HIST_LIST_LIM_LIMIT);
4127         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);
4128
4129         DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
4130                       DLB2_CHP_HIST_LIST_BASE_BASE);
4131         DLB2_CSR_WR(hw,
4132                     DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
4133                     hl_base);
4134
4135         /*
4136          * The inflight limit sets a cap on the number of QEs for which this CQ
4137          * can owe completions at one time.
4138          */
4139         reg = 0;
4140         DLB2_BITS_SET(reg, args->cq_history_list_size,
4141                       DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
4142         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
4143                     reg);
4144
4145         reg = 0;
4146         DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4147                       DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
4148         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
4149                     reg);
4150
4151         reg = 0;
4152         DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4153                       DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
4154         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
4155                     reg);
4156
4157         /*
4158          * Address translation (AT) settings: 0: untranslated, 2: translated
4159          * (see ATS spec regarding Address Type field for more details)
4160          */
4161
4162         if (hw->ver == DLB2_HW_V2) {
4163                 reg = 0;
4164                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
4165         }
4166
4167         reg = 0; /* don't let the PASID write below use a stale value */
4168         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4169                 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4170                               DLB2_SYS_LDB_CQ_PASID_PASID);
4171                 DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
4172         }
4173
4174         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);
4175
4176         reg = 0;
4177         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
4178         DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);
4179
4180         /* Disable the port's QID mappings */
4181         reg = 0;
4182         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
4183
4184         return 0;
4185 }
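
/*
 * Illustrative sketch, not used by the driver: the token depth select code
 * programmed above. Depths of 8 or fewer use code 1, and each doubling up to
 * 1024 adds one (i.e. code = log2(depth) - 2 for depths 8..1024); depths
 * below 8 are additionally primed with 8 - depth tokens so the CQ behaves as
 * if it were 8 deep. The helper name is hypothetical.
 */
static inline u32 dlb2_example_cq_token_depth_select(u32 cq_depth)
{
        u32 ds = 1;
        u32 depth = 8;

        while (depth < cq_depth && depth < 1024) {
                depth <<= 1;
                ds++;
        }

        return ds;
}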
4186
4187 static bool
4188 dlb2_cq_depth_is_valid(u32 depth)
4189 {
4190         if (depth != 1 && depth != 2 &&
4191             depth != 4 && depth != 8 &&
4192             depth != 16 && depth != 32 &&
4193             depth != 64 && depth != 128 &&
4194             depth != 256 && depth != 512 &&
4195             depth != 1024)
4196                 return false;
4197
4198         return true;
4199 }
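
/*
 * Equivalent check (illustrative only, not used by the driver): a CQ depth
 * is valid iff it is a power of two in [1, 1024], e.g.
 *
 *   depth >= 1 && depth <= 1024 && (depth & (depth - 1)) == 0
 */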
4200
4201 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4202                                    struct dlb2_hw_domain *domain,
4203                                    struct dlb2_ldb_port *port,
4204                                    uintptr_t cq_dma_base,
4205                                    struct dlb2_create_ldb_port_args *args,
4206                                    bool vdev_req,
4207                                    unsigned int vdev_id)
4208 {
4209         int ret, i;
4210
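        /*
         * Carve this port's history list range out of the domain's
         * allocation: the base starts at the domain's running offset and the
         * limit reserves cq_history_list_size entries for this CQ.
         */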
4211         port->hist_list_entry_base = domain->hist_list_entry_base +
4212                                      domain->hist_list_entry_offset;
4213         port->hist_list_entry_limit = port->hist_list_entry_base +
4214                                       args->cq_history_list_size;
4215
4216         domain->hist_list_entry_offset += args->cq_history_list_size;
4217         domain->avail_hist_list_entries -= args->cq_history_list_size;
4218
4219         ret = dlb2_ldb_port_configure_cq(hw,
4220                                          domain,
4221                                          port,
4222                                          cq_dma_base,
4223                                          args,
4224                                          vdev_req,
4225                                          vdev_id);
4226         if (ret)
4227                 return ret;
4228
4229         dlb2_ldb_port_configure_pp(hw,
4230                                    domain,
4231                                    port,
4232                                    vdev_req,
4233                                    vdev_id);
4234
4235         dlb2_ldb_port_cq_enable(hw, port);
4236
4237         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4238                 port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4239         port->num_mappings = 0;
4240
4241         port->enabled = true;
4242
4243         port->configured = true;
4244
4245         return 0;
4246 }
4247
4248 static void
4249 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4250                               u32 domain_id,
4251                               uintptr_t cq_dma_base,
4252                               struct dlb2_create_ldb_port_args *args,
4253                               bool vdev_req,
4254                               unsigned int vdev_id)
4255 {
4256         DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4257         if (vdev_req)
4258                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4259         DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4260                     domain_id);
4261         DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4262                     args->cq_depth);
4263         DLB2_HW_DBG(hw, "\tCQ hist list size:         %d\n",
4264                     args->cq_history_list_size);
4265         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4266                     cq_dma_base);
4267         DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
4268         DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
4269                     args->cos_strict);
4270 }
4271
4272 static int
4273 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4274                                  u32 domain_id,
4275                                  uintptr_t cq_dma_base,
4276                                  struct dlb2_create_ldb_port_args *args,
4277                                  struct dlb2_cmd_response *resp,
4278                                  bool vdev_req,
4279                                  unsigned int vdev_id,
4280                                  struct dlb2_hw_domain **out_domain,
4281                                  struct dlb2_ldb_port **out_port,
4282                                  int *out_cos_id)
4283 {
4284         struct dlb2_hw_domain *domain;
4285         struct dlb2_ldb_port *port;
4286         int i, id;
4287
4288         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4289
4290         if (!domain) {
4291                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4292                 return -EINVAL;
4293         }
4294
4295         if (!domain->configured) {
4296                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4297                 return -EINVAL;
4298         }
4299
4300         if (domain->started) {
4301                 resp->status = DLB2_ST_DOMAIN_STARTED;
4302                 return -EINVAL;
4303         }
4304
4305         if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
4306                 resp->status = DLB2_ST_INVALID_COS_ID;
4307                 return -EINVAL;
4308         }
4309
4310         if (args->cos_strict) {
4311                 id = args->cos_id;
4312                 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4313                                           typeof(*port));
4314         } else {
4315                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4316                         id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
4317
4318                         port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4319                                                   typeof(*port));
4320                         if (port)
4321                                 break;
4322                 }
4323         }
4324
4325         if (!port) {
4326                 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4327                 return -EINVAL;
4328         }
4329
4330         /* Check cache-line alignment */
4331         if ((cq_dma_base & 0x3F) != 0) {
4332                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4333                 return -EINVAL;
4334         }
4335
4336         if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4337                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4338                 return -EINVAL;
4339         }
4340
4341         /* The history list size must be >= 1 */
4342         if (!args->cq_history_list_size) {
4343                 resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4344                 return -EINVAL;
4345         }
4346
4347         if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4348                 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4349                 return -EINVAL;
4350         }
4351
4352         *out_domain = domain;
4353         *out_port = port;
4354         *out_cos_id = id;
4355
4356         return 0;
4357 }
4358
4359 /**
4360  * dlb2_hw_create_ldb_port() - create a load-balanced port
4361  * @hw: dlb2_hw handle for a particular device.
4362  * @domain_id: domain ID.
4363  * @args: port creation arguments.
4364  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4365  * @resp: response structure.
4366  * @vdev_req: indicates whether this request came from a vdev.
4367  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4368  *
4369  * This function creates a load-balanced port.
4370  *
4371  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4372  * device.
4373  *
4374  * Return:
4375  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4376  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4377  * contains the port ID.
4378  *
4379  * resp->id contains a virtual ID if vdev_req is true.
4380  *
4381  * Errors:
4382  * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4383  *          pointer address is not properly aligned, the domain is not
4384  *          configured, or the domain has already been started.
4385  * EFAULT - Internal error (resp->status not set).
4386  */
4387 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4388                             u32 domain_id,
4389                             struct dlb2_create_ldb_port_args *args,
4390                             uintptr_t cq_dma_base,
4391                             struct dlb2_cmd_response *resp,
4392                             bool vdev_req,
4393                             unsigned int vdev_id)
4394 {
4395         struct dlb2_hw_domain *domain;
4396         struct dlb2_ldb_port *port;
4397         int ret, cos_id;
4398
4399         dlb2_log_create_ldb_port_args(hw,
4400                                       domain_id,
4401                                       cq_dma_base,
4402                                       args,
4403                                       vdev_req,
4404                                       vdev_id);
4405
4406         /*
4407          * Verify that hardware resources are available before attempting to
4408          * satisfy the request. This simplifies the error unwinding code.
4409          */
4410         ret = dlb2_verify_create_ldb_port_args(hw,
4411                                                domain_id,
4412                                                cq_dma_base,
4413                                                args,
4414                                                resp,
4415                                                vdev_req,
4416                                                vdev_id,
4417                                                &domain,
4418                                                &port,
4419                                                &cos_id);
4420         if (ret)
4421                 return ret;
4422
4423         ret = dlb2_configure_ldb_port(hw,
4424                                       domain,
4425                                       port,
4426                                       cq_dma_base,
4427                                       args,
4428                                       vdev_req,
4429                                       vdev_id);
4430         if (ret)
4431                 return ret;
4432
4433         /*
4434          * Configuration succeeded, so move the resource from the 'avail' to
4435          * the 'used' list.
4436          */
4437         dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4438
4439         dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4440
4441         resp->status = 0;
4442         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4443
4444         return 0;
4445 }
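
/*
 * Illustrative PF-side call (hypothetical values, no vdev involved):
 *
 *   struct dlb2_create_ldb_port_args args = {
 *           .cq_depth = 64,
 *           .cq_history_list_size = 64,
 *           .cos_id = 0,
 *           .cos_strict = 0,
 *   };
 *   struct dlb2_cmd_response resp = {0};
 *
 *   ret = dlb2_hw_create_ldb_port(hw, domain_id, &args, cq_dma_base,
 *                                 &resp, false, 0);
 *
 * cq_dma_base must be a 64B-aligned PA/IOVA; on success resp.id holds the
 * new port ID, and on failure resp.status holds a dlb2_error code.
 */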
4446
4447 static void
4448 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
4449                               u32 domain_id,
4450                               uintptr_t cq_dma_base,
4451                               struct dlb2_create_dir_port_args *args,
4452                               bool vdev_req,
4453                               unsigned int vdev_id)
4454 {
4455         DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
4456         if (vdev_req)
4457                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4458         DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4459                     domain_id);
4460         DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4461                     args->cq_depth);
4462         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4463                     cq_dma_base);
4464 }
4465
4466 static struct dlb2_dir_pq_pair *
4467 dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
4468                             u32 id,
4469                             bool vdev_req,
4470                             struct dlb2_hw_domain *domain)
4471 {
4472         struct dlb2_list_entry *iter;
4473         struct dlb2_dir_pq_pair *port;
4474         RTE_SET_USED(iter);
4475
4476         if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
4477                 return NULL;
4478
4479         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
4480                 if ((!vdev_req && port->id.phys_id == id) ||
4481                     (vdev_req && port->id.virt_id == id))
4482                         return port;
4483         }
4484
4485         return NULL;
4486 }
4487
4488 static int
4489 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
4490                                  u32 domain_id,
4491                                  uintptr_t cq_dma_base,
4492                                  struct dlb2_create_dir_port_args *args,
4493                                  struct dlb2_cmd_response *resp,
4494                                  bool vdev_req,
4495                                  unsigned int vdev_id,
4496                                  struct dlb2_hw_domain **out_domain,
4497                                  struct dlb2_dir_pq_pair **out_port)
4498 {
4499         struct dlb2_hw_domain *domain;
4500         struct dlb2_dir_pq_pair *pq;
4501
4502         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4503
4504         if (!domain) {
4505                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4506                 return -EINVAL;
4507         }
4508
4509         if (!domain->configured) {
4510                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4511                 return -EINVAL;
4512         }
4513
4514         if (domain->started) {
4515                 resp->status = DLB2_ST_DOMAIN_STARTED;
4516                 return -EINVAL;
4517         }
4518
4519         if (args->queue_id != -1) {
4520                 /*
4521                  * The user provided a queue ID, so validate that the queue
4522                  * exists, belongs to this domain, and has already been
4523                  * configured.
4524                  */
4525                 pq = dlb2_get_domain_used_dir_pq(hw,
4526                                                  args->queue_id,
4527                                                  vdev_req,
4528                                                  domain);
4529
4530                 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4531                     !pq->queue_configured) {
4532                         resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
4533                         return -EINVAL;
4534                 }
4535         } else {
4536                 /*
4537                  * If the port's queue is not configured, validate that a free
4538                  * port-queue pair is available.
4539                  */
4540                 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4541                                         typeof(*pq));
4542                 if (!pq) {
4543                         resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
4544                         return -EINVAL;
4545                 }
4546         }
4547
4548         /* Check cache-line alignment */
4549         if ((cq_dma_base & 0x3F) != 0) {
4550                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4551                 return -EINVAL;
4552         }
4553
4554         if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4555                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4556                 return -EINVAL;
4557         }
4558
4559         *out_domain = domain;
4560         *out_port = pq;
4561
4562         return 0;
4563 }
4564
4565 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
4566                                        struct dlb2_hw_domain *domain,
4567                                        struct dlb2_dir_pq_pair *port,
4568                                        bool vdev_req,
4569                                        unsigned int vdev_id)
4570 {
4571         u32 reg = 0;
4572
4573         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_DIR_PP2VAS_VAS);
4574         DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), reg);
4575
4576         if (vdev_req) {
4577                 unsigned int offs;
4578                 u32 virt_id;
4579
4580                 /*
4581                  * DLB uses producer port address bits 17:12 to determine the
4582                  * producer port ID. In Scalable IOV mode, PP accesses come
4583                  * through the PF MMIO window for the physical producer port,
4584                  * so for translation purposes the virtual and physical port
4585                  * IDs are equal.
4586                  */
4587                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4588                         virt_id = port->id.virt_id;
4589                 else
4590                         virt_id = port->id.phys_id;
4591
4592                 reg = 0;
4593                 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_DIR_VPP2PP_PP);
4594                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
4595                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), reg);
4596
4597                 reg = 0;
4598                 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_PP2VDEV_VDEV);
4599                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VDEV(port->id.phys_id), reg);
4600
4601                 reg = 0;
4602                 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VPP_V_VPP_V);
4603                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), reg);
4604         }
4605
4606         reg = 0;
4607         DLB2_BIT_SET(reg, DLB2_SYS_DIR_PP_V_PP_V);
4608         DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP_V(port->id.phys_id), reg);
4609 }
4610
4611 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
4612                                       struct dlb2_hw_domain *domain,
4613                                       struct dlb2_dir_pq_pair *port,
4614                                       uintptr_t cq_dma_base,
4615                                       struct dlb2_create_dir_port_args *args,
4616                                       bool vdev_req,
4617                                       unsigned int vdev_id)
4618 {
4619         u32 reg = 0;
4620         u32 ds = 0;
4621
4622         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4623         DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_DIR_CQ_ADDR_L_ADDR_L);
4624         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), reg);
4625
4626         reg = cq_dma_base >> 32;
4627         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), reg);
4628
4629         /*
4630          * 'ro' == relaxed ordering. This setting allows DLB2 to write
4631          * cache lines out-of-order (but QEs within a cache line are always
4632          * updated in-order).
4633          */
4634         reg = 0;
4635         DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_CQ2VF_PF_RO_VF);
4636         DLB2_BITS_SET(reg, !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4637                  DLB2_SYS_DIR_CQ2VF_PF_RO_IS_PF);
4638         DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ2VF_PF_RO_RO);
4639
4640         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), reg);
4641
4642         if (args->cq_depth <= 8) {
4643                 ds = 1;
4644         } else if (args->cq_depth == 16) {
4645                 ds = 2;
4646         } else if (args->cq_depth == 32) {
4647                 ds = 3;
4648         } else if (args->cq_depth == 64) {
4649                 ds = 4;
4650         } else if (args->cq_depth == 128) {
4651                 ds = 5;
4652         } else if (args->cq_depth == 256) {
4653                 ds = 6;
4654         } else if (args->cq_depth == 512) {
4655                 ds = 7;
4656         } else if (args->cq_depth == 1024) {
4657                 ds = 8;
4658         } else {
4659                 DLB2_HW_ERR(hw,
4660                             "[%s():%d] Internal error: invalid CQ depth\n",
4661                             __func__, __LINE__);
4662                 return -EFAULT;
4663         }
4664
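        /*
         * The token depth select written below encodes log2(cq_depth) - 2;
         * depths below 8 use the depth-8 encoding (ds == 1).
         */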
4665         reg = 0;
4666         DLB2_BITS_SET(reg, ds,
4667                       DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4668         DLB2_CSR_WR(hw,
4669                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4670                     reg);
4671
4672         /*
4673          * To support CQs with depth less than 8, program the token count
4674          * register with a non-zero initial value. Operations such as domain
4675          * reset must take this initial value into account when quiescing the
4676          * CQ.
4677          */
4678         port->init_tkn_cnt = 0;
4679
4680         if (args->cq_depth < 8) {
4681                 reg = 0;
4682                 port->init_tkn_cnt = 8 - args->cq_depth;
4683
4684                 DLB2_BITS_SET(reg, port->init_tkn_cnt,
4685                               DLB2_LSP_CQ_DIR_TKN_CNT_COUNT);
4686                 DLB2_CSR_WR(hw,
4687                             DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4688                             reg);
4689         } else {
4690                 DLB2_CSR_WR(hw,
4691                             DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4692                             DLB2_LSP_CQ_DIR_TKN_CNT_RST);
4693         }
4694
4695         reg = 0;
4696         DLB2_BITS_SET(reg, ds,
4697                       DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_TOKEN_DEPTH_SELECT_V2);
4698         DLB2_CSR_WR(hw,
4699                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
4700                                                       port->id.phys_id),
4701                     reg);
4702
4703         /* Reset the CQ write pointer */
4704         DLB2_CSR_WR(hw,
4705                     DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
4706                     DLB2_CHP_DIR_CQ_WPTR_RST);
4707
4708         /* Virtualize the PPID */
4709         reg = 0;
4710         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), reg);
4711
4712         /*
4713          * Address translation (AT) settings: 0: untranslated, 2: translated
4714          * (see ATS spec regarding Address Type field for more details)
4715          */
4716         if (hw->ver == DLB2_HW_V2) {
4717                 reg = 0;
4718                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), reg);
4719         }
4720
4721         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4722                 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4723                               DLB2_SYS_DIR_CQ_PASID_PASID);
4724                 DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ_PASID_FMT2);
4725         }
4726
4727         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id), reg);
4728
4729         reg = 0;
4730         DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_DIR_CQ2VAS_CQ2VAS);
4731         DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id), reg);
4732
4733         return 0;
4734 }
4735
4736 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
4737                                    struct dlb2_hw_domain *domain,
4738                                    struct dlb2_dir_pq_pair *port,
4739                                    uintptr_t cq_dma_base,
4740                                    struct dlb2_create_dir_port_args *args,
4741                                    bool vdev_req,
4742                                    unsigned int vdev_id)
4743 {
4744         int ret;
4745
4746         ret = dlb2_dir_port_configure_cq(hw,
4747                                          domain,
4748                                          port,
4749                                          cq_dma_base,
4750                                          args,
4751                                          vdev_req,
4752                                          vdev_id);
4753
4754         if (ret)
4755                 return ret;
4756
4757         dlb2_dir_port_configure_pp(hw,
4758                                    domain,
4759                                    port,
4760                                    vdev_req,
4761                                    vdev_id);
4762
4763         dlb2_dir_port_cq_enable(hw, port);
4764
4765         port->enabled = true;
4766
4767         port->port_configured = true;
4768
4769         return 0;
4770 }
4771
4772 /**
4773  * dlb2_hw_create_dir_port() - create a directed port
4774  * @hw: dlb2_hw handle for a particular device.
4775  * @domain_id: domain ID.
4776  * @args: port creation arguments.
4777  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4778  * @resp: response structure.
4779  * @vdev_req: indicates whether this request came from a vdev.
4780  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4781  *
4782  * This function creates a directed port.
4783  *
4784  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4785  * device.
4786  *
4787  * Return:
4788  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4789  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4790  * contains the port ID.
4791  *
4792  * resp->id contains a virtual ID if vdev_req is true.
4793  *
4794  * Errors:
4795  * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4796  *          pointer address is not properly aligned, the domain is not
4797  *          configured, or the domain has already been started.
4798  * EFAULT - Internal error (resp->status not set).
4799  */
4800 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
4801                             u32 domain_id,
4802                             struct dlb2_create_dir_port_args *args,
4803                             uintptr_t cq_dma_base,
4804                             struct dlb2_cmd_response *resp,
4805                             bool vdev_req,
4806                             unsigned int vdev_id)
4807 {
4808         struct dlb2_dir_pq_pair *port;
4809         struct dlb2_hw_domain *domain;
4810         int ret;
4811
4812         dlb2_log_create_dir_port_args(hw,
4813                                       domain_id,
4814                                       cq_dma_base,
4815                                       args,
4816                                       vdev_req,
4817                                       vdev_id);
4818
4819         /*
4820          * Verify that hardware resources are available before attempting to
4821          * satisfy the request. This simplifies the error unwinding code.
4822          */
4823         ret = dlb2_verify_create_dir_port_args(hw,
4824                                                domain_id,
4825                                                cq_dma_base,
4826                                                args,
4827                                                resp,
4828                                                vdev_req,
4829                                                vdev_id,
4830                                                &domain,
4831                                                &port);
4832         if (ret)
4833                 return ret;
4834
4835         ret = dlb2_configure_dir_port(hw,
4836                                       domain,
4837                                       port,
4838                                       cq_dma_base,
4839                                       args,
4840                                       vdev_req,
4841                                       vdev_id);
4842         if (ret)
4843                 return ret;
4844
4845         /*
4846          * Configuration succeeded, so move the resource from the 'avail' to
4847          * the 'used' list (if it's not already there).
4848          */
4849         if (args->queue_id == -1) {
4850                 dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
4851
4852                 dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
4853         }
4854
4855         resp->status = 0;
4856         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4857
4858         return 0;
4859 }
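
/*
 * Illustrative PF-side call (hypothetical values, no vdev involved):
 *
 *   struct dlb2_create_dir_port_args args = {
 *           .queue_id = -1,
 *           .cq_depth = 32,
 *   };
 *   struct dlb2_cmd_response resp = {0};
 *
 *   ret = dlb2_hw_create_dir_port(hw, domain_id, &args, cq_dma_base,
 *                                 &resp, false, 0);
 *
 * queue_id == -1 requests a free port-queue pair; passing an
 * already-configured directed queue's ID instead attaches the port to that
 * queue. cq_dma_base must be a 64B-aligned PA/IOVA.
 */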
4860
4861 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
4862                                      struct dlb2_hw_domain *domain,
4863                                      struct dlb2_dir_pq_pair *queue,
4864                                      struct dlb2_create_dir_queue_args *args,
4865                                      bool vdev_req,
4866                                      unsigned int vdev_id)
4867 {
4868         unsigned int offs;
4869         u32 reg = 0;
4870
4871         /* QID write permissions are turned on when the domain is started */
4872         offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4873                 queue->id.phys_id;
4874
4875         DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), reg);
4876
4877         /* Don't timestamp QEs that pass through this queue */
4878         DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_ITS(queue->id.phys_id), reg);
4879
4880         reg = 0;
4881         DLB2_BITS_SET(reg, args->depth_threshold,
4882                       DLB2_LSP_QID_DIR_DEPTH_THRSH_THRESH);
4883         DLB2_CSR_WR(hw,
4884                     DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver, queue->id.phys_id),
4885                     reg);
4886
4887         if (vdev_req) {
4888                 offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4889                         queue->id.virt_id;
4890
4891                 reg = 0;
4892                 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VQID_V_VQID_V);
4893                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), reg);
4894
4895                 reg = 0;
4896                 DLB2_BITS_SET(reg, queue->id.phys_id,
4897                               DLB2_SYS_VF_DIR_VQID2QID_QID);
4898                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), reg);
4899         }
4900
4901         reg = 0;
4902         DLB2_BIT_SET(reg, DLB2_SYS_DIR_QID_V_QID_V);
4903         DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), reg);
4904
4905         queue->queue_configured = true;
4906 }
4907
4908 static void
4909 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
4910                                u32 domain_id,
4911                                struct dlb2_create_dir_queue_args *args,
4912                                bool vdev_req,
4913                                unsigned int vdev_id)
4914 {
4915         DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
4916         if (vdev_req)
4917                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4918         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
4919         DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
4920 }
4921
4922 static int
4923 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
4924                                   u32 domain_id,
4925                                   struct dlb2_create_dir_queue_args *args,
4926                                   struct dlb2_cmd_response *resp,
4927                                   bool vdev_req,
4928                                   unsigned int vdev_id,
4929                                   struct dlb2_hw_domain **out_domain,
4930                                   struct dlb2_dir_pq_pair **out_queue)
4931 {
4932         struct dlb2_hw_domain *domain;
4933         struct dlb2_dir_pq_pair *pq;
4934
4935         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4936
4937         if (!domain) {
4938                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4939                 return -EINVAL;
4940         }
4941
4942         if (!domain->configured) {
4943                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4944                 return -EINVAL;
4945         }
4946
4947         if (domain->started) {
4948                 resp->status = DLB2_ST_DOMAIN_STARTED;
4949                 return -EINVAL;
4950         }
4951
4952         /*
4953          * If the user provided a port ID, validate that the port exists,
4954          * belongs to this domain, and has already been configured.
4955          */
4956         if (args->port_id != -1) {
4957                 pq = dlb2_get_domain_used_dir_pq(hw,
4958                                                  args->port_id,
4959                                                  vdev_req,
4960                                                  domain);
4961
4962                 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4963                     !pq->port_configured) {
4964                         resp->status = DLB2_ST_INVALID_PORT_ID;
4965                         return -EINVAL;
4966                 }
4967         } else {
4968                 /*
4969                  * If the queue's port is not configured, validate that a free
4970                  * port-queue pair is available.
4971                  */
4972                 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4973                                         typeof(*pq));
4974                 if (!pq) {
4975                         resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
4976                         return -EINVAL;
4977                 }
4978         }
4979
4980         *out_domain = domain;
4981         *out_queue = pq;
4982
4983         return 0;
4984 }
4985
4986 /**
4987  * dlb2_hw_create_dir_queue() - create a directed queue
4988  * @hw: dlb2_hw handle for a particular device.
4989  * @domain_id: domain ID.
4990  * @args: queue creation arguments.
4991  * @resp: response structure.
4992  * @vdev_req: indicates whether this request came from a vdev.
4993  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4994  *
4995  * This function creates a directed queue.
4996  *
4997  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4998  * device.
4999  *
5000  * Return:
5001  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5002  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5003  * contains the queue ID.
5004  *
5005  * resp->id contains a virtual ID if vdev_req is true.
5006  *
5007  * Errors:
5008  * EINVAL - A requested resource is unavailable, the domain is not configured,
5009  *          or the domain has already been started.
5010  * EFAULT - Internal error (resp->status not set).
5011  */
5012 int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
5013                              u32 domain_id,
5014                              struct dlb2_create_dir_queue_args *args,
5015                              struct dlb2_cmd_response *resp,
5016                              bool vdev_req,
5017                              unsigned int vdev_id)
5018 {
5019         struct dlb2_dir_pq_pair *queue;
5020         struct dlb2_hw_domain *domain;
5021         int ret;
5022
5023         dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
5024
5025         /*
5026          * Verify that hardware resources are available before attempting to
5027          * satisfy the request. This simplifies the error unwinding code.
5028          */
5029         ret = dlb2_verify_create_dir_queue_args(hw,
5030                                                 domain_id,
5031                                                 args,
5032                                                 resp,
5033                                                 vdev_req,
5034                                                 vdev_id,
5035                                                 &domain,
5036                                                 &queue);
5037         if (ret)
5038                 return ret;
5039
5040         dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
5041
5042         /*
5043          * Configuration succeeded, so move the resource from the 'avail' to
5044          * the 'used' list (if it's not already there).
5045          */
5046         if (args->port_id == -1) {
5047                 dlb2_list_del(&domain->avail_dir_pq_pairs,
5048                               &queue->domain_list);
5049
5050                 dlb2_list_add(&domain->used_dir_pq_pairs,
5051                               &queue->domain_list);
5052         }
5053
5054         resp->status = 0;
5055
5056         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
5057
5058         return 0;
5059 }
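
/*
 * Illustrative call (hypothetical values) pairing the queue with a port
 * created earlier in this domain:
 *
 *   struct dlb2_create_dir_queue_args args = {
 *           .port_id = dir_port_id,
 *           .depth_threshold = 256,
 *   };
 *   struct dlb2_cmd_response resp = {0};
 *
 *   ret = dlb2_hw_create_dir_queue(hw, domain_id, &args, &resp, false, 0);
 *
 * dir_port_id is a placeholder; passing .port_id = -1 instead draws a free
 * port-queue pair from the domain's avail list.
 */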
5060
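/*
 * Return true (and set *slot) if the port has a slot whose current queue is
 * being unmapped and which already has a map to @queue pending.
 */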
5061 static bool
5062 dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
5063                                            struct dlb2_ldb_queue *queue,
5064                                            int *slot)
5065 {
5066         int i;
5067
5068         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
5069                 struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
5070
5071                 if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
5072                     map->pending_qid == queue->id.phys_id)
5073                         break;
5074         }
5075
5076         *slot = i;
5077
5078         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
5079 }
5080
5081 static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
5082                                               struct dlb2_ldb_queue *queue,
5083                                               struct dlb2_cmd_response *resp)
5084 {
5085         enum dlb2_qid_map_state state;
5086         int i;
5087
5088         /* Unused slot available? */
5089         if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
5090                 return 0;
5091
5092         /*
5093          * If the queue is already mapped (from the application's perspective),
5094          * this is simply a priority update.
5095          */
5096         state = DLB2_QUEUE_MAPPED;
5097         if (dlb2_port_find_slot_queue(port, state, queue, &i))
5098                 return 0;
5099
5100         state = DLB2_QUEUE_MAP_IN_PROG;
5101         if (dlb2_port_find_slot_queue(port, state, queue, &i))
5102                 return 0;
5103
5104         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
5105                 return 0;
5106
5107         /*
5108          * A slot with an unmap operation in progress is also considered
5109          * available.
5110          */
5111         state = DLB2_QUEUE_UNMAP_IN_PROG;
5112         if (dlb2_port_find_slot(port, state, &i))
5113                 return 0;
5114
5115         state = DLB2_QUEUE_UNMAPPED;
5116         if (dlb2_port_find_slot(port, state, &i))
5117                 return 0;
5118
5119         resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
5120         return -EINVAL;
5121 }
5122
5123 static struct dlb2_ldb_queue *
5124 dlb2_get_domain_ldb_queue(u32 id,
5125                           bool vdev_req,
5126                           struct dlb2_hw_domain *domain)
5127 {
5128         struct dlb2_list_entry *iter;
5129         struct dlb2_ldb_queue *queue;
5130         RTE_SET_USED(iter);
5131
5132         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
5133                 return NULL;
5134
5135         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
5136                 if ((!vdev_req && queue->id.phys_id == id) ||
5137                     (vdev_req && queue->id.virt_id == id))
5138                         return queue;
5139         }
5140
5141         return NULL;
5142 }
5143
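/*
 * Look up a load-balanced port by ID within the domain. Both the used and
 * avail per-CoS lists are searched, so the port need not be configured.
 */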
5144 static struct dlb2_ldb_port *
5145 dlb2_get_domain_used_ldb_port(u32 id,
5146                               bool vdev_req,
5147                               struct dlb2_hw_domain *domain)
5148 {
5149         struct dlb2_list_entry *iter;
5150         struct dlb2_ldb_port *port;
5151         int i;
5152         RTE_SET_USED(iter);
5153
5154         if (id >= DLB2_MAX_NUM_LDB_PORTS)
5155                 return NULL;
5156
5157         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
5158                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
5159                         if ((!vdev_req && port->id.phys_id == id) ||
5160                             (vdev_req && port->id.virt_id == id))
5161                                 return port;
5162                 }
5163
5164                 DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter) {
5165                         if ((!vdev_req && port->id.phys_id == id) ||
5166                             (vdev_req && port->id.virt_id == id))
5167                                 return port;
5168                 }
5169         }
5170
5171         return NULL;
5172 }
5173
5174 static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
5175                                               struct dlb2_ldb_port *port,
5176                                               int slot,
5177                                               struct dlb2_map_qid_args *args)
5178 {
5179         u32 cq2priov;
5180
5181         /* Read-modify-write the priority and valid bit register */
5182         cq2priov = DLB2_CSR_RD(hw,
5183                                DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id));
5184
5185         cq2priov |= (1 << (slot + DLB2_LSP_CQ2PRIOV_V_LOC)) &
5186                     DLB2_LSP_CQ2PRIOV_V;
5187         cq2priov |= ((args->priority & 0x7) << slot * 3) &
5188                     DLB2_LSP_CQ2PRIOV_PRIO;
5189
5190         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), cq2priov);
5191
5192         dlb2_flush_csr(hw);
5193
5194         port->qid_map[slot].priority = args->priority;
5195 }
5196
5197 static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
5198                                     u32 domain_id,
5199                                     struct dlb2_map_qid_args *args,
5200                                     struct dlb2_cmd_response *resp,
5201                                     bool vdev_req,
5202                                     unsigned int vdev_id,
5203                                     struct dlb2_hw_domain **out_domain,
5204                                     struct dlb2_ldb_port **out_port,
5205                                     struct dlb2_ldb_queue **out_queue)
5206 {
5207         struct dlb2_hw_domain *domain;
5208         struct dlb2_ldb_queue *queue;
5209         struct dlb2_ldb_port *port;
5210         int id;
5211
5212         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5213
5214         if (!domain) {
5215                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5216                 return -EINVAL;
5217         }
5218
5219         if (!domain->configured) {
5220                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5221                 return -EINVAL;
5222         }
5223
5224         id = args->port_id;
5225
5226         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5227
5228         if (!port || !port->configured) {
5229                 resp->status = DLB2_ST_INVALID_PORT_ID;
5230                 return -EINVAL;
5231         }
5232
5233         if (args->priority >= DLB2_QID_PRIORITIES) {
5234                 resp->status = DLB2_ST_INVALID_PRIORITY;
5235                 return -EINVAL;
5236         }
5237
5238         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5239
5240         if (!queue || !queue->configured) {
5241                 resp->status = DLB2_ST_INVALID_QID;
5242                 return -EINVAL;
5243         }
5244
5245         if (queue->domain_id.phys_id != domain->id.phys_id) {
5246                 resp->status = DLB2_ST_INVALID_QID;
5247                 return -EINVAL;
5248         }
5249
5250         if (port->domain_id.phys_id != domain->id.phys_id) {
5251                 resp->status = DLB2_ST_INVALID_PORT_ID;
5252                 return -EINVAL;
5253         }
5254
5255         *out_domain = domain;
5256         *out_queue = queue;
5257         *out_port = port;
5258
5259         return 0;
5260 }
5261
5262 static void dlb2_log_map_qid(struct dlb2_hw *hw,
5263                              u32 domain_id,
5264                              struct dlb2_map_qid_args *args,
5265                              bool vdev_req,
5266                              unsigned int vdev_id)
5267 {
5268         DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
5269         if (vdev_req)
5270                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5271         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5272                     domain_id);
5273         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5274                     args->port_id);
5275         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5276                     args->qid);
5277         DLB2_HW_DBG(hw, "\tPriority:  %d\n",
5278                     args->priority);
5279 }
5280
5281 /**
5282  * dlb2_hw_map_qid() - map a load-balanced queue to a load-balanced port
5283  * @hw: dlb2_hw handle for a particular device.
5284  * @domain_id: domain ID.
5285  * @args: map QID arguments.
5286  * @resp: response structure.
5287  * @vdev_req: indicates whether this request came from a vdev.
5288  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5289  *
5290  * This function configures the DLB to schedule QEs from the specified queue
5291  * to the specified port. Each load-balanced port can be mapped to up to 8
5292  * queues; each load-balanced queue can potentially map to all the
5293  * load-balanced ports.
5294  *
5295  * A successful return does not necessarily mean the mapping was configured. If
5296  * this function is unable to immediately map the queue to the port, it will
5297  * add the requested operation to a per-port list of pending map/unmap
5298  * operations, and (if it's not already running) launch a kernel thread that
5299  * periodically attempts to process all pending operations. In a sense, this is
5300  * an asynchronous function.
5301  *
5302  * This asynchronicity creates two views of the state of hardware: the actual
5303  * hardware state and the requested state (as if every request completed
5304  * immediately). If there are any pending map/unmap operations, the requested
5305  * state will differ from the actual state. All validation is performed with
5306  * respect to the pending state; for instance, if there are 8 pending map
5307  * operations for port X, a request for a 9th will fail because a load-balanced
5308  * port can only map up to 8 queues.
5309  *
5310  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5311  * device.
5312  *
5313  * Return:
5314  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5315  * assigned a detailed error code from enum dlb2_error.
5316  *
5317  * Errors:
5318  * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
5319  *          the domain is not configured.
5320  * EFAULT - Internal error (resp->status not set).
5321  */
5322 int dlb2_hw_map_qid(struct dlb2_hw *hw,
5323                     u32 domain_id,
5324                     struct dlb2_map_qid_args *args,
5325                     struct dlb2_cmd_response *resp,
5326                     bool vdev_req,
5327                     unsigned int vdev_id)
5328 {
5329         struct dlb2_hw_domain *domain;
5330         struct dlb2_ldb_queue *queue;
5331         enum dlb2_qid_map_state st;
5332         struct dlb2_ldb_port *port;
5333         int ret, i;
5334         u8 prio;
5335
5336         dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
5337
5338         /*
5339          * Verify that hardware resources are available before attempting to
5340          * satisfy the request. This simplifies the error unwinding code.
5341          */
5342         ret = dlb2_verify_map_qid_args(hw,
5343                                        domain_id,
5344                                        args,
5345                                        resp,
5346                                        vdev_req,
5347                                        vdev_id,
5348                                        &domain,
5349                                        &port,
5350                                        &queue);
5351         if (ret)
5352                 return ret;
5353
5354         prio = args->priority;
5355
5356         /*
5357          * If there are any outstanding detach operations for this port,
5358          * attempt to complete them. This may be necessary to free up a QID
5359          * slot for this requested mapping.
5360          */
5361         if (port->num_pending_removals)
5362                 dlb2_domain_finish_unmap_port(hw, domain, port);
5363
5364         ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
5365         if (ret)
5366                 return ret;
5367
5368         /* Hardware requires disabling the CQ before mapping QIDs. */
5369         if (port->enabled)
5370                 dlb2_ldb_port_cq_disable(hw, port);
5371
5372         /*
5373          * If this is only a priority change, don't perform the full QID->CQ
5374          * mapping procedure.
5375          */
5376         st = DLB2_QUEUE_MAPPED;
5377         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5378                 if (prio != port->qid_map[i].priority) {
5379                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5380                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5381                 }
5382
5383                 st = DLB2_QUEUE_MAPPED;
5384                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5385                 if (ret)
5386                         return ret;
5387
5388                 goto map_qid_done;
5389         }
5390
5391         st = DLB2_QUEUE_UNMAP_IN_PROG;
5392         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5393                 if (prio != port->qid_map[i].priority) {
5394                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5395                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5396                 }
5397
5398                 st = DLB2_QUEUE_MAPPED;
5399                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5400                 if (ret)
5401                         return ret;
5402
5403                 goto map_qid_done;
5404         }
5405
5406         /*
5407          * If this is a priority change on an in-progress mapping, don't
5408          * perform the full QID->CQ mapping procedure.
5409          */
5410         st = DLB2_QUEUE_MAP_IN_PROG;
5411         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5412                 port->qid_map[i].priority = prio;
5413
5414                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5415
5416                 goto map_qid_done;
5417         }
5418
5419         /*
5420          * If this is a priority change on a pending mapping, update the
5421          * pending priority.
5422          */
5423         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5424                 port->qid_map[i].pending_priority = prio;
5425
5426                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5427
5428                 goto map_qid_done;
5429         }
5430
5431         /*
5432          * If all the CQ's slots are in use, then there's an unmap in progress
5433          * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
5434          * mapping to pending_map and return. When the removal is completed for
5435          * the slot's current occupant, this mapping will be performed.
5436          */
5437         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
5438                 if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
5439                         enum dlb2_qid_map_state new_st;
5440
5441                         port->qid_map[i].pending_qid = queue->id.phys_id;
5442                         port->qid_map[i].pending_priority = prio;
5443
5444                         new_st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
5445
5446                         ret = dlb2_port_slot_state_transition(hw, port, queue,
5447                                                               i, new_st);
5448                         if (ret)
5449                                 return ret;
5450
5451                         DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
5452
5453                         goto map_qid_done;
5454                 }
5455         }
5456
5457         /*
5458          * If the domain has started, a special "dynamic" CQ->queue mapping
5459          * procedure is required in order to safely update the CQ<->QID tables.
5460          * The "static" procedure cannot be used when traffic is flowing,
5461          * because the CQ<->QID tables cannot be updated atomically and the
5462          * scheduler won't see the new mapping unless the queue's if_status
5463          * changes, which isn't guaranteed.
5464          */
5465         ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
5466
5467         /* If ret is less than zero, it's due to an internal error */
5468         if (ret < 0)
5469                 return ret;
5470
5471 map_qid_done:
5472         if (port->enabled)
5473                 dlb2_ldb_port_cq_enable(hw, port);
5474
5475         resp->status = 0;
5476
5477         return 0;
5478 }
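
/*
 * Illustrative mapping request (hypothetical values, no vdev involved):
 *
 *   struct dlb2_map_qid_args args = {
 *           .port_id = ldb_port_id,
 *           .qid = ldb_queue_id,
 *           .priority = 0,
 *   };
 *   struct dlb2_cmd_response resp = {0};
 *
 *   ret = dlb2_hw_map_qid(hw, domain_id, &args, &resp, false, 0);
 *
 * ldb_port_id and ldb_queue_id are placeholders; priority must be less than
 * DLB2_QID_PRIORITIES. A zero return means the request was accepted, though
 * the map itself may complete asynchronously (see above).
 */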
5479
5480 static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
5481                                u32 domain_id,
5482                                struct dlb2_unmap_qid_args *args,
5483                                bool vdev_req,
5484                                unsigned int vdev_id)
5485 {
5486         DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
5487         if (vdev_req)
5488                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5489         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5490                     domain_id);
5491         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5492                     args->port_id);
5493         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5494                     args->qid);
5495         if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
5496                 DLB2_HW_DBG(hw, "\tQueue's num mappings:  %d\n",
5497                             hw->rsrcs.ldb_queues[args->qid].num_mappings);
5498 }
5499
5500 static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
5501                                       u32 domain_id,
5502                                       struct dlb2_unmap_qid_args *args,
5503                                       struct dlb2_cmd_response *resp,
5504                                       bool vdev_req,
5505                                       unsigned int vdev_id,
5506                                       struct dlb2_hw_domain **out_domain,
5507                                       struct dlb2_ldb_port **out_port,
5508                                       struct dlb2_ldb_queue **out_queue)
5509 {
5510         enum dlb2_qid_map_state state;
5511         struct dlb2_hw_domain *domain;
5512         struct dlb2_ldb_queue *queue;
5513         struct dlb2_ldb_port *port;
5514         int slot;
5515         int id;
5516
5517         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5518
5519         if (!domain) {
5520                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5521                 return -EINVAL;
5522         }
5523
5524         if (!domain->configured) {
5525                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5526                 return -EINVAL;
5527         }
5528
5529         id = args->port_id;
5530
5531         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5532
5533         if (!port || !port->configured) {
5534                 resp->status = DLB2_ST_INVALID_PORT_ID;
5535                 return -EINVAL;
5536         }
5537
5538         if (port->domain_id.phys_id != domain->id.phys_id) {
5539                 resp->status = DLB2_ST_INVALID_PORT_ID;
5540                 return -EINVAL;
5541         }
5542
5543         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5544
5545         if (!queue || !queue->configured) {
5546                 DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
5547                             __func__, args->qid);
5548                 resp->status = DLB2_ST_INVALID_QID;
5549                 return -EINVAL;
5550         }
5551
5552         /*
5553          * Verify that the port has the queue mapped. From the application's
5554          * perspective a queue is mapped if it is actually mapped, the map is
5555          * in progress, or the map is blocked pending an unmap.
5556          */
5557         state = DLB2_QUEUE_MAPPED;
5558         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5559                 goto done;
5560
5561         state = DLB2_QUEUE_MAP_IN_PROG;
5562         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5563                 goto done;
5564
5565         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
5566                 goto done;
5567
5568         resp->status = DLB2_ST_INVALID_QID;
5569         return -EINVAL;
5570
5571 done:
5572         *out_domain = domain;
5573         *out_port = port;
5574         *out_queue = queue;
5575
5576         return 0;
5577 }
5578
5579 /**
5580  * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
5581  * @hw: dlb2_hw handle for a particular device.
5582  * @domain_id: domain ID.
5583  * @args: unmap QID arguments.
5584  * @resp: response structure.
5585  * @vdev_req: indicates whether this request came from a vdev.
5586  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5587  *
5588  * This function configures the DLB to stop scheduling QEs from the specified
5589  * queue to the specified port.
5590  *
5591  * A successful return does not necessarily mean the mapping was removed. If
5592  * this function is unable to immediately unmap the queue from the port, it
5593  * will add the requested operation to a per-port list of pending map/unmap
5594  * operations, and (if it's not already running) launch a kernel thread that
5595  * periodically attempts to process all pending operations. See
5596  * dlb2_hw_map_qid() for more details.
5597  *
5598  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5599  * device.
5600  *
5601  * Return:
5602  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5603  * assigned a detailed error code from enum dlb2_error.
5604  *
5605  * Errors:
5606  * EINVAL - Invalid domain, port, or queue ID, the domain is not configured,
5607  *          or the queue is not mapped to the port.
5608  * EFAULT - Internal error (resp->status not set).
5609  */
5610 int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
5611                       u32 domain_id,
5612                       struct dlb2_unmap_qid_args *args,
5613                       struct dlb2_cmd_response *resp,
5614                       bool vdev_req,
5615                       unsigned int vdev_id)
5616 {
5617         struct dlb2_hw_domain *domain;
5618         struct dlb2_ldb_queue *queue;
5619         enum dlb2_qid_map_state st;
5620         struct dlb2_ldb_port *port;
5621         bool unmap_complete;
5622         int i, ret;
5623
5624         dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
5625
5626         /*
5627          * Verify that hardware resources are available before attempting to
5628          * satisfy the request. This simplifies the error unwinding code.
5629          */
5630         ret = dlb2_verify_unmap_qid_args(hw,
5631                                          domain_id,
5632                                          args,
5633                                          resp,
5634                                          vdev_req,
5635                                          vdev_id,
5636                                          &domain,
5637                                          &port,
5638                                          &queue);
5639         if (ret)
5640                 return ret;
5641
5642         /*
5643          * If the queue hasn't been mapped yet, we need to update the slot's
5644          * state and re-enable the queue's inflights.
5645          */
5646         st = DLB2_QUEUE_MAP_IN_PROG;
5647         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5648                 /*
5649                  * Since the in-progress map was aborted, re-enable the QID's
5650                  * inflights.
5651                  */
5652                 if (queue->num_pending_additions == 0)
5653                         dlb2_ldb_queue_set_inflight_limit(hw, queue);
5654
5655                 st = DLB2_QUEUE_UNMAPPED;
5656                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5657                 if (ret)
5658                         return ret;
5659
5660                 goto unmap_qid_done;
5661         }
5662
5663         /*
5664          * If the queue mapping is on hold pending an unmap, we simply need to
5665          * update the slot's state.
5666          */
5667         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5668                 st = DLB2_QUEUE_UNMAP_IN_PROG;
5669                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5670                 if (ret)
5671                         return ret;
5672
5673                 goto unmap_qid_done;
5674         }
5675
5676         st = DLB2_QUEUE_MAPPED;
5677         if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
5678                 DLB2_HW_ERR(hw,
5679                             "[%s()] Internal error: no mapped CQ slot for queue\n",
5680                             __func__);
5681                 return -EFAULT;
5682         }
5683
5684         /*
5685          * QID->CQ mapping removal is an asynchronous procedure. It requires
5686          * stopping the DLB2 from scheduling this CQ, draining all inflights
5687          * from the CQ, then unmapping the queue from the CQ. This function
5688          * simply marks the port as needing the queue unmapped, and (if
5689          * necessary) starts the unmapping worker thread.
5690          */
5691         dlb2_ldb_port_cq_disable(hw, port);
5692
5693         st = DLB2_QUEUE_UNMAP_IN_PROG;
5694         ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5695         if (ret)
5696                 return ret;
5697
5698         /*
5699          * Attempt to finish the unmapping now, in case the port has no
5700          * outstanding inflights. If that's not the case, this will fail and
5701          * the unmapping will be completed at a later time.
5702          */
5703         unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
5704
5705         /*
5706          * If the unmapping couldn't complete immediately, launch the worker
5707          * thread (if it isn't already launched) to finish it later.
5708          */
5709         if (!unmap_complete && !os_worker_active(hw))
5710                 os_schedule_work(hw);
5711
5712 unmap_qid_done:
5713         resp->status = 0;
5714
5715         return 0;
5716 }
5717
5718 static void
5719 dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
5720                                   struct dlb2_pending_port_unmaps_args *args,
5721                                   bool vdev_req,
5722                                   unsigned int vdev_id)
5723 {
5724         DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
5725         if (vdev_req)
5726                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
5727         DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
5728 }
5729
5730 /**
5731  * dlb2_hw_pending_port_unmaps() - returns the number of unmap operations in
5732  *      progress.
5733  * @hw: dlb2_hw handle for a particular device.
5734  * @domain_id: domain ID.
5735  * @args: number of unmaps in progress args
5736  * @resp: response structure.
5737  * @vdev_req: indicates whether this request came from a vdev.
5738  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5739  *
5740  * Return:
5741  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5742  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5743  * contains the number of unmaps in progress.
5744  *
5745  * Errors:
5746  * EINVAL - Invalid domain ID or port ID.
5747  */
5748 int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
5749                                 u32 domain_id,
5750                                 struct dlb2_pending_port_unmaps_args *args,
5751                                 struct dlb2_cmd_response *resp,
5752                                 bool vdev_req,
5753                                 unsigned int vdev_id)
5754 {
5755         struct dlb2_hw_domain *domain;
5756         struct dlb2_ldb_port *port;
5757
5758         dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
5759
5760         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5761
5762         if (!domain) {
5763                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5764                 return -EINVAL;
5765         }
5766
5767         port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
5768         if (!port || !port->configured) {
5769                 resp->status = DLB2_ST_INVALID_PORT_ID;
5770                 return -EINVAL;
5771         }
5772
5773         resp->id = port->num_pending_removals;
5774
5775         return 0;
5776 }
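
/*
 * Illustrative usage sketch (not compiled into the driver): one way a
 * hypothetical caller could combine dlb2_hw_unmap_qid() and
 * dlb2_hw_pending_port_unmaps() to request an unmap and wait for the
 * asynchronous procedure to drain. The helper name, the PF-style arguments
 * (vdev_req = false), and the bare polling loop are illustrative only;
 * progress is made by the scheduled background worker, so a real caller
 * would sleep or yield between polls.
 */
#if 0
static int dlb2_example_unmap_and_wait(struct dlb2_hw *hw, u32 domain_id,
                                       u32 port_id, u32 qid)
{
        struct dlb2_pending_port_unmaps_args pend_args = {0};
        struct dlb2_unmap_qid_args unmap_args = {0};
        struct dlb2_cmd_response resp = {0};
        int ret;

        unmap_args.port_id = port_id;
        unmap_args.qid = qid;

        ret = dlb2_hw_unmap_qid(hw, domain_id, &unmap_args, &resp, false, 0);
        if (ret)
                return ret;

        /* Poll until no unmap operations remain in progress on the port. */
        pend_args.port_id = port_id;
        do {
                ret = dlb2_hw_pending_port_unmaps(hw, domain_id, &pend_args,
                                                  &resp, false, 0);
                if (ret)
                        return ret;
        } while (resp.id != 0);

        return 0;
}
#endif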
5777
5778 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
5779                                          u32 domain_id,
5780                                          struct dlb2_cmd_response *resp,
5781                                          bool vdev_req,
5782                                          unsigned int vdev_id,
5783                                          struct dlb2_hw_domain **out_domain)
5784 {
5785         struct dlb2_hw_domain *domain;
5786
5787         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5788
5789         if (!domain) {
5790                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5791                 return -EINVAL;
5792         }
5793
5794         if (!domain->configured) {
5795                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5796                 return -EINVAL;
5797         }
5798
5799         if (domain->started) {
5800                 resp->status = DLB2_ST_DOMAIN_STARTED;
5801                 return -EINVAL;
5802         }
5803
5804         *out_domain = domain;
5805
5806         return 0;
5807 }
5808
5809 static void dlb2_log_start_domain(struct dlb2_hw *hw,
5810                                   u32 domain_id,
5811                                   bool vdev_req,
5812                                   unsigned int vdev_id)
5813 {
5814         DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
5815         if (vdev_req)
5816                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5817         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5818 }
5819
5820 /**
5821  * dlb2_hw_start_domain() - start a scheduling domain
5822  * @hw: dlb2_hw handle for a particular device.
5823  * @domain_id: domain ID.
5824  * @args: start domain arguments.
5825  * @resp: response structure.
5826  * @vdev_req: indicates whether this request came from a vdev.
5827  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5828  *
5829  * This function starts a scheduling domain, which allows applications to send
5830  * traffic through it. Once a domain is started, its resources can no longer be
5831  * configured (besides QID remapping and port enable/disable).
5832  *
5833  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5834  * device.
5835  *
5836  * Return:
5837  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5838  * assigned a detailed error code from enum dlb2_error.
5839  *
5840  * Errors:
5841  * EINVAL - Invalid domain ID, or the domain is not configured or already started.
5842  */
5843 int
5844 dlb2_hw_start_domain(struct dlb2_hw *hw,
5845                      u32 domain_id,
5846                      struct dlb2_start_domain_args *args,
5847                      struct dlb2_cmd_response *resp,
5848                      bool vdev_req,
5849                      unsigned int vdev_id)
5850 {
5851         struct dlb2_list_entry *iter;
5852         struct dlb2_dir_pq_pair *dir_queue;
5853         struct dlb2_ldb_queue *ldb_queue;
5854         struct dlb2_hw_domain *domain;
5855         int ret;
5856         RTE_SET_USED(args);
5857         RTE_SET_USED(iter);
5858
5859         dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
5860
5861         ret = dlb2_verify_start_domain_args(hw,
5862                                             domain_id,
5863                                             resp,
5864                                             vdev_req,
5865                                             vdev_id,
5866                                             &domain);
5867         if (ret)
5868                 return ret;
5869
5870         /*
5871          * Enable load-balanced and directed queue write permissions for the
5872          * queues this domain owns. Without this, the DLB2 will drop all
5873          * incoming traffic to those queues.
5874          */
5875         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
5876                 u32 vasqid_v = 0;
5877                 unsigned int offs;
5878
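                /*
                 * One valid bit per (domain, LDB queue) pair; the table is
                 * laid out domain-major, hence the offset computed below.
                 */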
5879                 DLB2_BIT_SET(vasqid_v, DLB2_SYS_LDB_VASQID_V_VASQID_V);
5880
5881                 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
5882                         ldb_queue->id.phys_id;
5883
5884                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), vasqid_v);
5885         }
5886
5887         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
5888                 u32 vasqid_v = 0;
5889                 unsigned int offs;
5890
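                /*
                 * Directed queues have a separate valid-bit table with the
                 * same domain-major layout, sized by this device version's
                 * directed port count.
                 */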
5891                 DLB2_BIT_SET(vasqid_v, DLB2_SYS_DIR_VASQID_V_VASQID_V);
5892
5893                 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
5894                         dir_queue->id.phys_id;
5895
5896                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), vasqid_v);
5897         }
5898
5899         dlb2_flush_csr(hw);
5900
5901         domain->started = true;
5902
5903         resp->status = 0;
5904
5905         return 0;
5906 }
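
/*
 * Illustrative sketch (not compiled into the driver): starting a domain is a
 * one-shot operation. Once dlb2_hw_start_domain() has succeeded, a repeated
 * call is rejected by dlb2_verify_start_domain_args() with
 * DLB2_ST_DOMAIN_STARTED. The helper name and PF-style arguments
 * (vdev_req = false) are illustrative only.
 */
#if 0
static void dlb2_example_start_domain_once(struct dlb2_hw *hw, u32 domain_id)
{
        struct dlb2_start_domain_args args = {0};
        struct dlb2_cmd_response resp = {0};

        /* Succeeds once the domain's resources are fully configured. */
        if (dlb2_hw_start_domain(hw, domain_id, &args, &resp, false, 0))
                return;

        /* A second start attempt fails with resp.status set. */
        if (dlb2_hw_start_domain(hw, domain_id, &args, &resp, false, 0))
                DLB2_HW_DBG(hw, "second start rejected, status %d\n",
                            resp.status);
}
#endif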
5907
5908 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
5909                                          u32 domain_id,
5910                                          u32 queue_id,
5911                                          bool vdev_req,
5912                                          unsigned int vf_id)
5913 {
5914         DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
5915         if (vdev_req)
5916                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5917         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5918         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5919 }
5920
5921 /**
5922  * dlb2_hw_get_dir_queue_depth() - returns the depth of a directed queue
5923  * @hw: dlb2_hw handle for a particular device.
5924  * @domain_id: domain ID.
5925  * @args: queue depth args
5926  * @resp: response structure.
5927  * @vdev_req: indicates whether this request came from a vdev.
5928  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5929  *
5930  * This function returns the depth of a directed queue.
5931  *
5932  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5933  * device.
5934  *
5935  * Return:
5936  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5937  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5938  * contains the depth.
5939  *
5940  * Errors:
5941  * EINVAL - Invalid domain ID or queue ID.
5942  */
5943 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
5944                                 u32 domain_id,
5945                                 struct dlb2_get_dir_queue_depth_args *args,
5946                                 struct dlb2_cmd_response *resp,
5947                                 bool vdev_req,
5948                                 unsigned int vdev_id)
5949 {
5950         struct dlb2_dir_pq_pair *queue;
5951         struct dlb2_hw_domain *domain;
5952         int id;
5953
5954         id = domain_id;
5955
5956         dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
5957                                      vdev_req, vdev_id);
5958
5959         domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
5960         if (!domain) {
5961                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5962                 return -EINVAL;
5963         }
5964
5965         id = args->queue_id;
5966
5967         queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
5968         if (!queue) {
5969                 resp->status = DLB2_ST_INVALID_QID;
5970                 return -EINVAL;
5971         }
5972
5973         resp->id = dlb2_dir_queue_depth(hw, queue);
5974
5975         return 0;
5976 }
5977
5978 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
5979                                          u32 domain_id,
5980                                          u32 queue_id,
5981                                          bool vdev_req,
5982                                          unsigned int vf_id)
5983 {
5984         DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
5985         if (vdev_req)
5986                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5987         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5988         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5989 }
5990
5991 /**
5992  * dlb2_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
5993  * @hw: dlb2_hw handle for a particular device.
5994  * @domain_id: domain ID.
5995  * @args: queue depth args
5996  * @resp: response structure.
5997  * @vdev_req: indicates whether this request came from a vdev.
5998  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5999  *
6000  * This function returns the depth of a load-balanced queue.
6001  *
6002  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
6003  * device.
6004  *
6005  * Return:
6006  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
6007  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
6008  * contains the depth.
6009  *
6010  * Errors:
6011  * EINVAL - Invalid domain ID or queue ID.
6012  */
6013 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
6014                                 u32 domain_id,
6015                                 struct dlb2_get_ldb_queue_depth_args *args,
6016                                 struct dlb2_cmd_response *resp,
6017                                 bool vdev_req,
6018                                 unsigned int vdev_id)
6019 {
6020         struct dlb2_hw_domain *domain;
6021         struct dlb2_ldb_queue *queue;
6022
6023         dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
6024                                      vdev_req, vdev_id);
6025
6026         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6027         if (!domain) {
6028                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6029                 return -EINVAL;
6030         }
6031
6032         queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
6033         if (!queue) {
6034                 resp->status = DLB2_ST_INVALID_QID;
6035                 return -EINVAL;
6036         }
6037
6038         resp->id = dlb2_ldb_queue_depth(hw, queue);
6039
6040         return 0;
6041 }
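
/*
 * Illustrative usage sketch (not compiled into the driver): the depth query
 * can be used to wait for a load-balanced queue to drain, for example before
 * tearing a domain down. The helper name and the unbounded busy-poll are
 * illustrative only; a real caller would bound the loop and yield between
 * reads.
 */
#if 0
static void dlb2_example_wait_ldb_queue_empty(struct dlb2_hw *hw,
                                              u32 domain_id, u32 queue_id)
{
        struct dlb2_get_ldb_queue_depth_args args = {0};
        struct dlb2_cmd_response resp = {0};

        args.queue_id = queue_id;

        do {
                if (dlb2_hw_get_ldb_queue_depth(hw, domain_id, &args, &resp,
                                                false, 0))
                        break;
        } while (resp.id != 0);
}
#endif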
6042
6043 /**
6044  * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
6045  * @hw: dlb2_hw handle for a particular device.
6046  *
6047  * This function attempts to finish any outstanding unmap procedures.
6048  * This function attempts to finish any outstanding unmap procedures and
6049  * should be called by the background worker responsible for completing
6050  * map/unmap procedures.
6051  * Return:
6052  * Returns the number of procedures that weren't completed.
6053  */
6054 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
6055 {
6056         int i, num = 0;
6057
6058         /* Finish queue unmap jobs for any domain that needs it */
6059         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6060                 struct dlb2_hw_domain *domain = &hw->domains[i];
6061
6062                 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
6063         }
6064
6065         return num;
6066 }
6067
6068 /**
6069  * dlb2_finish_map_qid_procedures() - finish any pending map procedures
6070  * @hw: dlb2_hw handle for a particular device.
6071  *
6072  * This function attempts to finish any outstanding map procedures and
6073  * should be called by the background worker responsible for completing
6074  * map/unmap procedures.
6075  *
6076  * Return:
6077  * Returns the number of procedures that weren't completed.
6078  */
6079 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
6080 {
6081         int i, num = 0;
6082
6083         /* Finish queue map jobs for any domain that needs it */
6084         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6085                 struct dlb2_hw_domain *domain = &hw->domains[i];
6086
6087                 num += dlb2_domain_finish_map_qid_procedures(hw, domain);
6088         }
6089
6090         return num;
6091 }
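
/*
 * Illustrative sketch (not compiled into the driver): the background worker
 * scheduled via os_schedule_work() is expected to call the two finish
 * routines until no procedures remain. The function name and the bare retry
 * loop are illustrative; a real worker sleeps or re-schedules itself between
 * passes.
 */
#if 0
static void dlb2_example_map_unmap_worker(struct dlb2_hw *hw)
{
        unsigned int remaining;

        do {
                remaining = dlb2_finish_unmap_qid_procedures(hw);
                remaining += dlb2_finish_map_qid_procedures(hw);
        } while (remaining != 0);
}
#endif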
6092
6093 /**
6094  * dlb2_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports.
6095  * @hw: dlb2_hw handle for a particular device.
6096  *
6097  * This function must be called prior to configuring scheduling domains. In
6098  * sparse CQ mode, the device writes each QE to its own cache line in the CQ.
6099  */
6100 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
6101 {
6102         u32 ctrl;
6103
6104         ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6105
6106         DLB2_BIT_SET(ctrl,
6107                      DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_DIR_CQ_MODE);
6108
6109         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6110 }
6111
6112 /**
6113  * dlb2_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
6114  *      ports.
6115  * @hw: dlb2_hw handle for a particular device.
6116  *
6117  * This function must be called prior to configuring scheduling domains.
6118  */
6119 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
6120 {
6121         u32 ctrl;
6122
6123         ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6124
6125         DLB2_BIT_SET(ctrl,
6126                      DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_LDB_CQ_MODE);
6127
6128         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6129 }
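
/*
 * Illustrative ordering sketch (not compiled into the driver): sparse CQ
 * mode is a device-global setting and must be selected during device
 * initialization, before any scheduling domain is configured. The init
 * helper name is illustrative only.
 */
#if 0
static void dlb2_example_select_sparse_cq_modes(struct dlb2_hw *hw)
{
        dlb2_hw_enable_sparse_ldb_cq_mode(hw);
        dlb2_hw_enable_sparse_dir_cq_mode(hw);
}
#endif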
6130