event/dlb2: add v2.5 domain reset
drivers/event/dlb2/pf/base/dlb2_resource.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include "dlb2_user.h"
6
7 #include "dlb2_hw_types.h"
8 #include "dlb2_osdep.h"
9 #include "dlb2_osdep_bitmap.h"
10 #include "dlb2_osdep_types.h"
11 #include "dlb2_regs.h"
12 #include "dlb2_resource.h"
13
14 #include "../../dlb2_priv.h"
15 #include "../../dlb2_inline_fns.h"
16
17 #define DLB2_DOM_LIST_HEAD(head, type) \
18         DLB2_LIST_HEAD((head), type, domain_list)
19
20 #define DLB2_FUNC_LIST_HEAD(head, type) \
21         DLB2_LIST_HEAD((head), type, func_list)
22
23 #define DLB2_DOM_LIST_FOR(head, ptr, iter) \
24         DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
25
26 #define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
27         DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
28
29 #define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
30         DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
31
32 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
33         DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
34
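/*
 * "Sparse" CQ mode: each QE is written to a full 64B cache line rather than
 * being densely packed. The two helpers below set the corresponding
 * directed and load-balanced mode bits in the CHP CSR control register.
 */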
35 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
36 {
37         union dlb2_chp_cfg_chp_csr_ctrl r0;
38
39         r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
40
41         r0.field.cfg_64bytes_qe_dir_cq_mode = 1;
42
43         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
44 }
45
46 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
47 {
48         union dlb2_chp_cfg_chp_csr_ctrl r0;
49
50         r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
51
52         r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;
53
54         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
55 }
56
57 /*
58  * The PF driver cannot assume that a register write will affect subsequent HCW
59  * writes. To ensure a write completes, the driver must read back a CSR. This
60  * function need only be called for configuration that can occur after the
61  * domain has started; prior to starting, applications can't send HCWs.
62  */
63 static inline void dlb2_flush_csr(struct dlb2_hw *hw)
64 {
65         DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
66 }
67
68 static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
69                                     struct dlb2_dir_pq_pair *port)
70 {
71         union dlb2_lsp_cq_dir_dsbl reg = { {0} };
72
73         reg.field.disabled = 0;
74
75         DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);
76
77         dlb2_flush_csr(hw);
78 }
79
80 static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
81                                 struct dlb2_dir_pq_pair *queue)
82 {
83         union dlb2_lsp_qid_dir_enqueue_cnt r0;
84
85         r0.val = DLB2_CSR_RD(hw,
86                              DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));
87
88         return r0.field.count;
89 }
90
91 static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
92                                     struct dlb2_ldb_port *port)
93 {
94         union dlb2_lsp_cq_ldb_dsbl reg = { {0} };
95
96         /*
97          * Don't re-enable the port if a removal is pending. The caller should
98          * mark this port as enabled (if it isn't already), and when the
99          * removal completes the port will be enabled.
100          */
101         if (port->num_pending_removals)
102                 return;
103
104         reg.field.disabled = 0;
105
106         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
107
108         dlb2_flush_csr(hw);
109 }
110
111 static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
112                                      struct dlb2_ldb_port *port)
113 {
114         union dlb2_lsp_cq_ldb_dsbl reg = { {0} };
115
116         reg.field.disabled = 1;
117
118         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
119
120         dlb2_flush_csr(hw);
121 }
122
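/*
 * The depth of a load-balanced queue is reported as the sum of three
 * counters: the AQED active count, the atomic active count, and the LDB
 * enqueue count read below.
 */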
123 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
124                                 struct dlb2_ldb_queue *queue)
125 {
126         union dlb2_lsp_qid_aqed_active_cnt r0;
127         union dlb2_lsp_qid_atm_active r1;
128         union dlb2_lsp_qid_ldb_enqueue_cnt r2;
129
130         r0.val = DLB2_CSR_RD(hw,
131                              DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
132         r1.val = DLB2_CSR_RD(hw,
133                              DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));
134
135         r2.val = DLB2_CSR_RD(hw,
136                              DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
137
138         return r0.field.count + r1.field.count + r2.field.count;
139 }
140
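/*
 * Look up a load-balanced queue by ID. For PF requests the ID is a physical
 * ID and indexes the device-wide array directly; for vdev requests it is a
 * virtual ID and is resolved by searching the vdev's in-use domains and its
 * available queues.
 */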
141 static struct dlb2_ldb_queue *
142 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
143                            u32 id,
144                            bool vdev_req,
145                            unsigned int vdev_id)
146 {
147         struct dlb2_list_entry *iter1;
148         struct dlb2_list_entry *iter2;
149         struct dlb2_function_resources *rsrcs;
150         struct dlb2_hw_domain *domain;
151         struct dlb2_ldb_queue *queue;
152         RTE_SET_USED(iter1);
153         RTE_SET_USED(iter2);
154
155         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
156                 return NULL;
157
158         rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
159
160         if (!vdev_req)
161                 return &hw->rsrcs.ldb_queues[id];
162
163         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
164                 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
165                         if (queue->id.virt_id == id)
166                                 return queue;
167         }
168
169         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
170                 if (queue->id.virt_id == id)
171                         return queue;
172
173         return NULL;
174 }
175
176 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
177                                                       u32 id,
178                                                       bool vdev_req,
179                                                       unsigned int vdev_id)
180 {
181         struct dlb2_list_entry *iteration;
182         struct dlb2_function_resources *rsrcs;
183         struct dlb2_hw_domain *domain;
184         RTE_SET_USED(iteration);
185
186         if (id >= DLB2_MAX_NUM_DOMAINS)
187                 return NULL;
188
189         if (!vdev_req)
190                 return &hw->domains[id];
191
192         rsrcs = &hw->vdev[vdev_id];
193
194         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
195                 if (domain->id.virt_id == id)
196                         return domain;
197
198         return NULL;
199 }
200
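/*
 * Record a QID-map slot state change and update the associated bookkeeping:
 * the queue's and port's mapping counts and the domain's pending
 * addition/removal counts. Transitions not covered below are internal
 * errors and are rejected with -EFAULT.
 */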
201 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
202                                            struct dlb2_ldb_port *port,
203                                            struct dlb2_ldb_queue *queue,
204                                            int slot,
205                                            enum dlb2_qid_map_state new_state)
206 {
207         enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
208         struct dlb2_hw_domain *domain;
209         int domain_id;
210
211         domain_id = port->domain_id.phys_id;
212
213         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
214         if (domain == NULL) {
215                 DLB2_HW_ERR(hw,
216                             "[%s()] Internal error: unable to find domain %d\n",
217                             __func__, domain_id);
218                 return -EINVAL;
219         }
220
221         switch (curr_state) {
222         case DLB2_QUEUE_UNMAPPED:
223                 switch (new_state) {
224                 case DLB2_QUEUE_MAPPED:
225                         queue->num_mappings++;
226                         port->num_mappings++;
227                         break;
228                 case DLB2_QUEUE_MAP_IN_PROG:
229                         queue->num_pending_additions++;
230                         domain->num_pending_additions++;
231                         break;
232                 default:
233                         goto error;
234                 }
235                 break;
236         case DLB2_QUEUE_MAPPED:
237                 switch (new_state) {
238                 case DLB2_QUEUE_UNMAPPED:
239                         queue->num_mappings--;
240                         port->num_mappings--;
241                         break;
242                 case DLB2_QUEUE_UNMAP_IN_PROG:
243                         port->num_pending_removals++;
244                         domain->num_pending_removals++;
245                         break;
246                 case DLB2_QUEUE_MAPPED:
247                         /* Priority change, nothing to update */
248                         break;
249                 default:
250                         goto error;
251                 }
252                 break;
253         case DLB2_QUEUE_MAP_IN_PROG:
254                 switch (new_state) {
255                 case DLB2_QUEUE_UNMAPPED:
256                         queue->num_pending_additions--;
257                         domain->num_pending_additions--;
258                         break;
259                 case DLB2_QUEUE_MAPPED:
260                         queue->num_mappings++;
261                         port->num_mappings++;
262                         queue->num_pending_additions--;
263                         domain->num_pending_additions--;
264                         break;
265                 default:
266                         goto error;
267                 }
268                 break;
269         case DLB2_QUEUE_UNMAP_IN_PROG:
270                 switch (new_state) {
271                 case DLB2_QUEUE_UNMAPPED:
272                         port->num_pending_removals--;
273                         domain->num_pending_removals--;
274                         queue->num_mappings--;
275                         port->num_mappings--;
276                         break;
277                 case DLB2_QUEUE_MAPPED:
278                         port->num_pending_removals--;
279                         domain->num_pending_removals--;
280                         break;
281                 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
282                         /* Nothing to update */
283                         break;
284                 default:
285                         goto error;
286                 }
287                 break;
288         case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
289                 switch (new_state) {
290                 case DLB2_QUEUE_UNMAP_IN_PROG:
291                         /* Nothing to update */
292                         break;
293                 case DLB2_QUEUE_UNMAPPED:
294                         /*
295                          * An UNMAP_IN_PROG_PENDING_MAP slot briefly
296                          * becomes UNMAPPED before it transitions to
297                          * MAP_IN_PROG.
298                          */
299                         queue->num_mappings--;
300                         port->num_mappings--;
301                         port->num_pending_removals--;
302                         domain->num_pending_removals--;
303                         break;
304                 default:
305                         goto error;
306                 }
307                 break;
308         default:
309                 goto error;
310         }
311
312         port->qid_map[slot].state = new_state;
313
314         DLB2_HW_DBG(hw,
315                     "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
316                     __func__, queue->id.phys_id, port->id.phys_id,
317                     curr_state, new_state);
318         return 0;
319
320 error:
321         DLB2_HW_ERR(hw,
322                     "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
323                     __func__, queue->id.phys_id, port->id.phys_id,
324                     curr_state, new_state);
325         return -EFAULT;
326 }
327
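/*
 * dlb2_port_find_slot() finds the first CQ slot in the given state;
 * dlb2_port_find_slot_queue() additionally requires the slot to be mapped
 * to the given queue. Both return true and write the slot index on success.
 */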
328 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
329                                 enum dlb2_qid_map_state state,
330                                 int *slot)
331 {
332         int i;
333
334         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
335                 if (port->qid_map[i].state == state)
336                         break;
337         }
338
339         *slot = i;
340
341         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
342 }
343
344 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
345                                       enum dlb2_qid_map_state state,
346                                       struct dlb2_ldb_queue *queue,
347                                       int *slot)
348 {
349         int i;
350
351         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
352                 if (port->qid_map[i].state == state &&
353                     port->qid_map[i].qid == queue->id.phys_id)
354                         break;
355         }
356
357         *slot = i;
358
359         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
360 }
361
362 /*
363  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
364  * their names imply: they only touch CQs whose ports software has left
365  * enabled, and they should only be called by the dynamic CQ mapping code.
366  */
367 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
368                                               struct dlb2_hw_domain *domain,
369                                               struct dlb2_ldb_queue *queue)
370 {
371         struct dlb2_list_entry *iter;
372         struct dlb2_ldb_port *port;
373         int slot, i;
374         RTE_SET_USED(iter);
375
376         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
377                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
378                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
379
380                         if (!dlb2_port_find_slot_queue(port, state,
381                                                        queue, &slot))
382                                 continue;
383
384                         if (port->enabled)
385                                 dlb2_ldb_port_cq_disable(hw, port);
386                 }
387         }
388 }
389
390 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
391                                              struct dlb2_hw_domain *domain,
392                                              struct dlb2_ldb_queue *queue)
393 {
394         struct dlb2_list_entry *iter;
395         struct dlb2_ldb_port *port;
396         int slot, i;
397         RTE_SET_USED(iter);
398
399         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
400                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
401                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
402
403                         if (!dlb2_port_find_slot_queue(port, state,
404                                                        queue, &slot))
405                                 continue;
406
407                         if (port->enabled)
408                                 dlb2_ldb_port_cq_enable(hw, port);
409                 }
410         }
411 }
412
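/*
 * The next two helpers use the LSP scheduler control register to clear or
 * set the inflight-OK ("IF_status") indication for a {CQ, slot} pair; the
 * map/unmap code below uses them when quiescing a slot and when restoring
 * its default state.
 */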
413 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
414                                                 struct dlb2_ldb_port *port,
415                                                 int slot)
416 {
417         union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
418
419         r0.field.cq = port->id.phys_id;
420         r0.field.qidix = slot;
421         r0.field.value = 0;
422         r0.field.inflight_ok_v = 1;
423
424         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
425
426         dlb2_flush_csr(hw);
427 }
428
429 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
430                                               struct dlb2_ldb_port *port,
431                                               int slot)
432 {
433         union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
434
435         r0.field.cq = port->id.phys_id;
436         r0.field.qidix = slot;
437         r0.field.value = 1;
438         r0.field.inflight_ok_v = 1;
439
440         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
441
442         dlb2_flush_csr(hw);
443 }
444
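/*
 * "Static" mapping writes the QID->CQ association directly: the slot's
 * priority/valid bits, the CQ2QID entry, and the per-queue QID2CQIDX
 * vectors. It is used before the domain is started, and by the dynamic
 * mapping path once the queue has been drained of inflights.
 */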
445 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
446                                         struct dlb2_ldb_port *p,
447                                         struct dlb2_ldb_queue *q,
448                                         u8 priority)
449 {
450         union dlb2_lsp_cq2priov r0;
451         union dlb2_lsp_cq2qid0 r1;
452         union dlb2_atm_qid2cqidix_00 r2;
453         union dlb2_lsp_qid2cqidix_00 r3;
454         union dlb2_lsp_qid2cqidix2_00 r4;
455         enum dlb2_qid_map_state state;
456         int i;
457
458         /* Look for a pending or already mapped slot, else an unused slot */
459         if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
460             !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
461             !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
462                 DLB2_HW_ERR(hw,
463                             "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
464                             __func__, __LINE__);
465                 return -EFAULT;
466         }
467
468         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
469                 DLB2_HW_ERR(hw,
470                             "[%s():%d] Internal error: port slot tracking failed\n",
471                             __func__, __LINE__);
472                 return -EFAULT;
473         }
474
475         /* Read-modify-write the priority and valid bit register */
476         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));
477
478         r0.field.v |= 1 << i;
479         r0.field.prio |= (priority & 0x7) << i * 3;
480
481         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);
482
483         /* Read-modify-write the QID map register */
484         if (i < 4)
485                 r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
486         else
487                 r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));
488
489         if (i == 0 || i == 4)
490                 r1.field.qid_p0 = q->id.phys_id;
491         if (i == 1 || i == 5)
492                 r1.field.qid_p1 = q->id.phys_id;
493         if (i == 2 || i == 6)
494                 r1.field.qid_p2 = q->id.phys_id;
495         if (i == 3 || i == 7)
496                 r1.field.qid_p3 = q->id.phys_id;
497
498         if (i < 4)
499                 DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
500         else
501                 DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);
502
503         r2.val = DLB2_CSR_RD(hw,
504                              DLB2_ATM_QID2CQIDIX(q->id.phys_id,
505                                                  p->id.phys_id / 4));
506
507         r3.val = DLB2_CSR_RD(hw,
508                              DLB2_LSP_QID2CQIDIX(q->id.phys_id,
509                                                  p->id.phys_id / 4));
510
511         r4.val = DLB2_CSR_RD(hw,
512                              DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
513                                                   p->id.phys_id / 4));
514
515         switch (p->id.phys_id % 4) {
516         case 0:
517                 r2.field.cq_p0 |= 1 << i;
518                 r3.field.cq_p0 |= 1 << i;
519                 r4.field.cq_p0 |= 1 << i;
520                 break;
521
522         case 1:
523                 r2.field.cq_p1 |= 1 << i;
524                 r3.field.cq_p1 |= 1 << i;
525                 r4.field.cq_p1 |= 1 << i;
526                 break;
527
528         case 2:
529                 r2.field.cq_p2 |= 1 << i;
530                 r3.field.cq_p2 |= 1 << i;
531                 r4.field.cq_p2 |= 1 << i;
532                 break;
533
534         case 3:
535                 r2.field.cq_p3 |= 1 << i;
536                 r3.field.cq_p3 |= 1 << i;
537                 r4.field.cq_p3 |= 1 << i;
538                 break;
539         }
540
541         DLB2_CSR_WR(hw,
542                     DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
543                     r2.val);
544
545         DLB2_CSR_WR(hw,
546                     DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
547                     r3.val);
548
549         DLB2_CSR_WR(hw,
550                     DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
551                     r4.val);
552
553         dlb2_flush_csr(hw);
554
555         p->qid_map[i].qid = q->id.phys_id;
556         p->qid_map[i].priority = priority;
557
558         state = DLB2_QUEUE_MAPPED;
559
560         return dlb2_port_slot_state_transition(hw, p, q, i, state);
561 }
562
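/*
 * Seed the slot's atomic and non-atomic "haswork" indications from the
 * queue's current occupancy, so the scheduler sees any QEs that were
 * enqueued before this mapping completed.
 */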
563 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
564                                            struct dlb2_ldb_port *port,
565                                            struct dlb2_ldb_queue *queue,
566                                            int slot)
567 {
568         union dlb2_lsp_qid_aqed_active_cnt r0;
569         union dlb2_lsp_qid_ldb_enqueue_cnt r1;
570         union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
571
572         /* Set the atomic scheduling haswork bit */
573         r0.val = DLB2_CSR_RD(hw,
574                              DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
575
576         r2.field.cq = port->id.phys_id;
577         r2.field.qidix = slot;
578         r2.field.value = 1;
579         r2.field.rlist_haswork_v = r0.field.count > 0;
580
581         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
582
583         /* Set the non-atomic scheduling haswork bit */
584         r1.val = DLB2_CSR_RD(hw,
585                              DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
586
587         memset(&r2, 0, sizeof(r2));
588
589         r2.field.cq = port->id.phys_id;
590         r2.field.qidix = slot;
591         r2.field.value = 1;
592         r2.field.nalb_haswork_v = (r1.field.count > 0);
593
594         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
595
596         dlb2_flush_csr(hw);
597
598         return 0;
599 }
600
601 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
602                                               struct dlb2_ldb_port *port,
603                                               u8 slot)
604 {
605         union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
606
607         r2.field.cq = port->id.phys_id;
608         r2.field.qidix = slot;
609         r2.field.value = 0;
610         r2.field.rlist_haswork_v = 1;
611
612         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
613
614         memset(&r2, 0, sizeof(r2));
615
616         r2.field.cq = port->id.phys_id;
617         r2.field.qidix = slot;
618         r2.field.value = 0;
619         r2.field.nalb_haswork_v = 1;
620
621         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
622
623         dlb2_flush_csr(hw);
624 }
625
626 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
627                                               struct dlb2_ldb_queue *queue)
628 {
629         union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };
630
631         r0.field.limit = queue->num_qid_inflights;
632
633         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
634 }
635
636 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
637                                                 struct dlb2_ldb_queue *queue)
638 {
639         DLB2_CSR_WR(hw,
640                     DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
641                     DLB2_LSP_QID_LDB_INFL_LIM_RST);
642 }
643
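/*
 * Second half of a dynamic map operation, run once the queue's inflight
 * count has reached zero: perform the static map, set the slot's haswork
 * bits, restore the IF_status and inflight limit, and re-enable the CQs
 * that were disabled while the queue drained.
 */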
644 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
645                                                 struct dlb2_hw_domain *domain,
646                                                 struct dlb2_ldb_port *port,
647                                                 struct dlb2_ldb_queue *queue)
648 {
649         struct dlb2_list_entry *iter;
650         union dlb2_lsp_qid_ldb_infl_cnt r0;
651         enum dlb2_qid_map_state state;
652         int slot, ret, i;
653         u8 prio;
654         RTE_SET_USED(iter);
655
656         r0.val = DLB2_CSR_RD(hw,
657                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
658
659         if (r0.field.count) {
660                 DLB2_HW_ERR(hw,
661                             "[%s()] Internal error: non-zero QID inflight count\n",
662                             __func__);
663                 return -EINVAL;
664         }
665
666         /*
667          * Static map the port and set its corresponding has_work bits.
668          */
669         state = DLB2_QUEUE_MAP_IN_PROG;
670         if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
671                 return -EINVAL;
672
673         if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
674                 DLB2_HW_ERR(hw,
675                             "[%s():%d] Internal error: port slot tracking failed\n",
676                             __func__, __LINE__);
677                 return -EFAULT;
678         }
679
680         prio = port->qid_map[slot].priority;
681
682         /*
683          * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
684          * the port's qid_map state.
685          */
686         ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
687         if (ret)
688                 return ret;
689
690         ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
691         if (ret)
692                 return ret;
693
694         /*
695          * Ensure IF_status(cq,qid) is 0 before enabling the port, to
696          * prevent spurious schedules from causing the queue's inflight
697          * count to increase.
698          */
699         dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
700
701         /* Reset the queue's inflight status */
702         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
703                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
704                         state = DLB2_QUEUE_MAPPED;
705                         if (!dlb2_port_find_slot_queue(port, state,
706                                                        queue, &slot))
707                                 continue;
708
709                         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
710                 }
711         }
712
713         dlb2_ldb_queue_set_inflight_limit(hw, queue);
714
715         /* Re-enable CQs mapped to this queue */
716         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
717
718         /* If this queue has other mappings pending, clear its inflight limit */
719         if (queue->num_pending_additions > 0)
720                 dlb2_ldb_queue_clear_inflight_limit(hw, queue);
721
722         return 0;
723 }
724
725 /**
726  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
727  * @hw: dlb2_hw handle for a particular device.
728  * @port: load-balanced port
729  * @queue: load-balanced queue
730  * @priority: queue servicing priority
731  *
732  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
733  * at a later point, and <0 if an error occurred.
734  */
735 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
736                                          struct dlb2_ldb_port *port,
737                                          struct dlb2_ldb_queue *queue,
738                                          u8 priority)
739 {
740         union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
741         enum dlb2_qid_map_state state;
742         struct dlb2_hw_domain *domain;
743         int domain_id, slot, ret;
744
745         domain_id = port->domain_id.phys_id;
746
747         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
748         if (domain == NULL) {
749                 DLB2_HW_ERR(hw,
750                             "[%s()] Internal error: unable to find domain %d\n",
751                             __func__, port->domain_id.phys_id);
752                 return -EINVAL;
753         }
754
755         /*
756          * Set the QID inflight limit to 0 to prevent further scheduling of the
757          * queue.
758          */
759         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);
760
761         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
762                 DLB2_HW_ERR(hw,
763                             "Internal error: No available unmapped slots\n");
764                 return -EFAULT;
765         }
766
767         if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
768                 DLB2_HW_ERR(hw,
769                             "[%s():%d] Internal error: port slot tracking failed\n",
770                             __func__, __LINE__);
771                 return -EFAULT;
772         }
773
774         port->qid_map[slot].qid = queue->id.phys_id;
775         port->qid_map[slot].priority = priority;
776
777         state = DLB2_QUEUE_MAP_IN_PROG;
778         ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
779         if (ret)
780                 return ret;
781
782         r0.val = DLB2_CSR_RD(hw,
783                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
784
785         if (r0.field.count) {
786                 /*
787                  * The queue is owed completions so it's not safe to map it
788                  * yet. Schedule a kernel thread to complete the mapping later,
789                  * once software has completed all the queue's inflight events.
790                  */
791                 if (!os_worker_active(hw))
792                         os_schedule_work(hw);
793
794                 return 1;
795         }
796
797         /*
798          * Disable the affected CQ, and the CQs already mapped to the QID,
799          * before reading the QID's inflight count a second time. There is an
800          * unlikely race in which the QID may schedule one more QE after we
801          * read an inflight count of 0, and disabling the CQs guarantees that
802          * the race will not occur after a re-read of the inflight count
803          * register.
804          */
805         if (port->enabled)
806                 dlb2_ldb_port_cq_disable(hw, port);
807
808         dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
809
810         r0.val = DLB2_CSR_RD(hw,
811                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
812
813         if (r0.field.count) {
814                 if (port->enabled)
815                         dlb2_ldb_port_cq_enable(hw, port);
816
817                 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
818
819                 /*
820                  * The queue is owed completions so it's not safe to map it
821                  * yet. Schedule a kernel thread to complete the mapping later,
822                  * once software has completed all the queue's inflight events.
823                  */
824                 if (!os_worker_active(hw))
825                         os_schedule_work(hw);
826
827                 return 1;
828         }
829
830         return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
831 }
832
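/*
 * For each of the port's slots with a map still in progress, re-check the
 * queue's inflight count and, if it has drained, complete the map using the
 * same disable/re-read/re-enable sequence as the dynamic map path.
 */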
833 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
834                                         struct dlb2_hw_domain *domain,
835                                         struct dlb2_ldb_port *port)
836 {
837         int i;
838
839         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
840                 union dlb2_lsp_qid_ldb_infl_cnt r0;
841                 struct dlb2_ldb_queue *queue;
842                 int qid;
843
844                 if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
845                         continue;
846
847                 qid = port->qid_map[i].qid;
848
849                 queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
850
851                 if (queue == NULL) {
852                         DLB2_HW_ERR(hw,
853                                     "[%s()] Internal error: unable to find queue %d\n",
854                                     __func__, qid);
855                         continue;
856                 }
857
858                 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
859
860                 if (r0.field.count)
861                         continue;
862
863                 /*
864                  * Disable the affected CQ, and the CQs already mapped to the
865                  * QID, before reading the QID's inflight count a second time.
866                  * There is an unlikely race in which the QID may schedule one
867                  * more QE after we read an inflight count of 0, and disabling
868                  * the CQs guarantees that the race will not occur after a
869                  * re-read of the inflight count register.
870                  */
871                 if (port->enabled)
872                         dlb2_ldb_port_cq_disable(hw, port);
873
874                 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
875
876                 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
877
878                 if (r0.field.count) {
879                         if (port->enabled)
880                                 dlb2_ldb_port_cq_enable(hw, port);
881
882                         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
883
884                         continue;
885                 }
886
887                 dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
888         }
889 }
890
891 static unsigned int
892 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
893                                       struct dlb2_hw_domain *domain)
894 {
895         struct dlb2_list_entry *iter;
896         struct dlb2_ldb_port *port;
897         int i;
898         RTE_SET_USED(iter);
899
900         if (!domain->configured || domain->num_pending_additions == 0)
901                 return 0;
902
903         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
904                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
905                         dlb2_domain_finish_map_port(hw, domain, port);
906         }
907
908         return domain->num_pending_additions;
909 }
910
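/*
 * Remove a QID->CQ mapping: clear the slot's valid bit and the CQ's bit in
 * the queue's QID2CQIDX vectors, then record the slot as unmapped.
 */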
911 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
912                                    struct dlb2_ldb_port *port,
913                                    struct dlb2_ldb_queue *queue)
914 {
915         enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
916         union dlb2_lsp_cq2priov r0;
917         union dlb2_atm_qid2cqidix_00 r1;
918         union dlb2_lsp_qid2cqidix_00 r2;
919         union dlb2_lsp_qid2cqidix2_00 r3;
920         u32 queue_id;
921         u32 port_id;
922         int i;
923
924         /* Find the queue's slot */
925         mapped = DLB2_QUEUE_MAPPED;
926         in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
927         pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
928
929         if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
930             !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
931             !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
932                 DLB2_HW_ERR(hw,
933                             "[%s():%d] Internal error: QID %d isn't mapped\n",
934                             __func__, __LINE__, queue->id.phys_id);
935                 return -EFAULT;
936         }
937
938         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
939                 DLB2_HW_ERR(hw,
940                             "[%s():%d] Internal error: port slot tracking failed\n",
941                             __func__, __LINE__);
942                 return -EFAULT;
943         }
944
945         port_id = port->id.phys_id;
946         queue_id = queue->id.phys_id;
947
948         /* Read-modify-write the priority and valid bit register */
949         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));
950
951         r0.field.v &= ~(1 << i);
952
953         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);
954
955         r1.val = DLB2_CSR_RD(hw,
956                              DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));
957
958         r2.val = DLB2_CSR_RD(hw,
959                              DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));
960
961         r3.val = DLB2_CSR_RD(hw,
962                              DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));
963
964         switch (port_id % 4) {
965         case 0:
966                 r1.field.cq_p0 &= ~(1 << i);
967                 r2.field.cq_p0 &= ~(1 << i);
968                 r3.field.cq_p0 &= ~(1 << i);
969                 break;
970
971         case 1:
972                 r1.field.cq_p1 &= ~(1 << i);
973                 r2.field.cq_p1 &= ~(1 << i);
974                 r3.field.cq_p1 &= ~(1 << i);
975                 break;
976
977         case 2:
978                 r1.field.cq_p2 &= ~(1 << i);
979                 r2.field.cq_p2 &= ~(1 << i);
980                 r3.field.cq_p2 &= ~(1 << i);
981                 break;
982
983         case 3:
984                 r1.field.cq_p3 &= ~(1 << i);
985                 r2.field.cq_p3 &= ~(1 << i);
986                 r3.field.cq_p3 &= ~(1 << i);
987                 break;
988         }
989
990         DLB2_CSR_WR(hw,
991                     DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
992                     r1.val);
993
994         DLB2_CSR_WR(hw,
995                     DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
996                     r2.val);
997
998         DLB2_CSR_WR(hw,
999                     DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
1000                     r3.val);
1001
1002         dlb2_flush_csr(hw);
1003
1004         unmapped = DLB2_QUEUE_UNMAPPED;
1005
1006         return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
1007 }
1008
1009 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
1010                                  struct dlb2_hw_domain *domain,
1011                                  struct dlb2_ldb_port *port,
1012                                  struct dlb2_ldb_queue *queue,
1013                                  u8 prio)
1014 {
1015         if (domain->started)
1016                 return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
1017         else
1018                 return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
1019 }
1020
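/*
 * Complete a pending unmap for one {port, slot}: clear the mapping and the
 * slot's haswork bits, restore the slot's default IF_status, re-enable the
 * CQ unless software has disabled it, and start any map request that was
 * waiting on this slot.
 */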
1021 static void
1022 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
1023                                    struct dlb2_hw_domain *domain,
1024                                    struct dlb2_ldb_port *port,
1025                                    int slot)
1026 {
1027         enum dlb2_qid_map_state state;
1028         struct dlb2_ldb_queue *queue;
1029
1030         queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
1031
1032         state = port->qid_map[slot].state;
1033
1034         /* Update the QID2CQIDX and CQ2QID vectors */
1035         dlb2_ldb_port_unmap_qid(hw, port, queue);
1036
1037         /*
1038          * Ensure the QID will not be serviced by this {CQ, slot} by clearing
1039          * the has_work bits
1040          */
1041         dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
1042
1043         /* Reset the {CQ, slot} to its default state */
1044         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
1045
1046         /* Re-enable the CQ if it wasn't manually disabled by the user */
1047         if (port->enabled)
1048                 dlb2_ldb_port_cq_enable(hw, port);
1049
1050         /*
1051          * If there is a mapping that is pending this slot's removal, perform
1052          * the mapping now.
1053          */
1054         if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
1055                 struct dlb2_ldb_port_qid_map *map;
1056                 struct dlb2_ldb_queue *map_queue;
1057                 u8 prio;
1058
1059                 map = &port->qid_map[slot];
1060
1061                 map->qid = map->pending_qid;
1062                 map->priority = map->pending_priority;
1063
1064                 map_queue = &hw->rsrcs.ldb_queues[map->qid];
1065                 prio = map->priority;
1066
1067                 dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
1068         }
1069 }
1070
1071 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
1072                                           struct dlb2_hw_domain *domain,
1073                                           struct dlb2_ldb_port *port)
1074 {
1075         union dlb2_lsp_cq_ldb_infl_cnt r0;
1076         int i;
1077
1078         if (port->num_pending_removals == 0)
1079                 return false;
1080
1081         /*
1082          * The unmap requires all the CQ's outstanding inflights to be
1083          * completed.
1084          */
1085         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
1086         if (r0.field.count > 0)
1087                 return false;
1088
1089         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1090                 struct dlb2_ldb_port_qid_map *map;
1091
1092                 map = &port->qid_map[i];
1093
1094                 if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
1095                     map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
1096                         continue;
1097
1098                 dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
1099         }
1100
1101         return true;
1102 }
1103
1104 static unsigned int
1105 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
1106                                         struct dlb2_hw_domain *domain)
1107 {
1108         struct dlb2_list_entry *iter;
1109         struct dlb2_ldb_port *port;
1110         int i;
1111         RTE_SET_USED(iter);
1112
1113         if (!domain->configured || domain->num_pending_removals == 0)
1114                 return 0;
1115
1116         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1117                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
1118                         dlb2_domain_finish_unmap_port(hw, domain, port);
1119         }
1120
1121         return domain->num_pending_removals;
1122 }
1123
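/**
 * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * Attempts to finish pending unmaps in every configured domain.
 *
 * Return: the number of unmap procedures that remain outstanding.
 */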
1124 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
1125 {
1126         int i, num = 0;
1127
1128         /* Finish queue unmap jobs for any domain that needs it */
1129         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
1130                 struct dlb2_hw_domain *domain = &hw->domains[i];
1131
1132                 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
1133         }
1134
1135         return num;
1136 }
1137
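/**
 * dlb2_finish_map_qid_procedures() - finish any pending map procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * Attempts to finish pending QID->CQ map operations in every configured
 * domain.
 *
 * Return: the number of map procedures that remain outstanding.
 */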
1138 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
1139 {
1140         int i, num = 0;
1141
1142         /* Finish queue map jobs for any domain that needs it */
1143         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
1144                 struct dlb2_hw_domain *domain = &hw->domains[i];
1145
1146                 num += dlb2_domain_finish_map_qid_procedures(hw, domain);
1147         }
1148
1149         return num;
1150 }
1151
1152
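/*
 * Program the per-queue registers for a newly created load-balanced queue:
 * QID inflight and atomic inflight limits, lock ID compression level, depth
 * thresholds, the sequence number group/slot mapping, and, for vdev
 * requests, the virtual-to-physical QID translation entries.
 */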
1153 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
1154                                      struct dlb2_hw_domain *domain,
1155                                      struct dlb2_ldb_queue *queue,
1156                                      struct dlb2_create_ldb_queue_args *args,
1157                                      bool vdev_req,
1158                                      unsigned int vdev_id)
1159 {
1160         union dlb2_sys_vf_ldb_vqid_v r0 = { {0} };
1161         union dlb2_sys_vf_ldb_vqid2qid r1 = { {0} };
1162         union dlb2_sys_ldb_qid2vqid r2 = { {0} };
1163         union dlb2_sys_ldb_vasqid_v r3 = { {0} };
1164         union dlb2_lsp_qid_ldb_infl_lim r4 = { {0} };
1165         union dlb2_lsp_qid_aqed_active_lim r5 = { {0} };
1166         union dlb2_aqed_pipe_qid_hid_width r6 = { {0} };
1167         union dlb2_sys_ldb_qid_its r7 = { {0} };
1168         union dlb2_lsp_qid_atm_depth_thrsh r8 = { {0} };
1169         union dlb2_lsp_qid_naldb_depth_thrsh r9 = { {0} };
1170         union dlb2_aqed_pipe_qid_fid_lim r10 = { {0} };
1171         union dlb2_chp_ord_qid_sn_map r11 = { {0} };
1172         union dlb2_sys_ldb_qid_cfg_v r12 = { {0} };
1173         union dlb2_sys_ldb_qid_v r13 = { {0} };
1174
1175         struct dlb2_sn_group *sn_group;
1176         unsigned int offs;
1177
1178         /* QID write permissions are turned on when the domain is started */
1179         r3.field.vasqid_v = 0;
1180
1181         offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
1182                 queue->id.phys_id;
1183
1184         DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r3.val);
1185
1186         /*
1187          * Unordered QIDs get 4K inflights, ordered get as many as the number
1188          * of sequence numbers.
1189          */
1190         r4.field.limit = args->num_qid_inflights;
1191
1192         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r4.val);
1193
1194         r5.field.limit = queue->aqed_limit;
1195
1196         if (r5.field.limit > DLB2_MAX_NUM_AQED_ENTRIES)
1197                 r5.field.limit = DLB2_MAX_NUM_AQED_ENTRIES;
1198
1199         DLB2_CSR_WR(hw,
1200                     DLB2_LSP_QID_AQED_ACTIVE_LIM(queue->id.phys_id),
1201                     r5.val);
1202
1203         switch (args->lock_id_comp_level) {
1204         case 64:
1205                 r6.field.compress_code = 1;
1206                 break;
1207         case 128:
1208                 r6.field.compress_code = 2;
1209                 break;
1210         case 256:
1211                 r6.field.compress_code = 3;
1212                 break;
1213         case 512:
1214                 r6.field.compress_code = 4;
1215                 break;
1216         case 1024:
1217                 r6.field.compress_code = 5;
1218                 break;
1219         case 2048:
1220                 r6.field.compress_code = 6;
1221                 break;
1222         case 4096:
1223                 r6.field.compress_code = 7;
1224                 break;
1225         case 0:
1226         case 65536:
1227                 r6.field.compress_code = 0;
1228         }
1229
1230         DLB2_CSR_WR(hw,
1231                     DLB2_AQED_PIPE_QID_HID_WIDTH(queue->id.phys_id),
1232                     r6.val);
1233
1234         /* Don't timestamp QEs that pass through this queue */
1235         r7.field.qid_its = 0;
1236
1237         DLB2_CSR_WR(hw,
1238                     DLB2_SYS_LDB_QID_ITS(queue->id.phys_id),
1239                     r7.val);
1240
1241         r8.field.thresh = args->depth_threshold;
1242
1243         DLB2_CSR_WR(hw,
1244                     DLB2_LSP_QID_ATM_DEPTH_THRSH(queue->id.phys_id),
1245                     r8.val);
1246
1247         r9.field.thresh = args->depth_threshold;
1248
1249         DLB2_CSR_WR(hw,
1250                     DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue->id.phys_id),
1251                     r9.val);
1252
1253         /*
1254          * This register limits the number of inflight flows a queue can have
1255          * at one time.  It has an upper bound of 2048, but can be
1256          * over-subscribed. 512 is chosen so that a single queue doesn't use
1257          * the entire atomic storage, but can use a substantial portion if
1258          * needed.
1259          */
1260         r10.field.qid_fid_limit = 512;
1261
1262         DLB2_CSR_WR(hw,
1263                     DLB2_AQED_PIPE_QID_FID_LIM(queue->id.phys_id),
1264                     r10.val);
1265
1266         /* Configure SNs */
1267         sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
1268         r11.field.mode = sn_group->mode;
1269         r11.field.slot = queue->sn_slot;
1270         r11.field.grp  = sn_group->id;
1271
1272         DLB2_CSR_WR(hw, DLB2_CHP_ORD_QID_SN_MAP(queue->id.phys_id), r11.val);
1273
1274         r12.field.sn_cfg_v = (args->num_sequence_numbers != 0);
1275         r12.field.fid_cfg_v = (args->num_atomic_inflights != 0);
1276
1277         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), r12.val);
1278
1279         if (vdev_req) {
1280                 offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
1281
1282                 r0.field.vqid_v = 1;
1283
1284                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), r0.val);
1285
1286                 r1.field.qid = queue->id.phys_id;
1287
1288                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), r1.val);
1289
1290                 r2.field.vqid = queue->id.virt_id;
1291
1292                 DLB2_CSR_WR(hw,
1293                             DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
1294                             r2.val);
1295         }
1296
1297         r13.field.qid_v = 1;
1298
1299         DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), r13.val);
1300 }
1301
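/*
 * Ordered queues must claim a slot in a sequence number group whose
 * per-queue allocation matches the requested number of sequence numbers;
 * unordered queues (num_sequence_numbers == 0) need no SN configuration.
 */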
1302 static int
1303 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
1304                                   struct dlb2_ldb_queue *queue,
1305                                   struct dlb2_create_ldb_queue_args *args)
1306 {
1307         int slot = -1;
1308         int i;
1309
1310         queue->sn_cfg_valid = false;
1311
1312         if (args->num_sequence_numbers == 0)
1313                 return 0;
1314
1315         for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
1316                 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
1317
1318                 if (group->sequence_numbers_per_queue ==
1319                     args->num_sequence_numbers &&
1320                     !dlb2_sn_group_full(group)) {
1321                         slot = dlb2_sn_group_alloc_slot(group);
1322                         if (slot >= 0)
1323                                 break;
1324                 }
1325         }
1326
1327         if (slot == -1) {
1328                 DLB2_HW_ERR(hw,
1329                             "[%s():%d] Internal error: no sequence number slots available\n",
1330                             __func__, __LINE__);
1331                 return -EFAULT;
1332         }
1333
1334         queue->sn_cfg_valid = true;
1335         queue->sn_group = i;
1336         queue->sn_slot = slot;
1337         return 0;
1338 }
1339
1340 static int
1341 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
1342                                 struct dlb2_hw_domain *domain,
1343                                 struct dlb2_ldb_queue *queue,
1344                                 struct dlb2_create_ldb_queue_args *args)
1345 {
1346         int ret;
1347
1348         ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
1349         if (ret)
1350                 return ret;
1351
1352         /* Attach QID inflights */
1353         queue->num_qid_inflights = args->num_qid_inflights;
1354
1355         /* Attach atomic inflights */
1356         queue->aqed_limit = args->num_atomic_inflights;
1357
1358         domain->num_avail_aqed_entries -= args->num_atomic_inflights;
1359         domain->num_used_aqed_entries += args->num_atomic_inflights;
1360
1361         return 0;
1362 }
1363
1364 static int
1365 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
1366                                   u32 domain_id,
1367                                   struct dlb2_create_ldb_queue_args *args,
1368                                   struct dlb2_cmd_response *resp,
1369                                   bool vdev_req,
1370                                   unsigned int vdev_id)
1371 {
1372         struct dlb2_hw_domain *domain;
1373         int i;
1374
1375         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
1376
1377         if (domain == NULL) {
1378                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
1379                 return -EINVAL;
1380         }
1381
1382         if (!domain->configured) {
1383                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
1384                 return -EINVAL;
1385         }
1386
1387         if (domain->started) {
1388                 resp->status = DLB2_ST_DOMAIN_STARTED;
1389                 return -EINVAL;
1390         }
1391
1392         if (dlb2_list_empty(&domain->avail_ldb_queues)) {
1393                 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
1394                 return -EINVAL;
1395         }
1396
1397         if (args->num_sequence_numbers) {
1398                 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
1399                         struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
1400
1401                         if (group->sequence_numbers_per_queue ==
1402                             args->num_sequence_numbers &&
1403                             !dlb2_sn_group_full(group))
1404                                 break;
1405                 }
1406
1407                 if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
1408                         resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
1409                         return -EINVAL;
1410                 }
1411         }
1412
1413         if (args->num_qid_inflights > 4096) {
1414                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
1415                 return -EINVAL;
1416         }
1417
1418         /* Inflights must be <= number of sequence numbers if ordered */
1419         if (args->num_sequence_numbers != 0 &&
1420             args->num_qid_inflights > args->num_sequence_numbers) {
1421                 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
1422                 return -EINVAL;
1423         }
1424
1425         if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
1426                 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
1427                 return -EINVAL;
1428         }
1429
1430         if (args->num_atomic_inflights &&
1431             args->lock_id_comp_level != 0 &&
1432             args->lock_id_comp_level != 64 &&
1433             args->lock_id_comp_level != 128 &&
1434             args->lock_id_comp_level != 256 &&
1435             args->lock_id_comp_level != 512 &&
1436             args->lock_id_comp_level != 1024 &&
1437             args->lock_id_comp_level != 2048 &&
1438             args->lock_id_comp_level != 4096 &&
1439             args->lock_id_comp_level != 65536) {
1440                 resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
1441                 return -EINVAL;
1442         }
1443
1444         return 0;
1445 }
1446
1447 static void
1448 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
1449                                u32 domain_id,
1450                                struct dlb2_create_ldb_queue_args *args,
1451                                bool vdev_req,
1452                                unsigned int vdev_id)
1453 {
1454         DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
1455         if (vdev_req)
1456                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
1457         DLB2_HW_DBG(hw, "\tDomain ID:                  %d\n",
1458                     domain_id);
1459         DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
1460                     args->num_sequence_numbers);
1461         DLB2_HW_DBG(hw, "\tNumber of QID inflights:    %d\n",
1462                     args->num_qid_inflights);
1463         DLB2_HW_DBG(hw, "\tNumber of ATM inflights:    %d\n",
1464                     args->num_atomic_inflights);
1465 }
1466
1467 /**
1468  * dlb2_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.
1469  * @hw: Contains the current state of the DLB2 hardware.
1470  * @domain_id: Domain ID
1471  * @args: User-provided arguments.
1472  * @resp: Response to user.
1473  * @vdev_req: Request came from a virtual device.
1474  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
1475  *
1476  * Return: 0 on success, < 0 on error. If the driver is unable to satisfy the
1477  * request, resp->status is set accordingly.
1478  */
1479 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
1480                              u32 domain_id,
1481                              struct dlb2_create_ldb_queue_args *args,
1482                              struct dlb2_cmd_response *resp,
1483                              bool vdev_req,
1484                              unsigned int vdev_id)
1485 {
1486         struct dlb2_hw_domain *domain;
1487         struct dlb2_ldb_queue *queue;
1488         int ret;
1489
1490         dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
1491
1492         /*
1493          * Verify that hardware resources are available before attempting to
1494          * satisfy the request. This simplifies the error unwinding code.
1495          */
1496         ret = dlb2_verify_create_ldb_queue_args(hw,
1497                                                 domain_id,
1498                                                 args,
1499                                                 resp,
1500                                                 vdev_req,
1501                                                 vdev_id);
1502         if (ret)
1503                 return ret;
1504
1505         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
1506         if (domain == NULL) {
1507                 DLB2_HW_ERR(hw,
1508                             "[%s():%d] Internal error: domain not found\n",
1509                             __func__, __LINE__);
1510                 return -EFAULT;
1511         }
1512
1513         queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
1514         if (queue == NULL) {
1515                 DLB2_HW_ERR(hw,
1516                             "[%s():%d] Internal error: no available ldb queues\n",
1517                             __func__, __LINE__);
1518                 return -EFAULT;
1519         }
1520
1521         ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
1522         if (ret < 0) {
1523                 DLB2_HW_ERR(hw,
1524                             "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
1525                             __func__, __LINE__);
1526                 return ret;
1527         }
1528
1529         dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
1530
1531         queue->num_mappings = 0;
1532
1533         queue->configured = true;
1534
1535         /*
1536          * Configuration succeeded, so move the resource from the 'avail' to
1537          * the 'used' list.
1538          */
1539         dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
1540
1541         dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
1542
1543         resp->status = 0;
1544         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
1545
1546         return 0;
1547 }
1548
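/**
 * dlb2_get_group_sequence_numbers() - Return a group's sequence numbers per
 *      queue.
 * @hw: Contains the current state of the DLB2 hardware.
 * @group_id: Sequence number group ID.
 *
 * Return: the group's per-queue sequence number allocation, or -EINVAL if
 * group_id is invalid.
 */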
1549 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)
1550 {
1551         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
1552                 return -EINVAL;
1553
1554         return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
1555 }
1556
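/**
 * dlb2_get_group_sequence_number_occupancy() - Return a group's in-use slots.
 * @hw: Contains the current state of the DLB2 hardware.
 * @group_id: Sequence number group ID.
 *
 * Return: the number of sequence number slots currently in use in the group,
 * or -EINVAL if group_id is invalid.
 */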
1557 int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw,
1558                                              unsigned int group_id)
1559 {
1560         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
1561                 return -EINVAL;
1562
1563         return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
1564 }
1565
1566 static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
1567                                                 unsigned int group_id,
1568                                                 unsigned long val)
1569 {
1570         DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
1571         DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
1572         DLB2_HW_DBG(hw, "\tValue:    %lu\n", val);
1573 }
1574
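/**
 * dlb2_set_group_sequence_numbers() - Set a group's sequence numbers per
 *      queue.
 * @hw: Contains the current state of the DLB2 hardware.
 * @group_id: Sequence number group ID.
 * @val: Requested sequence numbers per queue (64, 128, 256, 512, or 1024).
 *
 * Return: 0 on success, -EINVAL if group_id or val is invalid, or -EPERM if
 * the group is already in use by a configured load-balanced queue.
 */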
1575 int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
1576                                     unsigned int group_id,
1577                                     unsigned long val)
1578 {
1579         u32 valid_allocations[] = {64, 128, 256, 512, 1024};
1580         union dlb2_ro_pipe_grp_sn_mode r0 = { {0} };
1581         struct dlb2_sn_group *group;
1582         int mode;
1583
1584         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
1585                 return -EINVAL;
1586
1587         group = &hw->rsrcs.sn_groups[group_id];
1588
1589         /*
1590          * Once the first load-balanced queue using an SN group is configured,
1591          * the group cannot be changed.
1592          */
1593         if (group->slot_use_bitmap != 0)
1594                 return -EPERM;
1595
1596         for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
1597                 if (val == valid_allocations[mode])
1598                         break;
1599
1600         if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
1601                 return -EINVAL;
1602
1603         group->mode = mode;
1604         group->sequence_numbers_per_queue = val;
1605
1606         r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
1607         r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
1608
1609         DLB2_CSR_WR(hw, DLB2_RO_PIPE_GRP_SN_MODE, r0.val);
1610
1611         dlb2_log_set_group_sequence_numbers(hw, group_id, val);
1612
1613         return 0;
1614 }
1615
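/*
 * Program a load-balanced port's producer port (PP) registers: map the PP to
 * the domain's VAS and, for vdev requests, set up the virtual-to-physical PP
 * translation before marking the PP valid.
 */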
1616 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
1617                                        struct dlb2_hw_domain *domain,
1618                                        struct dlb2_ldb_port *port,
1619                                        bool vdev_req,
1620                                        unsigned int vdev_id)
1621 {
1622         union dlb2_sys_ldb_pp2vas r0 = { {0} };
1623         union dlb2_sys_ldb_pp_v r4 = { {0} };
1624
1625         r0.field.vas = domain->id.phys_id;
1626
1627         DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), r0.val);
1628
1629         if (vdev_req) {
1630                 union dlb2_sys_vf_ldb_vpp2pp r1 = { {0} };
1631                 union dlb2_sys_ldb_pp2vdev r2 = { {0} };
1632                 union dlb2_sys_vf_ldb_vpp_v r3 = { {0} };
1633                 unsigned int offs;
1634                 u32 virt_id;
1635
1636                 /*
1637                  * DLB uses producer port address bits 17:12 to determine the
1638                  * producer port ID. In Scalable IOV mode, PP accesses come
1639                  * through the PF MMIO window for the physical producer port,
1640                  * so for translation purposes the virtual and physical port
1641                  * IDs are equal.
1642                  */
1643                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
1644                         virt_id = port->id.virt_id;
1645                 else
1646                         virt_id = port->id.phys_id;
1647
1648                 r1.field.pp = port->id.phys_id;
1649
1650                 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
1651
1652                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), r1.val);
1653
1654                 r2.field.vdev = vdev_id;
1655
1656                 DLB2_CSR_WR(hw,
1657                             DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
1658                             r2.val);
1659
1660                 r3.field.vpp_v = 1;
1661
1662                 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r3.val);
1663         }
1664
1665         r4.field.pp_v = 1;
1666
1667         DLB2_CSR_WR(hw,
1668                     DLB2_SYS_LDB_PP_V(port->id.phys_id),
1669                     r4.val);
1670 }
1671
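/*
 * Program a load-balanced port's consumer queue (CQ) registers: the CQ base
 * address, token depth, history list bounds, inflight limit, and address
 * translation/PASID settings. The port's QID mappings are left disabled.
 */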
1672 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
1673                                       struct dlb2_hw_domain *domain,
1674                                       struct dlb2_ldb_port *port,
1675                                       uintptr_t cq_dma_base,
1676                                       struct dlb2_create_ldb_port_args *args,
1677                                       bool vdev_req,
1678                                       unsigned int vdev_id)
1679 {
1680         union dlb2_sys_ldb_cq_addr_l r0 = { {0} };
1681         union dlb2_sys_ldb_cq_addr_u r1 = { {0} };
1682         union dlb2_sys_ldb_cq2vf_pf_ro r2 = { {0} };
1683         union dlb2_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
1684         union dlb2_lsp_cq_ldb_tkn_depth_sel r4 = { {0} };
1685         union dlb2_chp_hist_list_lim r5 = { {0} };
1686         union dlb2_chp_hist_list_base r6 = { {0} };
1687         union dlb2_lsp_cq_ldb_infl_lim r7 = { {0} };
1688         union dlb2_chp_hist_list_push_ptr r8 = { {0} };
1689         union dlb2_chp_hist_list_pop_ptr r9 = { {0} };
1690         union dlb2_sys_ldb_cq_at r10 = { {0} };
1691         union dlb2_sys_ldb_cq_pasid r11 = { {0} };
1692         union dlb2_chp_ldb_cq2vas r12 = { {0} };
1693         union dlb2_lsp_cq2priov r13 = { {0} };
1694
1695         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
1696         r0.field.addr_l = cq_dma_base >> 6;
1697
1698         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), r0.val);
1699
1700         r1.field.addr_u = cq_dma_base >> 32;
1701
1702         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), r1.val);
1703
1704         /*
1705          * 'ro' == relaxed ordering. This setting allows DLB2 to write
1706          * cache lines out-of-order (but QEs within a cache line are always
1707          * updated in-order).
1708          */
1709         r2.field.vf = vdev_id;
1710         r2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);
1711         r2.field.ro = 1;
1712
1713         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), r2.val);
1714
1715         if (args->cq_depth <= 8) {
1716                 r3.field.token_depth_select = 1;
1717         } else if (args->cq_depth == 16) {
1718                 r3.field.token_depth_select = 2;
1719         } else if (args->cq_depth == 32) {
1720                 r3.field.token_depth_select = 3;
1721         } else if (args->cq_depth == 64) {
1722                 r3.field.token_depth_select = 4;
1723         } else if (args->cq_depth == 128) {
1724                 r3.field.token_depth_select = 5;
1725         } else if (args->cq_depth == 256) {
1726                 r3.field.token_depth_select = 6;
1727         } else if (args->cq_depth == 512) {
1728                 r3.field.token_depth_select = 7;
1729         } else if (args->cq_depth == 1024) {
1730                 r3.field.token_depth_select = 8;
1731         } else {
1732                 DLB2_HW_ERR(hw,
1733                             "[%s():%d] Internal error: invalid CQ depth\n",
1734                             __func__, __LINE__);
1735                 return -EFAULT;
1736         }
1737
1738         DLB2_CSR_WR(hw,
1739                     DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
1740                     r3.val);
1741
1742         /*
1743          * To support CQs with depth less than 8, program the token count
1744          * register with a non-zero initial value. Operations such as domain
1745          * reset must take this initial value into account when quiescing the
1746          * CQ.
1747          */
1748         port->init_tkn_cnt = 0;
1749
1750         if (args->cq_depth < 8) {
1751                 union dlb2_lsp_cq_ldb_tkn_cnt r14 = { {0} };
1752
1753                 port->init_tkn_cnt = 8 - args->cq_depth;
1754
1755                 r14.field.token_count = port->init_tkn_cnt;
1756
1757                 DLB2_CSR_WR(hw,
1758                             DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
1759                             r14.val);
1760         } else {
1761                 DLB2_CSR_WR(hw,
1762                             DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
1763                             DLB2_LSP_CQ_LDB_TKN_CNT_RST);
1764         }
1765
1766         r4.field.token_depth_select = r3.field.token_depth_select;
1767         r4.field.ignore_depth = 0;
1768
1769         DLB2_CSR_WR(hw,
1770                     DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
1771                     r4.val);
1772
1773         /* Reset the CQ write pointer */
1774         DLB2_CSR_WR(hw,
1775                     DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
1776                     DLB2_CHP_LDB_CQ_WPTR_RST);
1777
1778         r5.field.limit = port->hist_list_entry_limit - 1;
1779
1780         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(port->id.phys_id), r5.val);
1781
1782         r6.field.base = port->hist_list_entry_base;
1783
1784         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_BASE(port->id.phys_id), r6.val);
1785
1786         /*
1787          * The inflight limit sets a cap on the number of QEs for which this CQ
1788          * can owe completions at one time.
1789          */
1790         r7.field.limit = args->cq_history_list_size;
1791
1792         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id), r7.val);
1793
1794         r8.field.push_ptr = r6.field.base;
1795         r8.field.generation = 0;
1796
1797         DLB2_CSR_WR(hw,
1798                     DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
1799                     r8.val);
1800
1801         r9.field.pop_ptr = r6.field.base;
1802         r9.field.generation = 0;
1803
1804         DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id), r9.val);
1805
1806         /*
1807          * Address translation (AT) settings: 0: untranslated, 2: translated
1808          * (see ATS spec regarding Address Type field for more details)
1809          */
1810         r10.field.cq_at = 0;
1811
1812         DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), r10.val);
1813
1814         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
1815                 r11.field.pasid = hw->pasid[vdev_id];
1816                 r11.field.fmt2 = 1;
1817         }
1818
1819         DLB2_CSR_WR(hw,
1820                     DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
1821                     r11.val);
1822
1823         r12.field.cq2vas = domain->id.phys_id;
1824
1825         DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(port->id.phys_id), r12.val);
1826
1827         /* Disable the port's QID mappings */
1828         r13.field.v = 0;
1829
1830         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r13.val);
1831
1832         return 0;
1833 }
1834
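/*
 * Carve the port's history list entries out of the domain's allocation, then
 * program the CQ and PP registers and enable the CQ. All QID map slots start
 * out unmapped.
 */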
1835 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
1836                                    struct dlb2_hw_domain *domain,
1837                                    struct dlb2_ldb_port *port,
1838                                    uintptr_t cq_dma_base,
1839                                    struct dlb2_create_ldb_port_args *args,
1840                                    bool vdev_req,
1841                                    unsigned int vdev_id)
1842 {
1843         int ret, i;
1844
1845         port->hist_list_entry_base = domain->hist_list_entry_base +
1846                                      domain->hist_list_entry_offset;
1847         port->hist_list_entry_limit = port->hist_list_entry_base +
1848                                       args->cq_history_list_size;
1849
1850         domain->hist_list_entry_offset += args->cq_history_list_size;
1851         domain->avail_hist_list_entries -= args->cq_history_list_size;
1852
1853         ret = dlb2_ldb_port_configure_cq(hw,
1854                                          domain,
1855                                          port,
1856                                          cq_dma_base,
1857                                          args,
1858                                          vdev_req,
1859                                          vdev_id);
1860         if (ret < 0)
1861                 return ret;
1862
1863         dlb2_ldb_port_configure_pp(hw,
1864                                    domain,
1865                                    port,
1866                                    vdev_req,
1867                                    vdev_id);
1868
1869         dlb2_ldb_port_cq_enable(hw, port);
1870
1871         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
1872                 port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
1873         port->num_mappings = 0;
1874
1875         port->enabled = true;
1876
1877         port->configured = true;
1878
1879         return 0;
1880 }
1881
1882 static void
1883 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
1884                               u32 domain_id,
1885                               uintptr_t cq_dma_base,
1886                               struct dlb2_create_ldb_port_args *args,
1887                               bool vdev_req,
1888                               unsigned int vdev_id)
1889 {
1890         DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
1891         if (vdev_req)
1892                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
1893         DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
1894                     domain_id);
1895         DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
1896                     args->cq_depth);
1897         DLB2_HW_DBG(hw, "\tCQ hist list size:         %d\n",
1898                     args->cq_history_list_size);
1899         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
1900                     cq_dma_base);
1901         DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
1902         DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
1903                     args->cos_strict);
1904 }
1905
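/*
 * Validate a load-balanced port creation request: the domain must be
 * configured but not started, the CoS ID must be valid, a port must be
 * available (in the requested CoS if cos_strict is set), the CQ base address
 * must be cache-line aligned, the CQ depth must be a supported power of two
 * between 1 and 1024, and the history list size must be non-zero and fit
 * within the domain's remaining allocation.
 */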
1906 static int
1907 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
1908                                  u32 domain_id,
1909                                  uintptr_t cq_dma_base,
1910                                  struct dlb2_create_ldb_port_args *args,
1911                                  struct dlb2_cmd_response *resp,
1912                                  bool vdev_req,
1913                                  unsigned int vdev_id)
1914 {
1915         struct dlb2_hw_domain *domain;
1916         int i;
1917
1918         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
1919
1920         if (domain == NULL) {
1921                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
1922                 return -EINVAL;
1923         }
1924
1925         if (!domain->configured) {
1926                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
1927                 return -EINVAL;
1928         }
1929
1930         if (domain->started) {
1931                 resp->status = DLB2_ST_DOMAIN_STARTED;
1932                 return -EINVAL;
1933         }
1934
1935         if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
1936                 resp->status = DLB2_ST_INVALID_COS_ID;
1937                 return -EINVAL;
1938         }
1939
1940         if (args->cos_strict) {
1941                 if (dlb2_list_empty(&domain->avail_ldb_ports[args->cos_id])) {
1942                         resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
1943                         return -EINVAL;
1944                 }
1945         } else {
1946                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1947                         if (!dlb2_list_empty(&domain->avail_ldb_ports[i]))
1948                                 break;
1949                 }
1950
1951                 if (i == DLB2_NUM_COS_DOMAINS) {
1952                         resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
1953                         return -EINVAL;
1954                 }
1955         }
1956
1957         /* Check cache-line alignment */
1958         if ((cq_dma_base & 0x3F) != 0) {
1959                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
1960                 return -EINVAL;
1961         }
1962
1963         if (args->cq_depth != 1 &&
1964             args->cq_depth != 2 &&
1965             args->cq_depth != 4 &&
1966             args->cq_depth != 8 &&
1967             args->cq_depth != 16 &&
1968             args->cq_depth != 32 &&
1969             args->cq_depth != 64 &&
1970             args->cq_depth != 128 &&
1971             args->cq_depth != 256 &&
1972             args->cq_depth != 512 &&
1973             args->cq_depth != 1024) {
1974                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
1975                 return -EINVAL;
1976         }
1977
1978         /* The history list size must be >= 1 */
1979         if (!args->cq_history_list_size) {
1980                 resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
1981                 return -EINVAL;
1982         }
1983
1984         if (args->cq_history_list_size > domain->avail_hist_list_entries) {
1985                 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
1986                 return -EINVAL;
1987         }
1988
1989         return 0;
1990 }
1991
1992
1993 /**
1994  * dlb2_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
1995  *      its resources.
1996  * @hw: Contains the current state of the DLB2 hardware.
1997  * @domain_id: Domain ID
1998  * @args: User-provided arguments.
1999  * @cq_dma_base: Base DMA address for consumer queue memory
2000  * @resp: Response to user.
2001  * @vdev_req: Request came from a virtual device.
2002  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
2003  *
2004  * Return: 0 on success, < 0 on error. If the driver is unable to
2005  * satisfy a request, resp->status will be set accordingly.
2006  */
2007 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
2008                             u32 domain_id,
2009                             struct dlb2_create_ldb_port_args *args,
2010                             uintptr_t cq_dma_base,
2011                             struct dlb2_cmd_response *resp,
2012                             bool vdev_req,
2013                             unsigned int vdev_id)
2014 {
2015         struct dlb2_hw_domain *domain;
2016         struct dlb2_ldb_port *port;
2017         int ret, cos_id, i;
2018
2019         dlb2_log_create_ldb_port_args(hw,
2020                                       domain_id,
2021                                       cq_dma_base,
2022                                       args,
2023                                       vdev_req,
2024                                       vdev_id);
2025
2026         /*
2027          * Verify that hardware resources are available before attempting to
2028          * satisfy the request. This simplifies the error unwinding code.
2029          */
2030         ret = dlb2_verify_create_ldb_port_args(hw,
2031                                                domain_id,
2032                                                cq_dma_base,
2033                                                args,
2034                                                resp,
2035                                                vdev_req,
2036                                                vdev_id);
2037         if (ret)
2038                 return ret;
2039
2040         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2041         if (domain == NULL) {
2042                 DLB2_HW_ERR(hw,
2043                             "[%s():%d] Internal error: domain not found\n",
2044                             __func__, __LINE__);
2045                 return -EFAULT;
2046         }
2047
2048         if (args->cos_strict) {
2049                 cos_id = args->cos_id;
2050
2051                 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[cos_id],
2052                                           typeof(*port));
2053         } else {
2054                 int idx;
2055
2056                 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2057                         idx = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
2058
2059                         port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[idx],
2060                                                   typeof(*port));
2061                         if (port)
2062                                 break;
2063                 }
2064
2065                 cos_id = idx;
2066         }
2067
2068         if (port == NULL) {
2069                 DLB2_HW_ERR(hw,
2070                             "[%s():%d] Internal error: no available ldb ports\n",
2071                             __func__, __LINE__);
2072                 return -EFAULT;
2073         }
2074
2075         if (port->configured) {
2076                 DLB2_HW_ERR(hw,
2077                             "[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
2078                             __func__);
2079                 return -EFAULT;
2080         }
2081
2082         ret = dlb2_configure_ldb_port(hw,
2083                                       domain,
2084                                       port,
2085                                       cq_dma_base,
2086                                       args,
2087                                       vdev_req,
2088                                       vdev_id);
2089         if (ret < 0)
2090                 return ret;
2091
2092         /*
2093          * Configuration succeeded, so move the resource from the 'avail' to
2094          * the 'used' list.
2095          */
2096         dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
2097
2098         dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
2099
2100         resp->status = 0;
2101         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
2102
2103         return 0;
2104 }
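
/*
 * Typical call sequence (an illustrative sketch only; the PF PMD invokes this
 * entry point from its own port-setup path, and the argument values below are
 * hypothetical):
 *
 *      struct dlb2_create_ldb_port_args port_args = {
 *              .cq_depth = 64,
 *              .cq_history_list_size = 64,
 *              .cos_id = 0,
 *              .cos_strict = 0,
 *      };
 *      struct dlb2_cmd_response resp;
 *      int ret;
 *
 *      ret = dlb2_hw_create_ldb_port(hw, domain_id, &port_args, cq_dma_base,
 *                                    &resp, false, 0);
 *      if (ret)
 *              return ret; // resp.status holds the DLB2_ST_* reason if
 *                          // validation failed
 */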
2105
2106 static void
2107 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
2108                               u32 domain_id,
2109                               uintptr_t cq_dma_base,
2110                               struct dlb2_create_dir_port_args *args,
2111                               bool vdev_req,
2112                               unsigned int vdev_id)
2113 {
2114         DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
2115         if (vdev_req)
2116                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2117         DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
2118                     domain_id);
2119         DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
2120                     args->cq_depth);
2121         DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
2122                     cq_dma_base);
2123 }
2124
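/*
 * Look up a directed port/queue pair by ID (physical or virtual, depending on
 * the requester) in the domain's used list. Returns NULL if not found.
 */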
2125 static struct dlb2_dir_pq_pair *
2126 dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
2127                             u32 id,
2128                             bool vdev_req,
2129                             struct dlb2_hw_domain *domain)
2130 {
2131         struct dlb2_list_entry *iter;
2132         struct dlb2_dir_pq_pair *port;
2133         RTE_SET_USED(iter);
2134
2135         if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
2136                 return NULL;
2137
2138         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2139                 if ((!vdev_req && port->id.phys_id == id) ||
2140                     (vdev_req && port->id.virt_id == id))
2141                         return port;
2142
2143         return NULL;
2144 }
2145
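/*
 * Validate a directed port creation request: the domain must be configured
 * but not started, any pre-created queue ID must refer to a configured queue
 * in this domain (otherwise a free port/queue pair must be available), the CQ
 * base address must be cache-line aligned, and the CQ depth must be a
 * supported power of two between 1 and 1024.
 */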
2146 static int
2147 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
2148                                  u32 domain_id,
2149                                  uintptr_t cq_dma_base,
2150                                  struct dlb2_create_dir_port_args *args,
2151                                  struct dlb2_cmd_response *resp,
2152                                  bool vdev_req,
2153                                  unsigned int vdev_id)
2154 {
2155         struct dlb2_hw_domain *domain;
2156
2157         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2158
2159         if (domain == NULL) {
2160                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2161                 return -EINVAL;
2162         }
2163
2164         if (!domain->configured) {
2165                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
2166                 return -EINVAL;
2167         }
2168
2169         if (domain->started) {
2170                 resp->status = DLB2_ST_DOMAIN_STARTED;
2171                 return -EINVAL;
2172         }
2173
2174         /*
2175          * If the user claims the queue is already configured, validate that
2176          * the queue ID refers to a configured queue owned by this domain.
2177          */
2178         if (args->queue_id != -1) {
2179                 struct dlb2_dir_pq_pair *queue;
2180
2181                 queue = dlb2_get_domain_used_dir_pq(hw,
2182                                                     args->queue_id,
2183                                                     vdev_req,
2184                                                     domain);
2185
2186                 if (queue == NULL || queue->domain_id.phys_id !=
2187                                 domain->id.phys_id ||
2188                                 !queue->queue_configured) {
2189                         resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
2190                         return -EINVAL;
2191                 }
2192         }
2193
2194         /*
2195          * If the port's queue is not configured, validate that a free
2196          * port-queue pair is available.
2197          */
2198         if (args->queue_id == -1 &&
2199             dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
2200                 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
2201                 return -EINVAL;
2202         }
2203
2204         /* Check cache-line alignment */
2205         if ((cq_dma_base & 0x3F) != 0) {
2206                 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
2207                 return -EINVAL;
2208         }
2209
2210         if (args->cq_depth != 1 &&
2211             args->cq_depth != 2 &&
2212             args->cq_depth != 4 &&
2213             args->cq_depth != 8 &&
2214             args->cq_depth != 16 &&
2215             args->cq_depth != 32 &&
2216             args->cq_depth != 64 &&
2217             args->cq_depth != 128 &&
2218             args->cq_depth != 256 &&
2219             args->cq_depth != 512 &&
2220             args->cq_depth != 1024) {
2221                 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
2222                 return -EINVAL;
2223         }
2224
2225         return 0;
2226 }
2227
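/*
 * Program a directed port's producer port (PP) registers, mirroring the
 * load-balanced PP setup: map the PP to the domain's VAS, set up the
 * virtual-to-physical translation for vdev requests, and mark the PP valid.
 */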
2228 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
2229                                        struct dlb2_hw_domain *domain,
2230                                        struct dlb2_dir_pq_pair *port,
2231                                        bool vdev_req,
2232                                        unsigned int vdev_id)
2233 {
2234         union dlb2_sys_dir_pp2vas r0 = { {0} };
2235         union dlb2_sys_dir_pp_v r4 = { {0} };
2236
2237         r0.field.vas = domain->id.phys_id;
2238
2239         DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), r0.val);
2240
2241         if (vdev_req) {
2242                 union dlb2_sys_vf_dir_vpp2pp r1 = { {0} };
2243                 union dlb2_sys_dir_pp2vdev r2 = { {0} };
2244                 union dlb2_sys_vf_dir_vpp_v r3 = { {0} };
2245                 unsigned int offs;
2246                 u32 virt_id;
2247
2248                 /*
2249                  * DLB uses producer port address bits 17:12 to determine the
2250                  * producer port ID. In Scalable IOV mode, PP accesses come
2251                  * through the PF MMIO window for the physical producer port,
2252                  * so for translation purposes the virtual and physical port
2253                  * IDs are equal.
2254                  */
2255                 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2256                         virt_id = port->id.virt_id;
2257                 else
2258                         virt_id = port->id.phys_id;
2259
2260                 r1.field.pp = port->id.phys_id;
2261
2262                 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
2263
2264                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), r1.val);
2265
2266                 r2.field.vdev = vdev_id;
2267
2268                 DLB2_CSR_WR(hw,
2269                             DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
2270                             r2.val);
2271
2272                 r3.field.vpp_v = 1;
2273
2274                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r3.val);
2275         }
2276
2277         r4.field.pp_v = 1;
2278
2279         DLB2_CSR_WR(hw,
2280                     DLB2_SYS_DIR_PP_V(port->id.phys_id),
2281                     r4.val);
2282 }
2283
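/*
 * Program a directed port's consumer queue (CQ) registers: the CQ base
 * address, token depth, write pointer, and address translation/PASID
 * settings.
 */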
2284 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
2285                                       struct dlb2_hw_domain *domain,
2286                                       struct dlb2_dir_pq_pair *port,
2287                                       uintptr_t cq_dma_base,
2288                                       struct dlb2_create_dir_port_args *args,
2289                                       bool vdev_req,
2290                                       unsigned int vdev_id)
2291 {
2292         union dlb2_sys_dir_cq_addr_l r0 = { {0} };
2293         union dlb2_sys_dir_cq_addr_u r1 = { {0} };
2294         union dlb2_sys_dir_cq2vf_pf_ro r2 = { {0} };
2295         union dlb2_chp_dir_cq_tkn_depth_sel r3 = { {0} };
2296         union dlb2_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
2297         union dlb2_sys_dir_cq_fmt r9 = { {0} };
2298         union dlb2_sys_dir_cq_at r10 = { {0} };
2299         union dlb2_sys_dir_cq_pasid r11 = { {0} };
2300         union dlb2_chp_dir_cq2vas r12 = { {0} };
2301
2302         /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
2303         r0.field.addr_l = cq_dma_base >> 6;
2304
2305         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), r0.val);
2306
2307         r1.field.addr_u = cq_dma_base >> 32;
2308
2309         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), r1.val);
2310
2311         /*
2312          * 'ro' == relaxed ordering. This setting allows DLB2 to write
2313          * cache lines out-of-order (but QEs within a cache line are always
2314          * updated in-order).
2315          */
2316         r2.field.vf = vdev_id;
2317         r2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);
2318         r2.field.ro = 1;
2319
2320         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), r2.val);
2321
2322         if (args->cq_depth <= 8) {
2323                 r3.field.token_depth_select = 1;
2324         } else if (args->cq_depth == 16) {
2325                 r3.field.token_depth_select = 2;
2326         } else if (args->cq_depth == 32) {
2327                 r3.field.token_depth_select = 3;
2328         } else if (args->cq_depth == 64) {
2329                 r3.field.token_depth_select = 4;
2330         } else if (args->cq_depth == 128) {
2331                 r3.field.token_depth_select = 5;
2332         } else if (args->cq_depth == 256) {
2333                 r3.field.token_depth_select = 6;
2334         } else if (args->cq_depth == 512) {
2335                 r3.field.token_depth_select = 7;
2336         } else if (args->cq_depth == 1024) {
2337                 r3.field.token_depth_select = 8;
2338         } else {
2339                 DLB2_HW_ERR(hw,
2340                             "[%s():%d] Internal error: invalid CQ depth\n",
2341                             __func__, __LINE__);
2342                 return -EFAULT;
2343         }
2344
2345         DLB2_CSR_WR(hw,
2346                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2347                     r3.val);
2348
2349         /*
2350          * To support CQs with depth less than 8, program the token count
2351          * register with a non-zero initial value. Operations such as domain
2352          * reset must take this initial value into account when quiescing the
2353          * CQ.
2354          */
2355         port->init_tkn_cnt = 0;
2356
2357         if (args->cq_depth < 8) {
2358                 union dlb2_lsp_cq_dir_tkn_cnt r13 = { {0} };
2359
2360                 port->init_tkn_cnt = 8 - args->cq_depth;
2361
2362                 r13.field.count = port->init_tkn_cnt;
2363
2364                 DLB2_CSR_WR(hw,
2365                             DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
2366                             r13.val);
2367         } else {
2368                 DLB2_CSR_WR(hw,
2369                             DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
2370                             DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2371         }
2372
2373         r4.field.token_depth_select = r3.field.token_depth_select;
2374         r4.field.disable_wb_opt = 0;
2375         r4.field.ignore_depth = 0;
2376
2377         DLB2_CSR_WR(hw,
2378                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
2379                     r4.val);
2380
2381         /* Reset the CQ write pointer */
2382         DLB2_CSR_WR(hw,
2383                     DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
2384                     DLB2_CHP_DIR_CQ_WPTR_RST);
2385
2386         /* Virtualize the PPID */
2387         r9.field.keep_pf_ppid = 0;
2388
2389         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), r9.val);
2390
2391         /*
2392          * Address translation (AT) settings: 0: untranslated, 2: translated
2393          * (see ATS spec regarding Address Type field for more details)
2394          */
2395         r10.field.cq_at = 0;
2396
2397         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), r10.val);
2398
2399         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
2400                 r11.field.pasid = hw->pasid[vdev_id];
2401                 r11.field.fmt2 = 1;
2402         }
2403
2404         DLB2_CSR_WR(hw,
2405                     DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
2406                     r11.val);
2407
2408         r12.field.cq2vas = domain->id.phys_id;
2409
2410         DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(port->id.phys_id), r12.val);
2411
2412         return 0;
2413 }
2414
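/*
 * Configure a directed port's CQ and PP registers and enable its CQ.
 */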
2415 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
2416                                    struct dlb2_hw_domain *domain,
2417                                    struct dlb2_dir_pq_pair *port,
2418                                    uintptr_t cq_dma_base,
2419                                    struct dlb2_create_dir_port_args *args,
2420                                    bool vdev_req,
2421                                    unsigned int vdev_id)
2422 {
2423         int ret;
2424
2425         ret = dlb2_dir_port_configure_cq(hw,
2426                                          domain,
2427                                          port,
2428                                          cq_dma_base,
2429                                          args,
2430                                          vdev_req,
2431                                          vdev_id);
2432
2433         if (ret < 0)
2434                 return ret;
2435
2436         dlb2_dir_port_configure_pp(hw,
2437                                    domain,
2438                                    port,
2439                                    vdev_req,
2440                                    vdev_id);
2441
2442         dlb2_dir_port_cq_enable(hw, port);
2443
2444         port->enabled = true;
2445
2446         port->port_configured = true;
2447
2448         return 0;
2449 }
2450
2451 /**
2452  * dlb2_hw_create_dir_port() - Allocate and initialize a DLB directed port
2453  *      and queue. The port and queue share the same ID.
2454  * @hw: Contains the current state of the DLB2 hardware.
2455  * @domain_id: Domain ID
2456  * @args: User-provided arguments.
2457  * @cq_dma_base: Base DMA address for consumer queue memory
2458  * @resp: Response to user.
2459  * @vdev_req: Request came from a virtual device.
2460  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
2461  *
2462  * Return: 0 on success, < 0 on error. If the driver is unable to
2463  * satisfy a request, resp->status will be set accordingly.
2464  */
2465 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
2466                             u32 domain_id,
2467                             struct dlb2_create_dir_port_args *args,
2468                             uintptr_t cq_dma_base,
2469                             struct dlb2_cmd_response *resp,
2470                             bool vdev_req,
2471                             unsigned int vdev_id)
2472 {
2473         struct dlb2_dir_pq_pair *port;
2474         struct dlb2_hw_domain *domain;
2475         int ret;
2476
2477         dlb2_log_create_dir_port_args(hw,
2478                                       domain_id,
2479                                       cq_dma_base,
2480                                       args,
2481                                       vdev_req,
2482                                       vdev_id);
2483
2484         /*
2485          * Verify that hardware resources are available before attempting to
2486          * satisfy the request. This simplifies the error unwinding code.
2487          */
2488         ret = dlb2_verify_create_dir_port_args(hw,
2489                                                domain_id,
2490                                                cq_dma_base,
2491                                                args,
2492                                                resp,
2493                                                vdev_req,
2494                                                vdev_id);
2495         if (ret)
2496                 return ret;
2497
2498         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2499
2500         if (args->queue_id != -1)
2501                 port = dlb2_get_domain_used_dir_pq(hw,
2502                                                    args->queue_id,
2503                                                    vdev_req,
2504                                                    domain);
2505         else
2506                 port = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
2507                                           typeof(*port));
2508         if (port == NULL) {
2509                 DLB2_HW_ERR(hw,
2510                             "[%s():%d] Internal error: no available dir ports\n",
2511                             __func__, __LINE__);
2512                 return -EFAULT;
2513         }
2514
2515         ret = dlb2_configure_dir_port(hw,
2516                                       domain,
2517                                       port,
2518                                       cq_dma_base,
2519                                       args,
2520                                       vdev_req,
2521                                       vdev_id);
2522         if (ret < 0)
2523                 return ret;
2524
2525         /*
2526          * Configuration succeeded, so move the resource from the 'avail' to
2527          * the 'used' list (if it's not already there).
2528          */
2529         if (args->queue_id == -1) {
2530                 dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
2531
2532                 dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
2533         }
2534
2535         resp->status = 0;
2536         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
2537
2538         return 0;
2539 }
2540
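/*
 * Configure a directed queue: leave QID write permissions disabled until the
 * domain is started, disable QE timestamping, program the depth threshold,
 * map the virtual QID for vdev requests, and mark the QID valid.
 */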
2541 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
2542                                      struct dlb2_hw_domain *domain,
2543                                      struct dlb2_dir_pq_pair *queue,
2544                                      struct dlb2_create_dir_queue_args *args,
2545                                      bool vdev_req,
2546                                      unsigned int vdev_id)
2547 {
2548         union dlb2_sys_dir_vasqid_v r0 = { {0} };
2549         union dlb2_sys_dir_qid_its r1 = { {0} };
2550         union dlb2_lsp_qid_dir_depth_thrsh r2 = { {0} };
2551         union dlb2_sys_dir_qid_v r5 = { {0} };
2552
2553         unsigned int offs;
2554
2555         /* QID write permissions are turned on when the domain is started */
2556         r0.field.vasqid_v = 0;
2557
2558         offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
2559                 queue->id.phys_id;
2560
2561         DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
2562
2563         /* Don't timestamp QEs that pass through this queue */
2564         r1.field.qid_its = 0;
2565
2566         DLB2_CSR_WR(hw,
2567                     DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
2568                     r1.val);
2569
2570         r2.field.thresh = args->depth_threshold;
2571
2572         DLB2_CSR_WR(hw,
2573                     DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
2574                     r2.val);
2575
2576         if (vdev_req) {
2577                 union dlb2_sys_vf_dir_vqid_v r3 = { {0} };
2578                 union dlb2_sys_vf_dir_vqid2qid r4 = { {0} };
2579
2580                 offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver)
2581                         + queue->id.virt_id;
2582
2583                 r3.field.vqid_v = 1;
2584
2585                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), r3.val);
2586
2587                 r4.field.qid = queue->id.phys_id;
2588
2589                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), r4.val);
2590         }
2591
2592         r5.field.qid_v = 1;
2593
2594         DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), r5.val);
2595
2596         queue->queue_configured = true;
2597 }
2598
2599 static void
2600 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
2601                                u32 domain_id,
2602                                struct dlb2_create_dir_queue_args *args,
2603                                bool vdev_req,
2604                                unsigned int vdev_id)
2605 {
2606         DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
2607         if (vdev_req)
2608                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2609         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2610         DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
2611 }
2612
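/*
 * Validate a directed queue creation request: the domain must be configured
 * but not started, and either the given port ID must refer to a configured
 * port in this domain or a free port/queue pair must be available.
 */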
2613 static int
2614 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
2615                                   u32 domain_id,
2616                                   struct dlb2_create_dir_queue_args *args,
2617                                   struct dlb2_cmd_response *resp,
2618                                   bool vdev_req,
2619                                   unsigned int vdev_id)
2620 {
2621         struct dlb2_hw_domain *domain;
2622
2623         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2624
2625         if (domain == NULL) {
2626                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2627                 return -EINVAL;
2628         }
2629
2630         if (!domain->configured) {
2631                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
2632                 return -EINVAL;
2633         }
2634
2635         if (domain->started) {
2636                 resp->status = DLB2_ST_DOMAIN_STARTED;
2637                 return -EINVAL;
2638         }
2639
2640         /*
2641          * If the user claims the port is already configured, validate that the
2642          * port ID refers to a configured port owned by this domain.
2643          */
2644         if (args->port_id != -1) {
2645                 struct dlb2_dir_pq_pair *port;
2646
2647                 port = dlb2_get_domain_used_dir_pq(hw,
2648                                                    args->port_id,
2649                                                    vdev_req,
2650                                                    domain);
2651
2652                 if (port == NULL || port->domain_id.phys_id !=
2653                                 domain->id.phys_id || !port->port_configured) {
2654                         resp->status = DLB2_ST_INVALID_PORT_ID;
2655                         return -EINVAL;
2656                 }
2657         }
2658
2659         /*
2660          * If the queue's port is not configured, validate that a free
2661          * port-queue pair is available.
2662          */
2663         if (args->port_id == -1 &&
2664             dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
2665                 resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
2666                 return -EINVAL;
2667         }
2668
2669         return 0;
2670 }
2671
2672 /**
2673  * dlb2_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
2674  * @hw: Contains the current state of the DLB2 hardware.
2675  * @domain_id: Domain ID
2676  * @args: User-provided arguments.
2677  * @resp: Response to user.
2678  * @vdev_req: Request came from a virtual device.
2679  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
2680  *
2681  * Return: 0 on success, < 0 on error. If the driver is unable to
2682  * satisfy a request, resp->status will be set accordingly.
2683  */
2684 int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
2685                              u32 domain_id,
2686                              struct dlb2_create_dir_queue_args *args,
2687                              struct dlb2_cmd_response *resp,
2688                              bool vdev_req,
2689                              unsigned int vdev_id)
2690 {
2691         struct dlb2_dir_pq_pair *queue;
2692         struct dlb2_hw_domain *domain;
2693         int ret;
2694
2695         dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
2696
2697         /*
2698          * Verify that hardware resources are available before attempting to
2699          * satisfy the request. This simplifies the error unwinding code.
2700          */
2701         ret = dlb2_verify_create_dir_queue_args(hw,
2702                                                 domain_id,
2703                                                 args,
2704                                                 resp,
2705                                                 vdev_req,
2706                                                 vdev_id);
2707         if (ret)
2708                 return ret;
2709
2710         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2711         if (domain == NULL) {
2712                 DLB2_HW_ERR(hw,
2713                             "[%s():%d] Internal error: domain not found\n",
2714                             __func__, __LINE__);
2715                 return -EFAULT;
2716         }
2717
2718         if (args->port_id != -1)
2719                 queue = dlb2_get_domain_used_dir_pq(hw,
2720                                                     args->port_id,
2721                                                     vdev_req,
2722                                                     domain);
2723         else
2724                 queue = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
2725                                            typeof(*queue));
2726         if (queue == NULL) {
2727                 DLB2_HW_ERR(hw,
2728                             "[%s():%d] Internal error: no available dir queues\n",
2729                             __func__, __LINE__);
2730                 return -EFAULT;
2731         }
2732
2733         dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
2734
2735         /*
2736          * Configuration succeeded, so move the resource from the 'avail' to
2737          * the 'used' list (if it's not already there).
2738          */
2739         if (args->port_id == -1) {
2740                 dlb2_list_del(&domain->avail_dir_pq_pairs,
2741                               &queue->domain_list);
2742
2743                 dlb2_list_add(&domain->used_dir_pq_pairs,
2744                               &queue->domain_list);
2745         }
2746
2747         resp->status = 0;
2748
2749         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
2750
2751         return 0;
2752 }
2753
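/*
 * Search the port's QID map for a slot whose unmap is in progress with a map
 * to this queue pending behind it. Returns true and writes the slot index if
 * such a slot is found.
 */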
2754 static bool
2755 dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
2756                                            struct dlb2_ldb_queue *queue,
2757                                            int *slot)
2758 {
2759         int i;
2760
2761         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2762                 struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
2763
2764                 if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
2765                     map->pending_qid == queue->id.phys_id)
2766                         break;
2767         }
2768
2769         *slot = i;
2770
2771         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
2772 }
2773
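/*
 * Update the priority of an already-mapped queue by read-modify-writing the
 * port's CQ2PRIOV register.
 */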
2774 static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
2775                                               struct dlb2_ldb_port *port,
2776                                               int slot,
2777                                               struct dlb2_map_qid_args *args)
2778 {
2779         union dlb2_lsp_cq2priov r0;
2780
2781         /* Read-modify-write the priority and valid bit register */
2782         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id));
2783
2784         r0.field.v |= 1 << slot;
2785         r0.field.prio |= (args->priority & 0x7) << (slot * 3);
2786
2787         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r0.val);
2788
2789         dlb2_flush_csr(hw);
2790
2791         port->qid_map[slot].priority = args->priority;
2792 }
2793
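/*
 * A map request can be satisfied if the port has an unused slot, a slot whose
 * unmap is in progress, or a slot already associated with this queue (mapped,
 * map in progress, or pending behind an in-progress unmap); the latter case
 * amounts to a priority update.
 */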
2794 static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
2795                                               struct dlb2_ldb_queue *queue,
2796                                               struct dlb2_cmd_response *resp)
2797 {
2798         enum dlb2_qid_map_state state;
2799         int i;
2800
2801         /* Unused slot available? */
2802         if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
2803                 return 0;
2804
2805         /*
2806          * If the queue is already mapped (from the application's perspective),
2807          * this is simply a priority update.
2808          */
2809         state = DLB2_QUEUE_MAPPED;
2810         if (dlb2_port_find_slot_queue(port, state, queue, &i))
2811                 return 0;
2812
2813         state = DLB2_QUEUE_MAP_IN_PROG;
2814         if (dlb2_port_find_slot_queue(port, state, queue, &i))
2815                 return 0;
2816
2817         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
2818                 return 0;
2819
2820         /*
2821          * If the slot contains an unmap in progress, it's considered
2822          * available.
2823          */
2824         state = DLB2_QUEUE_UNMAP_IN_PROG;
2825         if (dlb2_port_find_slot(port, state, &i))
2826                 return 0;
2827
2828         state = DLB2_QUEUE_UNMAPPED;
2829         if (dlb2_port_find_slot(port, state, &i))
2830                 return 0;
2831
2832         resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
2833         return -EINVAL;
2834 }
2835
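/*
 * Look up a load-balanced queue by ID (physical or virtual, depending on the
 * requester) in the domain's used list. Returns NULL if not found.
 */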
2836 static struct dlb2_ldb_queue *
2837 dlb2_get_domain_ldb_queue(u32 id,
2838                           bool vdev_req,
2839                           struct dlb2_hw_domain *domain)
2840 {
2841         struct dlb2_list_entry *iter;
2842         struct dlb2_ldb_queue *queue;
2843         RTE_SET_USED(iter);
2844
2845         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
2846                 return NULL;
2847
2848         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
2849                 if ((!vdev_req && queue->id.phys_id == id) ||
2850                     (vdev_req && queue->id.virt_id == id))
2851                         return queue;
2852
2853         return NULL;
2854 }
2855
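/*
 * Look up a load-balanced port by ID (physical or virtual, depending on the
 * requester) across the domain's used and available lists for every CoS.
 */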
2856 static struct dlb2_ldb_port *
2857 dlb2_get_domain_used_ldb_port(u32 id,
2858                               bool vdev_req,
2859                               struct dlb2_hw_domain *domain)
2860 {
2861         struct dlb2_list_entry *iter;
2862         struct dlb2_ldb_port *port;
2863         int i;
2864         RTE_SET_USED(iter);
2865
2866         if (id >= DLB2_MAX_NUM_LDB_PORTS)
2867                 return NULL;
2868
2869         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2870                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2871                         if ((!vdev_req && port->id.phys_id == id) ||
2872                             (vdev_req && port->id.virt_id == id))
2873                                 return port;
2874
2875                 DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter)
2876                         if ((!vdev_req && port->id.phys_id == id) ||
2877                             (vdev_req && port->id.virt_id == id))
2878                                 return port;
2879         }
2880
2881         return NULL;
2882 }
2883
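/*
 * Validate a map QID request: the domain, port, and queue must all be
 * configured, the port and queue must belong to the domain, and the priority
 * must be within range.
 */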
2884 static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
2885                                     u32 domain_id,
2886                                     struct dlb2_map_qid_args *args,
2887                                     struct dlb2_cmd_response *resp,
2888                                     bool vdev_req,
2889                                     unsigned int vdev_id)
2890 {
2891         struct dlb2_hw_domain *domain;
2892         struct dlb2_ldb_port *port;
2893         struct dlb2_ldb_queue *queue;
2894         int id;
2895
2896         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2897
2898         if (domain == NULL) {
2899                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2900                 return -EINVAL;
2901         }
2902
2903         if (!domain->configured) {
2904                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
2905                 return -EINVAL;
2906         }
2907
2908         id = args->port_id;
2909
2910         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
2911
2912         if (port == NULL || !port->configured) {
2913                 resp->status = DLB2_ST_INVALID_PORT_ID;
2914                 return -EINVAL;
2915         }
2916
2917         if (args->priority >= DLB2_QID_PRIORITIES) {
2918                 resp->status = DLB2_ST_INVALID_PRIORITY;
2919                 return -EINVAL;
2920         }
2921
2922         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
2923
2924         if (queue == NULL || !queue->configured) {
2925                 resp->status = DLB2_ST_INVALID_QID;
2926                 return -EINVAL;
2927         }
2928
2929         if (queue->domain_id.phys_id != domain->id.phys_id) {
2930                 resp->status = DLB2_ST_INVALID_QID;
2931                 return -EINVAL;
2932         }
2933
2934         if (port->domain_id.phys_id != domain->id.phys_id) {
2935                 resp->status = DLB2_ST_INVALID_PORT_ID;
2936                 return -EINVAL;
2937         }
2938
2939         return 0;
2940 }
2941
2942 static void dlb2_log_map_qid(struct dlb2_hw *hw,
2943                              u32 domain_id,
2944                              struct dlb2_map_qid_args *args,
2945                              bool vdev_req,
2946                              unsigned int vdev_id)
2947 {
2948         DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
2949         if (vdev_req)
2950                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2951         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
2952                     domain_id);
2953         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
2954                     args->port_id);
2955         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
2956                     args->qid);
2957         DLB2_HW_DBG(hw, "\tPriority:  %d\n",
2958                     args->priority);
2959 }
2960
2961 int dlb2_hw_map_qid(struct dlb2_hw *hw,
2962                     u32 domain_id,
2963                     struct dlb2_map_qid_args *args,
2964                     struct dlb2_cmd_response *resp,
2965                     bool vdev_req,
2966                     unsigned int vdev_id)
2967 {
2968         struct dlb2_hw_domain *domain;
2969         struct dlb2_ldb_queue *queue;
2970         enum dlb2_qid_map_state st;
2971         struct dlb2_ldb_port *port;
2972         int ret, i, id;
2973         u8 prio;
2974
2975         dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
2976
2977         /*
2978          * Verify that hardware resources are available before attempting to
2979          * satisfy the request. This simplifies the error unwinding code.
2980          */
2981         ret = dlb2_verify_map_qid_args(hw,
2982                                        domain_id,
2983                                        args,
2984                                        resp,
2985                                        vdev_req,
2986                                        vdev_id);
2987         if (ret)
2988                 return ret;
2989
2990         prio = args->priority;
2991
2992         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2993         if (domain == NULL) {
2994                 DLB2_HW_ERR(hw,
2995                             "[%s():%d] Internal error: domain not found\n",
2996                             __func__, __LINE__);
2997                 return -EFAULT;
2998         }
2999
3000         id = args->port_id;
3001
3002         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
3003         if (port == NULL) {
3004                 DLB2_HW_ERR(hw,
3005                             "[%s():%d] Internal error: port not found\n",
3006                             __func__, __LINE__);
3007                 return -EFAULT;
3008         }
3009
3010         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
3011         if (queue == NULL) {
3012                 DLB2_HW_ERR(hw,
3013                             "[%s():%d] Internal error: queue not found\n",
3014                             __func__, __LINE__);
3015                 return -EFAULT;
3016         }
3017
3018         /*
3019          * If there are any outstanding detach operations for this port,
3020          * attempt to complete them. This may be necessary to free up a QID
3021          * slot for this requested mapping.
3022          */
3023         if (port->num_pending_removals)
3024                 dlb2_domain_finish_unmap_port(hw, domain, port);
3025
3026         ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
3027         if (ret)
3028                 return ret;
3029
3030         /* Hardware requires disabling the CQ before mapping QIDs. */
3031         if (port->enabled)
3032                 dlb2_ldb_port_cq_disable(hw, port);
3033
3034         /*
3035          * If this is only a priority change, don't perform the full QID->CQ
3036          * mapping procedure.
3037          */
3038         st = DLB2_QUEUE_MAPPED;
3039         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
3040                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3041                         DLB2_HW_ERR(hw,
3042                                     "[%s():%d] Internal error: port slot tracking failed\n",
3043                                     __func__, __LINE__);
3044                         return -EFAULT;
3045                 }
3046
3047                 if (prio != port->qid_map[i].priority) {
3048                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
3049                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
3050                 }
3051
3052                 st = DLB2_QUEUE_MAPPED;
3053                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
3054                 if (ret)
3055                         return ret;
3056
3057                 goto map_qid_done;
3058         }
3059
3060         st = DLB2_QUEUE_UNMAP_IN_PROG;
3061         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
3062                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3063                         DLB2_HW_ERR(hw,
3064                                     "[%s():%d] Internal error: port slot tracking failed\n",
3065                                     __func__, __LINE__);
3066                         return -EFAULT;
3067                 }
3068
3069                 if (prio != port->qid_map[i].priority) {
3070                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
3071                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
3072                 }
3073
3074                 st = DLB2_QUEUE_MAPPED;
3075                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
3076                 if (ret)
3077                         return ret;
3078
3079                 goto map_qid_done;
3080         }
3081
3082         /*
3083          * If this is a priority change on an in-progress mapping, don't
3084          * perform the full QID->CQ mapping procedure.
3085          */
3086         st = DLB2_QUEUE_MAP_IN_PROG;
3087         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
3088                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3089                         DLB2_HW_ERR(hw,
3090                                     "[%s():%d] Internal error: port slot tracking failed\n",
3091                                     __func__, __LINE__);
3092                         return -EFAULT;
3093                 }
3094
3095                 port->qid_map[i].priority = prio;
3096
3097                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
3098
3099                 goto map_qid_done;
3100         }
3101
3102         /*
3103          * If this is a priority change on a pending mapping, update the
3104          * pending priority.
3105          */
3106         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
3107                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3108                         DLB2_HW_ERR(hw,
3109                                     "[%s():%d] Internal error: port slot tracking failed\n",
3110                                     __func__, __LINE__);
3111                         return -EFAULT;
3112                 }
3113
3114                 port->qid_map[i].pending_priority = prio;
3115
3116                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
3117
3118                 goto map_qid_done;
3119         }
3120
3121         /*
3122          * If all the CQ's slots are in use, then there's an unmap in progress
3123          * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
3124          * mapping to pending_map and return. When the removal is completed for
3125          * the slot's current occupant, this mapping will be performed.
3126          */
3127         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
3128                 if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
3129                         enum dlb2_qid_map_state st;
3130
3131                         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3132                                 DLB2_HW_ERR(hw,
3133                                             "[%s():%d] Internal error: port slot tracking failed\n",
3134                                             __func__, __LINE__);
3135                                 return -EFAULT;
3136                         }
3137
3138                         port->qid_map[i].pending_qid = queue->id.phys_id;
3139                         port->qid_map[i].pending_priority = prio;
3140
3141                         st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
3142
3143                         ret = dlb2_port_slot_state_transition(hw, port, queue,
3144                                                               i, st);
3145                         if (ret)
3146                                 return ret;
3147
3148                         DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
3149
3150                         goto map_qid_done;
3151                 }
3152         }
3153
3154         /*
3155          * If the domain has started, a special "dynamic" CQ->queue mapping
3156          * procedure is required in order to safely update the CQ<->QID tables.
3157          * The "static" procedure cannot be used when traffic is flowing,
3158          * because the CQ<->QID tables cannot be updated atomically and the
3159          * scheduler won't see the new mapping unless the queue's if_status
3160          * changes, which isn't guaranteed.
3161          */
3162         ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
3163
3164         /* If ret is less than zero, it's due to an internal error */
3165         if (ret < 0)
3166                 return ret;
3167
3168 map_qid_done:
3169         if (port->enabled)
3170                 dlb2_ldb_port_cq_enable(hw, port);
3171
3172         resp->status = 0;
3173
3174         return 0;
3175 }
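/*
 * Illustrative sketch (not part of the driver): how a PF-level caller might
 * request a QID->CQ mapping with dlb2_hw_map_qid(). The port, queue, and
 * priority values are hypothetical; "hw" and "domain_id" stand for the
 * caller's device handle and an already-configured domain. If the domain has
 * started, the dynamic mapping procedure above may complete asynchronously.
 *
 *	struct dlb2_map_qid_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	args.port_id = 0;	// LDB port within the domain
 *	args.qid = 3;		// LDB queue to map to the port's CQ
 *	args.priority = 1;	// 0 (highest) .. DLB2_QID_PRIORITIES - 1
 *
 *	ret = dlb2_hw_map_qid(hw, domain_id, &args, &resp, false, 0);
 *	if (ret)
 *		return ret;	// resp.status holds a DLB2_ST_* reason
 */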
3176
3177 static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
3178                                u32 domain_id,
3179                                struct dlb2_unmap_qid_args *args,
3180                                bool vdev_req,
3181                                unsigned int vdev_id)
3182 {
3183         DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
3184         if (vdev_req)
3185                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3186         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
3187                     domain_id);
3188         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
3189                     args->port_id);
3190         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
3191                     args->qid);
3192         if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
3193                 DLB2_HW_DBG(hw, "\tQueue's num mappings:  %d\n",
3194                             hw->rsrcs.ldb_queues[args->qid].num_mappings);
3195 }
3196
3197 static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
3198                                       u32 domain_id,
3199                                       struct dlb2_unmap_qid_args *args,
3200                                       struct dlb2_cmd_response *resp,
3201                                       bool vdev_req,
3202                                       unsigned int vdev_id)
3203 {
3204         enum dlb2_qid_map_state state;
3205         struct dlb2_hw_domain *domain;
3206         struct dlb2_ldb_queue *queue;
3207         struct dlb2_ldb_port *port;
3208         int slot;
3209         int id;
3210
3211         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3212
3213         if (domain == NULL) {
3214                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3215                 return -EINVAL;
3216         }
3217
3218         if (!domain->configured) {
3219                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3220                 return -EINVAL;
3221         }
3222
3223         id = args->port_id;
3224
3225         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
3226
3227         if (port == NULL || !port->configured) {
3228                 resp->status = DLB2_ST_INVALID_PORT_ID;
3229                 return -EINVAL;
3230         }
3231
3232         if (port->domain_id.phys_id != domain->id.phys_id) {
3233                 resp->status = DLB2_ST_INVALID_PORT_ID;
3234                 return -EINVAL;
3235         }
3236
3237         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
3238
3239         if (queue == NULL || !queue->configured) {
3240                 DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
3241                             __func__, args->qid);
3242                 resp->status = DLB2_ST_INVALID_QID;
3243                 return -EINVAL;
3244         }
3245
3246         /*
3247          * Verify that the port has the queue mapped. From the application's
3248          * perspective, a queue is mapped if it is actually mapped, the map is
3249          * in progress, or the map is blocked pending an unmap.
3250          */
3251         state = DLB2_QUEUE_MAPPED;
3252         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
3253                 return 0;
3254
3255         state = DLB2_QUEUE_MAP_IN_PROG;
3256         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
3257                 return 0;
3258
3259         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
3260                 return 0;
3261
3262         resp->status = DLB2_ST_INVALID_QID;
3263         return -EINVAL;
3264 }
3265
3266 int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
3267                       u32 domain_id,
3268                       struct dlb2_unmap_qid_args *args,
3269                       struct dlb2_cmd_response *resp,
3270                       bool vdev_req,
3271                       unsigned int vdev_id)
3272 {
3273         struct dlb2_hw_domain *domain;
3274         struct dlb2_ldb_queue *queue;
3275         enum dlb2_qid_map_state st;
3276         struct dlb2_ldb_port *port;
3277         bool unmap_complete;
3278         int i, ret, id;
3279
3280         dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
3281
3282         /*
3283          * Verify that hardware resources are available before attempting to
3284          * satisfy the request. This simplifies the error unwinding code.
3285          */
3286         ret = dlb2_verify_unmap_qid_args(hw,
3287                                          domain_id,
3288                                          args,
3289                                          resp,
3290                                          vdev_req,
3291                                          vdev_id);
3292         if (ret)
3293                 return ret;
3294
3295         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3296         if (domain == NULL) {
3297                 DLB2_HW_ERR(hw,
3298                             "[%s():%d] Internal error: domain not found\n",
3299                             __func__, __LINE__);
3300                 return -EFAULT;
3301         }
3302
3303         id = args->port_id;
3304
3305         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
3306         if (port == NULL) {
3307                 DLB2_HW_ERR(hw,
3308                             "[%s():%d] Internal error: port not found\n",
3309                             __func__, __LINE__);
3310                 return -EFAULT;
3311         }
3312
3313         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
3314         if (queue == NULL) {
3315                 DLB2_HW_ERR(hw,
3316                             "[%s():%d] Internal error: queue not found\n",
3317                             __func__, __LINE__);
3318                 return -EFAULT;
3319         }
3320
3321         /*
3322          * If the queue hasn't been mapped yet, we need to update the slot's
3323          * state and re-enable the queue's inflights.
3324          */
3325         st = DLB2_QUEUE_MAP_IN_PROG;
3326         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
3327                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3328                         DLB2_HW_ERR(hw,
3329                                     "[%s():%d] Internal error: port slot tracking failed\n",
3330                                     __func__, __LINE__);
3331                         return -EFAULT;
3332                 }
3333
3334                 /*
3335                  * Since the in-progress map was aborted, re-enable the QID's
3336                  * inflights.
3337                  */
3338                 if (queue->num_pending_additions == 0)
3339                         dlb2_ldb_queue_set_inflight_limit(hw, queue);
3340
3341                 st = DLB2_QUEUE_UNMAPPED;
3342                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
3343                 if (ret)
3344                         return ret;
3345
3346                 goto unmap_qid_done;
3347         }
3348
3349         /*
3350          * If the queue mapping is on hold pending an unmap, we simply need to
3351          * update the slot's state.
3352          */
3353         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
3354                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3355                         DLB2_HW_ERR(hw,
3356                                     "[%s():%d] Internal error: port slot tracking failed\n",
3357                                     __func__, __LINE__);
3358                         return -EFAULT;
3359                 }
3360
3361                 st = DLB2_QUEUE_UNMAP_IN_PROG;
3362                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
3363                 if (ret)
3364                         return ret;
3365
3366                 goto unmap_qid_done;
3367         }
3368
3369         st = DLB2_QUEUE_MAPPED;
3370         if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
3371                 DLB2_HW_ERR(hw,
3372                             "[%s()] Internal error: no available CQ slots\n",
3373                             __func__);
3374                 return -EFAULT;
3375         }
3376
3377         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3378                 DLB2_HW_ERR(hw,
3379                             "[%s():%d] Internal error: port slot tracking failed\n",
3380                             __func__, __LINE__);
3381                 return -EFAULT;
3382         }
3383
3384         /*
3385          * QID->CQ mapping removal is an asynchronous procedure. It requires
3386          * stopping the DLB2 from scheduling this CQ, draining all inflights
3387          * from the CQ, then unmapping the queue from the CQ. This function
3388          * simply marks the port as needing the queue unmapped, and (if
3389          * necessary) starts the unmapping worker thread.
3390          */
3391         dlb2_ldb_port_cq_disable(hw, port);
3392
3393         st = DLB2_QUEUE_UNMAP_IN_PROG;
3394         ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
3395         if (ret)
3396                 return ret;
3397
3398         /*
3399          * Attempt to finish the unmapping now, in case the port has no
3400          * outstanding inflights. If that's not the case, this will fail and
3401          * the unmapping will be completed at a later time.
3402          */
3403         unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
3404
3405         /*
3406          * If the unmapping couldn't complete immediately, launch the worker
3407          * thread (if it isn't already launched) to finish it later.
3408          */
3409         if (!unmap_complete && !os_worker_active(hw))
3410                 os_schedule_work(hw);
3411
3412 unmap_qid_done:
3413         resp->status = 0;
3414
3415         return 0;
3416 }
3417
3418 static void
3419 dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
3420                                   struct dlb2_pending_port_unmaps_args *args,
3421                                   bool vdev_req,
3422                                   unsigned int vdev_id)
3423 {
3424         DLB2_HW_DBG(hw, "DLB2 unmaps in progress arguments:\n");
3425         if (vdev_req)
3426                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
3427         DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
3428 }
3429
3430 int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
3431                                 u32 domain_id,
3432                                 struct dlb2_pending_port_unmaps_args *args,
3433                                 struct dlb2_cmd_response *resp,
3434                                 bool vdev_req,
3435                                 unsigned int vdev_id)
3436 {
3437         struct dlb2_hw_domain *domain;
3438         struct dlb2_ldb_port *port;
3439
3440         dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
3441
3442         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3443
3444         if (domain == NULL) {
3445                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3446                 return -EINVAL;
3447         }
3448
3449         port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
3450         if (port == NULL || !port->configured) {
3451                 resp->status = DLB2_ST_INVALID_PORT_ID;
3452                 return -EINVAL;
3453         }
3454
3455         resp->id = port->num_pending_removals;
3456
3457         return 0;
3458 }
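/*
 * Illustrative sketch (not part of the driver): unmapping a queue from a CQ
 * that still has inflights completes asynchronously (see dlb2_hw_unmap_qid()
 * above), so a caller can poll dlb2_hw_pending_port_unmaps() until the count
 * reaches zero. The IDs are hypothetical, and a real caller would sleep or
 * yield between polls rather than busy-wait.
 *
 *	struct dlb2_pending_port_unmaps_args poll_args = {0};
 *	struct dlb2_unmap_qid_args unmap_args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	unmap_args.port_id = 0;
 *	unmap_args.qid = 3;
 *
 *	ret = dlb2_hw_unmap_qid(hw, domain_id, &unmap_args, &resp, false, 0);
 *	if (ret)
 *		return ret;
 *
 *	poll_args.port_id = 0;
 *	do {
 *		ret = dlb2_hw_pending_port_unmaps(hw, domain_id, &poll_args,
 *						  &resp, false, 0);
 *	} while (ret == 0 && resp.id != 0);
 */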
3459
3460 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
3461                                          u32 domain_id,
3462                                          struct dlb2_cmd_response *resp,
3463                                          bool vdev_req,
3464                                          unsigned int vdev_id)
3465 {
3466         struct dlb2_hw_domain *domain;
3467
3468         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3469
3470         if (domain == NULL) {
3471                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3472                 return -EINVAL;
3473         }
3474
3475         if (!domain->configured) {
3476                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3477                 return -EINVAL;
3478         }
3479
3480         if (domain->started) {
3481                 resp->status = DLB2_ST_DOMAIN_STARTED;
3482                 return -EINVAL;
3483         }
3484
3485         return 0;
3486 }
3487
3488 static void dlb2_log_start_domain(struct dlb2_hw *hw,
3489                                   u32 domain_id,
3490                                   bool vdev_req,
3491                                   unsigned int vdev_id)
3492 {
3493         DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
3494         if (vdev_req)
3495                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3496         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
3497 }
3498
3499 /**
3500  * dlb2_hw_start_domain() - Lock the domain configuration
3501  * @hw: Contains the current state of the DLB2 hardware.
3502  * @domain_id: Domain ID
3503  * @arg: User-provided arguments (unused, here for ioctl callback template).
3504  * @resp: Response to user.
3505  * @vdev_req: Request came from a virtual device.
3506  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
3507  *
3508  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
3509  * satisfy a request, resp->status will be set accordingly.
3510  */
3511 int
3512 dlb2_hw_start_domain(struct dlb2_hw *hw,
3513                      u32 domain_id,
3514                      struct dlb2_start_domain_args *arg,
3515                      struct dlb2_cmd_response *resp,
3516                      bool vdev_req,
3517                      unsigned int vdev_id)
3518 {
3519         struct dlb2_list_entry *iter;
3520         struct dlb2_dir_pq_pair *dir_queue;
3521         struct dlb2_ldb_queue *ldb_queue;
3522         struct dlb2_hw_domain *domain;
3523         int ret;
3524         RTE_SET_USED(arg);
3525         RTE_SET_USED(iter);
3526
3527         dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
3528
3529         ret = dlb2_verify_start_domain_args(hw,
3530                                             domain_id,
3531                                             resp,
3532                                             vdev_req,
3533                                             vdev_id);
3534         if (ret)
3535                 return ret;
3536
3537         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3538         if (domain == NULL) {
3539                 DLB2_HW_ERR(hw,
3540                             "[%s():%d] Internal error: domain not found\n",
3541                             __func__, __LINE__);
3542                 return -EFAULT;
3543         }
3544
3545         /*
3546          * Enable load-balanced and directed queue write permissions for the
3547          * queues this domain owns. Without this, the DLB2 will drop all
3548          * incoming traffic to those queues.
3549          */
3550         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
3551                 union dlb2_sys_ldb_vasqid_v r0 = { {0} };
3552                 unsigned int offs;
3553
3554                 r0.field.vasqid_v = 1;
3555
3556                 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
3557                         ldb_queue->id.phys_id;
3558
3559                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
3560         }
3561
3562         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
3563                 union dlb2_sys_dir_vasqid_v r0 = { {0} };
3564                 unsigned int offs;
3565
3566                 r0.field.vasqid_v = 1;
3567
3568                 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
3569                         dir_queue->id.phys_id;
3570
3571                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
3572         }
3573
3574         dlb2_flush_csr(hw);
3575
3576         domain->started = true;
3577
3578         resp->status = 0;
3579
3580         return 0;
3581 }
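/*
 * Illustrative sketch (not part of the driver): once every queue, port, and
 * QID->CQ mapping for the domain has been configured, the caller locks the
 * configuration by starting the domain. The args struct carries no inputs;
 * it exists only to fit the ioctl callback template. "hw" and "domain_id"
 * are assumed to come from the caller.
 *
 *	struct dlb2_start_domain_args start_args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	ret = dlb2_hw_start_domain(hw, domain_id, &start_args, &resp,
 *				   false, 0);
 *	if (ret)
 *		return ret;	// e.g. resp.status == DLB2_ST_DOMAIN_STARTED
 */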
3582
3583 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
3584                                          u32 domain_id,
3585                                          u32 queue_id,
3586                                          bool vdev_req,
3587                                          unsigned int vf_id)
3588 {
3589         DLB2_HW_DBG(hw, "DLB2 get directed queue depth:\n");
3590         if (vdev_req)
3591                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
3592         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
3593         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
3594 }
3595
3596 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
3597                                 u32 domain_id,
3598                                 struct dlb2_get_dir_queue_depth_args *args,
3599                                 struct dlb2_cmd_response *resp,
3600                                 bool vdev_req,
3601                                 unsigned int vdev_id)
3602 {
3603         struct dlb2_dir_pq_pair *queue;
3604         struct dlb2_hw_domain *domain;
3605         int id;
3606
3607         id = domain_id;
3608
3609         dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
3610                                      vdev_req, vdev_id);
3611
3612         domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
3613         if (domain == NULL) {
3614                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3615                 return -EINVAL;
3616         }
3617
3618         id = args->queue_id;
3619
3620         queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
3621         if (queue == NULL) {
3622                 resp->status = DLB2_ST_INVALID_QID;
3623                 return -EINVAL;
3624         }
3625
3626         resp->id = dlb2_dir_queue_depth(hw, queue);
3627
3628         return 0;
3629 }
3630
3631 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
3632                                          u32 domain_id,
3633                                          u32 queue_id,
3634                                          bool vdev_req,
3635                                          unsigned int vf_id)
3636 {
3637         DLB2_HW_DBG(hw, "DLB2 get load-balanced queue depth:\n");
3638         if (vdev_req)
3639                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
3640         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
3641         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
3642 }
3643
3644 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
3645                                 u32 domain_id,
3646                                 struct dlb2_get_ldb_queue_depth_args *args,
3647                                 struct dlb2_cmd_response *resp,
3648                                 bool vdev_req,
3649                                 unsigned int vdev_id)
3650 {
3651         struct dlb2_hw_domain *domain;
3652         struct dlb2_ldb_queue *queue;
3653
3654         dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
3655                                      vdev_req, vdev_id);
3656
3657         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3658         if (domain == NULL) {
3659                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3660                 return -EINVAL;
3661         }
3662
3663         queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
3664         if (queue == NULL) {
3665                 resp->status = DLB2_ST_INVALID_QID;
3666                 return -EINVAL;
3667         }
3668
3669         resp->id = dlb2_ldb_queue_depth(hw, queue);
3670
3671         return 0;
3672 }
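/*
 * Illustrative sketch (not part of the driver): the depth queries above can
 * be used to confirm a queue has drained, e.g. before resetting a domain.
 * The queue ID and the "queue_empty" local are hypothetical.
 *
 *	struct dlb2_get_ldb_queue_depth_args depth_args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	bool queue_empty;
 *	int ret;
 *
 *	depth_args.queue_id = 3;
 *
 *	ret = dlb2_hw_get_ldb_queue_depth(hw, domain_id, &depth_args, &resp,
 *					  false, 0);
 *	if (ret)
 *		return ret;
 *
 *	queue_empty = (resp.id == 0);
 */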