event/dlb2: add v2.5 create LDB port
[dpdk.git] drivers/event/dlb2/pf/base/dlb2_resource.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb2_user.h"

#include "dlb2_hw_types.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"
#include "dlb2_resource.h"

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"

#define DLB2_DOM_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)

void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
{
	union dlb2_chp_cfg_chp_csr_ctrl r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

	r0.field.cfg_64bytes_qe_dir_cq_mode = 1;

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}

void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
{
	union dlb2_chp_cfg_chp_csr_ctrl r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

	r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}

/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function only needs to be called for configuration that can occur after the
 * domain has started; prior to starting, applications cannot send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
}
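
/*
 * Illustrative sketch (not driver code) of the write-then-flush pattern
 * described above; SOME_CFG_REG is a placeholder for any register that can
 * be written after the domain has started:
 *
 *     DLB2_CSR_WR(hw, SOME_CFG_REG, val); // posted write
 *     dlb2_flush_csr(hw); // read-back guarantees the write completed
 */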

static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *port)
{
	union dlb2_lsp_cq_dir_dsbl reg;

	reg.field.disabled = 0;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}

static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
				struct dlb2_dir_pq_pair *queue)
{
	union dlb2_lsp_qid_dir_enqueue_cnt r0;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));

	return r0.field.count;
}

static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_dsbl reg;

	/*
	 * Don't re-enable the port if a removal is pending. The caller should
	 * mark this port as enabled (if it isn't already), and when the
	 * removal completes the port will be enabled.
	 */
	if (port->num_pending_removals)
		return;

	reg.field.disabled = 0;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_dsbl reg;

	reg.field.disabled = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}

static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
				struct dlb2_ldb_queue *queue)
{
	union dlb2_lsp_qid_aqed_active_cnt r0;
	union dlb2_lsp_qid_atm_active r1;
	union dlb2_lsp_qid_ldb_enqueue_cnt r2;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
	r1.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));

	return r0.field.count + r1.field.count + r2.field.count;
}

static struct dlb2_ldb_queue *
dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
			   u32 id,
			   bool vdev_req,
			   unsigned int vdev_id)
{
	struct dlb2_list_entry *iter1;
	struct dlb2_list_entry *iter2;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter1);
	RTE_SET_USED(iter2);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	if (!vdev_req)
		return &hw->rsrcs.ldb_queues[id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
			if (queue->id.virt_id == id)
				return queue;
	}

	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
		if (queue->id.virt_id == id)
			return queue;

	return NULL;
}

static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
						      u32 id,
						      bool vdev_req,
						      unsigned int vdev_id)
{
	struct dlb2_list_entry *iteration;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	RTE_SET_USED(iteration);

	if (id >= DLB2_MAX_NUM_DOMAINS)
		return NULL;

	if (!vdev_req)
		return &hw->domains[id];

	rsrcs = &hw->vdev[vdev_id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
		if (domain->id.virt_id == id)
			return domain;

	return NULL;
}

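/*
 * Summary of the legal slot state transitions handled below (derived from
 * the switch statement; anything else is an error):
 *
 *   UNMAPPED                  -> MAPPED, MAP_IN_PROG
 *   MAPPED                    -> UNMAPPED, UNMAP_IN_PROG, MAPPED (prio change)
 *   MAP_IN_PROG               -> UNMAPPED, MAPPED
 *   UNMAP_IN_PROG             -> UNMAPPED, MAPPED, UNMAP_IN_PROG_PENDING_MAP
 *   UNMAP_IN_PROG_PENDING_MAP -> UNMAP_IN_PROG, UNMAPPED
 */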
static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot,
					   enum dlb2_qid_map_state new_state)
{
	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
	struct dlb2_hw_domain *domain;
	int domain_id;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, domain_id);
		return -EINVAL;
	}

	switch (curr_state) {
	case DLB2_QUEUE_UNMAPPED:
		switch (new_state) {
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			break;
		case DLB2_QUEUE_MAP_IN_PROG:
			queue->num_pending_additions++;
			domain->num_pending_additions++;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAPPED:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG:
			port->num_pending_removals++;
			domain->num_pending_removals++;
			break;
		case DLB2_QUEUE_MAPPED:
			/* Priority change, nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_MAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
			/* Nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
		switch (new_state) {
		case DLB2_QUEUE_UNMAP_IN_PROG:
			/* Nothing to update */
			break;
		case DLB2_QUEUE_UNMAPPED:
			/*
			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
			 * becomes UNMAPPED before it transitions to
			 * MAP_IN_PROG.
			 */
			queue->num_mappings--;
			port->num_mappings--;
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		default:
			goto error;
		}
		break;
	default:
		goto error;
	}

	port->qid_map[slot].state = new_state;

	DLB2_HW_DBG(hw,
		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return 0;

error:
	DLB2_HW_ERR(hw,
		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return -EFAULT;
}

static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
				enum dlb2_qid_map_state state,
				int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
				      enum dlb2_qid_map_state state,
				      struct dlb2_ldb_queue *queue,
				      int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state &&
		    port->qid_map[i].qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

/*
 * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
 * their function names imply, and should only be called by the dynamic CQ
 * mapping code.
 */
static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
					      struct dlb2_hw_domain *domain,
					      struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}

static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain,
					     struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}

static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
						struct dlb2_ldb_port *port,
						int slot)
{
	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };

	r0.field.cq = port->id.phys_id;
	r0.field.qidix = slot;
	r0.field.value = 0;
	r0.field.inflight_ok_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot)
{
	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };

	r0.field.cq = port->id.phys_id;
	r0.field.qidix = slot;
	r0.field.value = 1;
	r0.field.inflight_ok_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);

	dlb2_flush_csr(hw);
}

static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
					struct dlb2_ldb_port *p,
					struct dlb2_ldb_queue *q,
					u8 priority)
{
	union dlb2_lsp_cq2priov r0;
	union dlb2_lsp_cq2qid0 r1;
	union dlb2_atm_qid2cqidix_00 r2;
	union dlb2_lsp_qid2cqidix_00 r3;
	union dlb2_lsp_qid2cqidix2_00 r4;
	enum dlb2_qid_map_state state;
	int i;

	/* Look for a pending or already mapped slot, else an unused slot */
	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

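	/*
	 * Layout note for the writes below: each CQ has 8 QID slots. Slot
	 * i's valid bit is bit i of CQ2PRIOV.v and its 3-bit priority
	 * occupies bits [3i+2:3i] of CQ2PRIOV.prio; the QIDs for slots 0-3
	 * live in CQ2QID0 (fields qid_p0-qid_p3) and slots 4-7 in CQ2QID1.
	 */
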
	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));

	r0.field.v |= 1 << i;
	r0.field.prio |= (priority & 0x7) << (i * 3);

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);

	/* Read-modify-write the QID map register */
	if (i < 4)
		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
	else
		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));

	if (i == 0 || i == 4)
		r1.field.qid_p0 = q->id.phys_id;
	if (i == 1 || i == 5)
		r1.field.qid_p1 = q->id.phys_id;
	if (i == 2 || i == 6)
		r1.field.qid_p2 = q->id.phys_id;
	if (i == 3 || i == 7)
		r1.field.qid_p3 = q->id.phys_id;

	if (i < 4)
		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
	else
		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_ATM_QID2CQIDIX(q->id.phys_id,
						 p->id.phys_id / 4));

	r3.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX(q->id.phys_id,
						 p->id.phys_id / 4));

	r4.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
						  p->id.phys_id / 4));

	switch (p->id.phys_id % 4) {
	case 0:
		r2.field.cq_p0 |= 1 << i;
		r3.field.cq_p0 |= 1 << i;
		r4.field.cq_p0 |= 1 << i;
		break;

	case 1:
		r2.field.cq_p1 |= 1 << i;
		r3.field.cq_p1 |= 1 << i;
		r4.field.cq_p1 |= 1 << i;
		break;

	case 2:
		r2.field.cq_p2 |= 1 << i;
		r3.field.cq_p2 |= 1 << i;
		r4.field.cq_p2 |= 1 << i;
		break;

	case 3:
		r2.field.cq_p3 |= 1 << i;
		r3.field.cq_p3 |= 1 << i;
		r4.field.cq_p3 |= 1 << i;
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    r2.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    r3.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
		    r4.val);

	dlb2_flush_csr(hw);

	p->qid_map[i].qid = q->id.phys_id;
	p->qid_map[i].priority = priority;

	state = DLB2_QUEUE_MAPPED;

	return dlb2_port_slot_state_transition(hw, p, q, i, state);
}

static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot)
{
	union dlb2_lsp_qid_aqed_active_cnt r0;
	union dlb2_lsp_qid_ldb_enqueue_cnt r1;
	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };

	/* Set the atomic scheduling haswork bit */
	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 1;
	r2.field.rlist_haswork_v = r0.field.count > 0;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	/* Set the non-atomic scheduling haswork bit */
	r1.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));

	memset(&r2, 0, sizeof(r2));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 1;
	r2.field.nalb_haswork_v = (r1.field.count > 0);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	dlb2_flush_csr(hw);

	return 0;
}

static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      u8 slot)
{
	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 0;
	r2.field.rlist_haswork_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	memset(&r2, 0, sizeof(r2));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 0;
	r2.field.nalb_haswork_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
					      struct dlb2_ldb_queue *queue)
{
	union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };

	r0.field.limit = queue->num_qid_inflights;

	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
}

static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
						struct dlb2_ldb_queue *queue)
{
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
}

static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
						struct dlb2_hw_domain *domain,
						struct dlb2_ldb_port *port,
						struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	union dlb2_lsp_qid_ldb_infl_cnt r0;
	enum dlb2_qid_map_state state;
	int slot, ret, i;
	u8 prio;
	RTE_SET_USED(iter);

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: non-zero QID inflight count\n",
			    __func__);
		return -EINVAL;
	}

	/*
	 * Statically map the port and set its corresponding has_work bits.
	 */
	state = DLB2_QUEUE_MAP_IN_PROG;
	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
		return -EINVAL;

	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	prio = port->qid_map[slot].priority;

	/*
	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
	 * the port's qid_map state.
	 */
	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
	if (ret)
		return ret;

	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
	if (ret)
		return ret;

	/*
	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
	 * prevent spurious schedules from causing the queue's inflight
	 * count to increase.
	 */
	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);

	/* Reset the queue's inflight status */
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			state = DLB2_QUEUE_MAPPED;
			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
		}
	}

	dlb2_ldb_queue_set_inflight_limit(hw, queue);

	/* Re-enable CQs mapped to this queue */
	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

	/* If this queue has other mappings pending, clear its inflight limit */
	if (queue->num_pending_additions > 0)
		dlb2_ldb_queue_clear_inflight_limit(hw, queue);

	return 0;
}

/**
 * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
 * @hw: dlb2_hw handle for a particular device.
 * @port: load-balanced port
 * @queue: load-balanced queue
 * @priority: queue servicing priority
 *
 * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
 * at a later point, and <0 if an error occurred.
 */
static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
					 struct dlb2_ldb_port *port,
					 struct dlb2_ldb_queue *queue,
					 u8 priority)
{
	union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	int domain_id, slot, ret;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, port->domain_id.phys_id);
		return -EINVAL;
	}

	/*
	 * Set the QID inflight limit to 0 to prevent further scheduling of the
	 * queue.
	 */
	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);

	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
		DLB2_HW_ERR(hw,
			    "Internal error: No available unmapped slots\n");
		return -EFAULT;
	}

	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	port->qid_map[slot].qid = queue->id.phys_id;
	port->qid_map[slot].priority = priority;

	state = DLB2_QUEUE_MAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
	if (ret)
		return ret;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	/*
	 * Disable the affected CQ, and the CQs already mapped to the QID,
	 * before reading the QID's inflight count a second time. There is an
	 * unlikely race in which the QID may schedule one more QE after we
	 * read an inflight count of 0, and disabling the CQs guarantees that
	 * the race will not occur after a re-read of the inflight count
	 * register.
	 */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		if (port->enabled)
			dlb2_ldb_port_cq_enable(hw, port);

		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}
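
/*
 * Minimal caller sketch (illustrative only) showing how the tri-state return
 * value documented above is typically consumed; the handling here is an
 * assumption, not driver code:
 *
 *     int ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
 *     if (ret < 0)
 *             return ret; // hardware/state error
 *     if (ret == 1)
 *             ;           // deferred: the scheduled worker completes it
 *     // ret == 0: the queue is mapped and schedulable now
 */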

static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain,
					struct dlb2_ldb_port *port)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		union dlb2_lsp_qid_ldb_infl_cnt r0;
		struct dlb2_ldb_queue *queue;
		int qid;

		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
			continue;

		qid = port->qid_map[i].qid;

		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);

		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: unable to find queue %d\n",
				    __func__, qid);
			continue;
		}

		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));

		if (r0.field.count)
			continue;

		/*
		 * Disable the affected CQ, and the CQs already mapped to the
		 * QID, before reading the QID's inflight count a second time.
		 * There is an unlikely race in which the QID may schedule one
		 * more QE after we read an inflight count of 0, and disabling
		 * the CQs guarantees that the race will not occur after a
		 * re-read of the inflight count register.
		 */
		if (port->enabled)
			dlb2_ldb_port_cq_disable(hw, port);

		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));

		if (r0.field.count) {
			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);

			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

			continue;
		}

		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
	}
}

static unsigned int
dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_additions == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_map_port(hw, domain, port);
	}

	return domain->num_pending_additions;
}

static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port,
				   struct dlb2_ldb_queue *queue)
{
	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
	union dlb2_lsp_cq2priov r0;
	union dlb2_atm_qid2cqidix_00 r1;
	union dlb2_lsp_qid2cqidix_00 r2;
	union dlb2_lsp_qid2cqidix2_00 r3;
	u32 queue_id;
	u32 port_id;
	int i;

	/* Find the queue's slot */
	mapped = DLB2_QUEUE_MAPPED;
	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: QID %d isn't mapped\n",
			    __func__, __LINE__, queue->id.phys_id);
		return -EFAULT;
	}

	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	port_id = port->id.phys_id;
	queue_id = queue->id.phys_id;

	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));

	r0.field.v &= ~(1 << i);

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);

	r1.val = DLB2_CSR_RD(hw,
			     DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));

	r3.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));

	switch (port_id % 4) {
	case 0:
		r1.field.cq_p0 &= ~(1 << i);
		r2.field.cq_p0 &= ~(1 << i);
		r3.field.cq_p0 &= ~(1 << i);
		break;

	case 1:
		r1.field.cq_p1 &= ~(1 << i);
		r2.field.cq_p1 &= ~(1 << i);
		r3.field.cq_p1 &= ~(1 << i);
		break;

	case 2:
		r1.field.cq_p2 &= ~(1 << i);
		r2.field.cq_p2 &= ~(1 << i);
		r3.field.cq_p2 &= ~(1 << i);
		break;

	case 3:
		r1.field.cq_p3 &= ~(1 << i);
		r2.field.cq_p3 &= ~(1 << i);
		r3.field.cq_p3 &= ~(1 << i);
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
		    r1.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
		    r2.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
		    r3.val);

	dlb2_flush_csr(hw);

	unmapped = DLB2_QUEUE_UNMAPPED;

	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
}

static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_ldb_port *port,
				 struct dlb2_ldb_queue *queue,
				 u8 prio)
{
	if (domain->started)
		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
	else
		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
}

static void
dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
				   struct dlb2_hw_domain *domain,
				   struct dlb2_ldb_port *port,
				   int slot)
{
	enum dlb2_qid_map_state state;
	struct dlb2_ldb_queue *queue;

	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];

	state = port->qid_map[slot].state;

	/* Update the QID2CQIDX and CQ2QID vectors */
	dlb2_ldb_port_unmap_qid(hw, port, queue);

	/*
	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
	 * the has_work bits
	 */
	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);

	/* Reset the {CQ, slot} to its default state */
	dlb2_ldb_port_set_queue_if_status(hw, port, slot);

	/* Re-enable the CQ if it wasn't manually disabled by the user */
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	/*
	 * If there is a mapping that is pending this slot's removal, perform
	 * the mapping now.
	 */
	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
		struct dlb2_ldb_port_qid_map *map;
		struct dlb2_ldb_queue *map_queue;
		u8 prio;

		map = &port->qid_map[slot];

		map->qid = map->pending_qid;
		map->priority = map->pending_priority;

		map_queue = &hw->rsrcs.ldb_queues[map->qid];
		prio = map->priority;

		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
	}
}

static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain,
					  struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_infl_cnt r0;
	int i;

	if (port->num_pending_removals == 0)
		return false;

	/*
	 * The unmap requires all the CQ's outstanding inflights to be
	 * completed.
	 */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
	if (r0.field.count > 0)
		return false;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map;

		map = &port->qid_map[i];

		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
			continue;

		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
	}

	return true;
}

static unsigned int
dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_removals == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_unmap_port(hw, domain, port);
	}

	return domain->num_pending_removals;
}

unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
{
	int i, num = 0;

	/* Finish queue unmap jobs for any domain that needs it */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		struct dlb2_hw_domain *domain = &hw->domains[i];

		num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
	}

	return num;
}

unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
{
	int i, num = 0;

	/* Finish queue map jobs for any domain that needs it */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		struct dlb2_hw_domain *domain = &hw->domains[i];

		num += dlb2_domain_finish_map_qid_procedures(hw, domain);
	}

	return num;
}
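
/*
 * Hedged usage sketch: the deferred (un)map work scheduled via
 * os_schedule_work() would typically drain both procedure lists until no
 * pending operations remain. The polling loop below is illustrative only,
 * not the actual OS-dependent worker:
 *
 *     while (dlb2_finish_unmap_qid_procedures(hw) > 0 ||
 *            dlb2_finish_map_qid_procedures(hw) > 0)
 *             os_msleep(1); // assumed osdep delay helper
 */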

int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)
{
	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
}

int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw,
					     unsigned int group_id)
{
	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
}

static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
						unsigned int group_id,
						unsigned long val)
{
	DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
	DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
	DLB2_HW_DBG(hw, "\tValue:    %lu\n", val);
}

int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
				    unsigned int group_id,
				    unsigned long val)
{
	u32 valid_allocations[] = {64, 128, 256, 512, 1024};
	union dlb2_ro_pipe_grp_sn_mode r0 = { {0} };
	struct dlb2_sn_group *group;
	int mode;

	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	group = &hw->rsrcs.sn_groups[group_id];

	/*
	 * Once the first load-balanced queue using an SN group is configured,
	 * the group cannot be changed.
	 */
	if (group->slot_use_bitmap != 0)
		return -EPERM;

	for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
		if (val == valid_allocations[mode])
			break;

	if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
		return -EINVAL;

	group->mode = mode;
	group->sequence_numbers_per_queue = val;

	r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
	r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;

	DLB2_CSR_WR(hw, DLB2_RO_PIPE_GRP_SN_MODE, r0.val);

	dlb2_log_set_group_sequence_numbers(hw, group_id, val);

	return 0;
}
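
/*
 * Example (illustrative): give SN group 0 an allocation of 256 sequence
 * numbers per queue. This must happen before any load-balanced queue using
 * group 0 is configured, since the group is locked once slot_use_bitmap is
 * non-zero:
 *
 *     if (dlb2_set_group_sequence_numbers(hw, 0, 256))
 *             return -1; // invalid group/value, or group already in use
 */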

static void
dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
			      u32 domain_id,
			      uintptr_t cq_dma_base,
			      struct dlb2_create_dir_port_args *args,
			      bool vdev_req,
			      unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
		    domain_id);
	DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
		    args->cq_depth);
	DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
		    cq_dma_base);
}

static struct dlb2_dir_pq_pair *
dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
			    u32 id,
			    bool vdev_req,
			    struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
		return NULL;

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
		if ((!vdev_req && port->id.phys_id == id) ||
		    (vdev_req && port->id.virt_id == id))
			return port;

	return NULL;
}

static int
dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
				 u32 domain_id,
				 uintptr_t cq_dma_base,
				 struct dlb2_create_dir_port_args *args,
				 struct dlb2_cmd_response *resp,
				 bool vdev_req,
				 unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (domain == NULL) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	/*
	 * If the user claims the queue is already configured, validate the
	 * queue ID, its domain, and that the queue is configured.
	 */
	if (args->queue_id != -1) {
		struct dlb2_dir_pq_pair *queue;

		queue = dlb2_get_domain_used_dir_pq(hw,
						    args->queue_id,
						    vdev_req,
						    domain);

		if (queue == NULL ||
		    queue->domain_id.phys_id != domain->id.phys_id ||
		    !queue->queue_configured) {
			resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
			return -EINVAL;
		}
	}

	/*
	 * If the port's queue is not configured, validate that a free
	 * port-queue pair is available.
	 */
	if (args->queue_id == -1 &&
	    dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	/* Check cache-line alignment */
	if ((cq_dma_base & 0x3F) != 0) {
		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
		return -EINVAL;
	}

	if (args->cq_depth != 1 &&
	    args->cq_depth != 2 &&
	    args->cq_depth != 4 &&
	    args->cq_depth != 8 &&
	    args->cq_depth != 16 &&
	    args->cq_depth != 32 &&
	    args->cq_depth != 64 &&
	    args->cq_depth != 128 &&
	    args->cq_depth != 256 &&
	    args->cq_depth != 512 &&
	    args->cq_depth != 1024) {
		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
		return -EINVAL;
	}

	return 0;
}
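
/*
 * Note: the cq_depth check above is equivalent to requiring a power of two
 * in [1, 1024]; a compact form of the same test (sketch only) would be:
 *
 *     bool ok = args->cq_depth >= 1 && args->cq_depth <= 1024 &&
 *               (args->cq_depth & (args->cq_depth - 1)) == 0;
 */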

static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain,
				       struct dlb2_dir_pq_pair *port,
				       bool vdev_req,
				       unsigned int vdev_id)
{
	union dlb2_sys_dir_pp2vas r0 = { {0} };
	union dlb2_sys_dir_pp_v r4 = { {0} };

	r0.field.vas = domain->id.phys_id;

	DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), r0.val);

	if (vdev_req) {
		union dlb2_sys_vf_dir_vpp2pp r1 = { {0} };
		union dlb2_sys_dir_pp2vdev r2 = { {0} };
		union dlb2_sys_vf_dir_vpp_v r3 = { {0} };
		unsigned int offs;
		u32 virt_id;

		/*
		 * DLB uses producer port address bits 17:12 to determine the
		 * producer port ID. In Scalable IOV mode, PP accesses come
		 * through the PF MMIO window for the physical producer port,
		 * so for translation purposes the virtual and physical port
		 * IDs are equal.
		 */
		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		r1.field.pp = port->id.phys_id;

		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;

		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), r1.val);

		r2.field.vdev = vdev_id;

		DLB2_CSR_WR(hw,
			    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
			    r2.val);

		r3.field.vpp_v = 1;

		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r3.val);
	}

	r4.field.pp_v = 1;

	DLB2_CSR_WR(hw,
		    DLB2_SYS_DIR_PP_V(port->id.phys_id),
		    r4.val);
}
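
/*
 * Illustrative decoding of the producer port ID from a PP MMIO address, per
 * the bits 17:12 rule described above (the 6-bit mask follows from the bit
 * range; the 4 KB-per-port window is an assumption, not driver code):
 *
 *     u32 pp_id = (mmio_addr >> 12) & 0x3F;
 */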

static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain,
				      struct dlb2_dir_pq_pair *port,
				      uintptr_t cq_dma_base,
				      struct dlb2_create_dir_port_args *args,
				      bool vdev_req,
				      unsigned int vdev_id)
{
	union dlb2_sys_dir_cq_addr_l r0 = { {0} };
	union dlb2_sys_dir_cq_addr_u r1 = { {0} };
	union dlb2_sys_dir_cq2vf_pf_ro r2 = { {0} };
	union dlb2_chp_dir_cq_tkn_depth_sel r3 = { {0} };
	union dlb2_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
	union dlb2_sys_dir_cq_fmt r9 = { {0} };
	union dlb2_sys_dir_cq_at r10 = { {0} };
	union dlb2_sys_dir_cq_pasid r11 = { {0} };
	union dlb2_chp_dir_cq2vas r12 = { {0} };

	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
	r0.field.addr_l = cq_dma_base >> 6;

	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), r0.val);

	r1.field.addr_u = cq_dma_base >> 32;

	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), r1.val);

	/*
	 * 'ro' == relaxed ordering. This setting allows DLB2 to write
	 * cache lines out-of-order (but QEs within a cache line are always
	 * updated in-order).
	 */
	r2.field.vf = vdev_id;
	r2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);
	r2.field.ro = 1;

	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), r2.val);

	if (args->cq_depth <= 8) {
		r3.field.token_depth_select = 1;
	} else if (args->cq_depth == 16) {
		r3.field.token_depth_select = 2;
	} else if (args->cq_depth == 32) {
		r3.field.token_depth_select = 3;
	} else if (args->cq_depth == 64) {
		r3.field.token_depth_select = 4;
	} else if (args->cq_depth == 128) {
		r3.field.token_depth_select = 5;
	} else if (args->cq_depth == 256) {
		r3.field.token_depth_select = 6;
	} else if (args->cq_depth == 512) {
		r3.field.token_depth_select = 7;
	} else if (args->cq_depth == 1024) {
		r3.field.token_depth_select = 8;
	} else {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: invalid CQ depth\n",
			    __func__, __LINE__);
		return -EFAULT;
	}
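
	/*
	 * Equivalently, token_depth_select == log2(cq_depth) - 2 for depths
	 * of 8 and above, clamped to 1 for the sub-8 depths (1, 2, 4).
	 */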
1457
1458         DLB2_CSR_WR(hw,
1459                     DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
1460                     r3.val);
1461
1462         /*
1463          * To support CQs with depth less than 8, program the token count
1464          * register with a non-zero initial value. Operations such as domain
1465          * reset must take this initial value into account when quiescing the
1466          * CQ.
1467          */
1468         port->init_tkn_cnt = 0;
1469
1470         if (args->cq_depth < 8) {
1471                 union dlb2_lsp_cq_dir_tkn_cnt r13 = { {0} };
1472
1473                 port->init_tkn_cnt = 8 - args->cq_depth;
1474
1475                 r13.field.count = port->init_tkn_cnt;
1476
1477                 DLB2_CSR_WR(hw,
1478                             DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
1479                             r13.val);
1480         } else {
1481                 DLB2_CSR_WR(hw,
1482                             DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
1483                             DLB2_LSP_CQ_DIR_TKN_CNT_RST);
1484         }
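
        /*
         * Worked example (illustrative): a CQ created with depth 4 uses the
         * depth-8 token encoding above but pre-loads init_tkn_cnt = 8 - 4 = 4
         * tokens, so four of the eight token credits are permanently consumed
         * and the effective CQ depth is 4.
         */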
1485
1486         r4.field.token_depth_select = r3.field.token_depth_select;
1487         r4.field.disable_wb_opt = 0;
1488         r4.field.ignore_depth = 0;
1489
1490         DLB2_CSR_WR(hw,
1491                     DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
1492                     r4.val);
1493
1494         /* Reset the CQ write pointer */
1495         DLB2_CSR_WR(hw,
1496                     DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
1497                     DLB2_CHP_DIR_CQ_WPTR_RST);
1498
1499         /* Virtualize the PPID */
1500         r9.field.keep_pf_ppid = 0;
1501
1502         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), r9.val);
1503
1504         /*
1505          * Address translation (AT) settings: 0: untranslated, 2: translated
1506          * (see ATS spec regarding Address Type field for more details)
1507          */
1508         r10.field.cq_at = 0;
1509
1510         DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), r10.val);
1511
1512         if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
1513                 r11.field.pasid = hw->pasid[vdev_id];
1514                 r11.field.fmt2 = 1;
1515         }
1516
1517         DLB2_CSR_WR(hw,
1518                     DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
1519                     r11.val);
1520
1521         r12.field.cq2vas = domain->id.phys_id;
1522
1523         DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(port->id.phys_id), r12.val);
1524
1525         return 0;
1526 }
1527
1528 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
1529                                    struct dlb2_hw_domain *domain,
1530                                    struct dlb2_dir_pq_pair *port,
1531                                    uintptr_t cq_dma_base,
1532                                    struct dlb2_create_dir_port_args *args,
1533                                    bool vdev_req,
1534                                    unsigned int vdev_id)
1535 {
1536         int ret;
1537
1538         ret = dlb2_dir_port_configure_cq(hw,
1539                                          domain,
1540                                          port,
1541                                          cq_dma_base,
1542                                          args,
1543                                          vdev_req,
1544                                          vdev_id);
1545
1546         if (ret < 0)
1547                 return ret;
1548
1549         dlb2_dir_port_configure_pp(hw,
1550                                    domain,
1551                                    port,
1552                                    vdev_req,
1553                                    vdev_id);
1554
1555         dlb2_dir_port_cq_enable(hw, port);
1556
1557         port->enabled = true;
1558
1559         port->port_configured = true;
1560
1561         return 0;
1562 }
1563
1564 /**
1565  * dlb2_hw_create_dir_port() - Allocate and initialize a DLB directed port
1566  *      and queue. The port/queue pair share the same ID.
1567  * @hw: Contains the current state of the DLB2 hardware.
1568  * @domain_id: Domain ID
1569  * @args: User-provided arguments.
1570  * @cq_dma_base: Base DMA address for consumer queue memory
1571  * @resp: Response to user.
1572  * @vdev_req: Request came from a virtual device.
1573  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
1574  *
1575  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
1576  * satisfy a request, resp->status will be set accordingly.
1577  */
1578 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
1579                             u32 domain_id,
1580                             struct dlb2_create_dir_port_args *args,
1581                             uintptr_t cq_dma_base,
1582                             struct dlb2_cmd_response *resp,
1583                             bool vdev_req,
1584                             unsigned int vdev_id)
1585 {
1586         struct dlb2_dir_pq_pair *port;
1587         struct dlb2_hw_domain *domain;
1588         int ret;
1589
1590         dlb2_log_create_dir_port_args(hw,
1591                                       domain_id,
1592                                       cq_dma_base,
1593                                       args,
1594                                       vdev_req,
1595                                       vdev_id);
1596
1597         /*
1598          * Verify that hardware resources are available before attempting to
1599          * satisfy the request. This simplifies the error unwinding code.
1600          */
1601         ret = dlb2_verify_create_dir_port_args(hw,
1602                                                domain_id,
1603                                                cq_dma_base,
1604                                                args,
1605                                                resp,
1606                                                vdev_req,
1607                                                vdev_id);
1608         if (ret)
1609                 return ret;
1610
1611         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
        if (domain == NULL) {
                DLB2_HW_ERR(hw,
                            "[%s():%d] Internal error: domain not found\n",
                            __func__, __LINE__);
                return -EFAULT;
        }
1612
1613         if (args->queue_id != -1)
1614                 port = dlb2_get_domain_used_dir_pq(hw,
1615                                                    args->queue_id,
1616                                                    vdev_req,
1617                                                    domain);
1618         else
1619                 port = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
1620                                           typeof(*port));
1621         if (port == NULL) {
1622                 DLB2_HW_ERR(hw,
1623                             "[%s():%d] Internal error: no available dir ports\n",
1624                             __func__, __LINE__);
1625                 return -EFAULT;
1626         }
1627
1628         ret = dlb2_configure_dir_port(hw,
1629                                       domain,
1630                                       port,
1631                                       cq_dma_base,
1632                                       args,
1633                                       vdev_req,
1634                                       vdev_id);
1635         if (ret < 0)
1636                 return ret;
1637
1638         /*
1639          * Configuration succeeded, so move the resource from the 'avail' to
1640          * the 'used' list (if it's not already there).
1641          */
1642         if (args->queue_id == -1) {
1643                 dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
1644
1645                 dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
1646         }
1647
1648         resp->status = 0;
1649         resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
1650
1651         return 0;
1652 }
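
/*
 * Caller-side sketch (illustrative, not part of this file): a PF ioctl
 * handler could drive the function above roughly as follows, where 'cq_base'
 * is the IOVA of pre-allocated, 64B-aligned CQ memory:
 *
 *      struct dlb2_create_dir_port_args args = { 0 };
 *      struct dlb2_cmd_response resp = { 0 };
 *      int ret;
 *
 *      args.queue_id = -1;     // let the driver pick a free port/queue pair
 *      args.cq_depth = 64;
 *
 *      ret = dlb2_hw_create_dir_port(hw, domain_id, &args, cq_base,
 *                                    &resp, false, 0);
 *
 * On success, resp.id holds the port ID (physical for the PF, virtual for a
 * vdev); on failure, resp.status holds the DLB2_ST_* reason.
 */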
1653
1654 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
1655                                      struct dlb2_hw_domain *domain,
1656                                      struct dlb2_dir_pq_pair *queue,
1657                                      struct dlb2_create_dir_queue_args *args,
1658                                      bool vdev_req,
1659                                      unsigned int vdev_id)
1660 {
1661         union dlb2_sys_dir_vasqid_v r0 = { {0} };
1662         union dlb2_sys_dir_qid_its r1 = { {0} };
1663         union dlb2_lsp_qid_dir_depth_thrsh r2 = { {0} };
1664         union dlb2_sys_dir_qid_v r5 = { {0} };
1665
1666         unsigned int offs;
1667
1668         /* QID write permissions are turned on when the domain is started */
1669         r0.field.vasqid_v = 0;
1670
1671         offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
1672                 queue->id.phys_id;
1673
1674         DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
1675
1676         /* Don't timestamp QEs that pass through this queue */
1677         r1.field.qid_its = 0;
1678
1679         DLB2_CSR_WR(hw,
1680                     DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
1681                     r1.val);
1682
1683         r2.field.thresh = args->depth_threshold;
1684
1685         DLB2_CSR_WR(hw,
1686                     DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
1687                     r2.val);
1688
1689         if (vdev_req) {
1690                 union dlb2_sys_vf_dir_vqid_v r3 = { {0} };
1691                 union dlb2_sys_vf_dir_vqid2qid r4 = { {0} };
1692
1693                 offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver)
1694                         + queue->id.virt_id;
1695
1696                 r3.field.vqid_v = 1;
1697
1698                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), r3.val);
1699
1700                 r4.field.qid = queue->id.phys_id;
1701
1702                 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), r4.val);
1703         }
1704
1705         r5.field.qid_v = 1;
1706
1707         DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), r5.val);
1708
1709         queue->queue_configured = true;
1710 }
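
/*
 * Illustrative note on the vdev_req block above: each vdev gets a private
 * virtual queue ID (VQID) space, so the VQID_V/VQID2QID tables are indexed
 * by vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) + virt_id. Assuming, e.g.,
 * 96 directed queues per device (DLB 2.5), vdev 1 exposing virtual queue 3
 * programs entry 1 * 96 + 3 = 99, marking the VQID valid and pointing it at
 * the queue's physical QID.
 */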
1711
1712 static void
1713 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
1714                                u32 domain_id,
1715                                struct dlb2_create_dir_queue_args *args,
1716                                bool vdev_req,
1717                                unsigned int vdev_id)
1718 {
1719         DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
1720         if (vdev_req)
1721                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
1722         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
1723         DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
1724 }
1725
1726 static int
1727 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
1728                                   u32 domain_id,
1729                                   struct dlb2_create_dir_queue_args *args,
1730                                   struct dlb2_cmd_response *resp,
1731                                   bool vdev_req,
1732                                   unsigned int vdev_id)
1733 {
1734         struct dlb2_hw_domain *domain;
1735
1736         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
1737
1738         if (domain == NULL) {
1739                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
1740                 return -EINVAL;
1741         }
1742
1743         if (!domain->configured) {
1744                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
1745                 return -EINVAL;
1746         }
1747
1748         if (domain->started) {
1749                 resp->status = DLB2_ST_DOMAIN_STARTED;
1750                 return -EINVAL;
1751         }
1752
1753         /*
1754          * If the user supplied a port ID, check that the port exists, belongs
1755          * to this domain, and has already been configured.
1756          */
1757         if (args->port_id != -1) {
1758                 struct dlb2_dir_pq_pair *port;
1759
1760                 port = dlb2_get_domain_used_dir_pq(hw,
1761                                                    args->port_id,
1762                                                    vdev_req,
1763                                                    domain);
1764
1765                 if (port == NULL || port->domain_id.phys_id !=
1766                                 domain->id.phys_id || !port->port_configured) {
1767                         resp->status = DLB2_ST_INVALID_PORT_ID;
1768                         return -EINVAL;
1769                 }
1770         }
1771
1772         /*
1773          * If the queue's port is not configured, validate that a free
1774          * port-queue pair is available.
1775          */
1776         if (args->port_id == -1 &&
1777             dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
1778                 resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
1779                 return -EINVAL;
1780         }
1781
1782         return 0;
1783 }
1784
1785 /**
1786  * dlb2_hw_create_dir_queue() - Allocate and initialize a DLB directed queue.
1787  * @hw: Contains the current state of the DLB2 hardware.
1788  * @domain_id: Domain ID
1789  * @args: User-provided arguments.
1790  * @resp: Response to user.
1791  * @vdev_req: Request came from a virtual device.
1792  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
1793  *
1794  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
1795  * satisfy a request, resp->status will be set accordingly.
1796  */
1797 int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
1798                              u32 domain_id,
1799                              struct dlb2_create_dir_queue_args *args,
1800                              struct dlb2_cmd_response *resp,
1801                              bool vdev_req,
1802                              unsigned int vdev_id)
1803 {
1804         struct dlb2_dir_pq_pair *queue;
1805         struct dlb2_hw_domain *domain;
1806         int ret;
1807
1808         dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
1809
1810         /*
1811          * Verify that hardware resources are available before attempting to
1812          * satisfy the request. This simplifies the error unwinding code.
1813          */
1814         ret = dlb2_verify_create_dir_queue_args(hw,
1815                                                 domain_id,
1816                                                 args,
1817                                                 resp,
1818                                                 vdev_req,
1819                                                 vdev_id);
1820         if (ret)
1821                 return ret;
1822
1823         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
1824         if (domain == NULL) {
1825                 DLB2_HW_ERR(hw,
1826                             "[%s():%d] Internal error: domain not found\n",
1827                             __func__, __LINE__);
1828                 return -EFAULT;
1829         }
1830
1831         if (args->port_id != -1)
1832                 queue = dlb2_get_domain_used_dir_pq(hw,
1833                                                     args->port_id,
1834                                                     vdev_req,
1835                                                     domain);
1836         else
1837                 queue = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
1838                                            typeof(*queue));
1839         if (queue == NULL) {
1840                 DLB2_HW_ERR(hw,
1841                             "[%s():%d] Internal error: no available dir queues\n",
1842                             __func__, __LINE__);
1843                 return -EFAULT;
1844         }
1845
1846         dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
1847
1848         /*
1849          * Configuration succeeded, so move the resource from the 'avail' to
1850          * the 'used' list (if it's not already there).
1851          */
1852         if (args->port_id == -1) {
1853                 dlb2_list_del(&domain->avail_dir_pq_pairs,
1854                               &queue->domain_list);
1855
1856                 dlb2_list_add(&domain->used_dir_pq_pairs,
1857                               &queue->domain_list);
1858         }
1859
1860         resp->status = 0;
1861
1862         resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
1863
1864         return 0;
1865 }
1866
1867 static bool
1868 dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
1869                                            struct dlb2_ldb_queue *queue,
1870                                            int *slot)
1871 {
1872         int i;
1873
1874         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1875                 struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
1876
1877                 if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
1878                     map->pending_qid == queue->id.phys_id)
1879                         break;
1880         }
1881
1882         *slot = i;
1883
1884         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1885 }
1886
1887 static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
1888                                               struct dlb2_ldb_port *port,
1889                                               int slot,
1890                                               struct dlb2_map_qid_args *args)
1891 {
1892         union dlb2_lsp_cq2priov r0;
1893
1894         /* Read-modify-write the priority and valid bit register */
1895         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id));
1896
1897         r0.field.v |= 1 << slot;
1898         r0.field.prio &= ~(0x7 << slot * 3);
        r0.field.prio |= (args->priority & 0x7) << slot * 3;
1899
1900         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r0.val);
1901
1902         dlb2_flush_csr(hw);
1903
1904         port->qid_map[slot].priority = args->priority;
1905 }
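
/*
 * Layout note (illustrative): CQ2PRIOV packs one valid bit per CQ slot in
 * 'v' and a 3-bit priority per slot in 'prio'. For slot 2 and priority 5,
 * the function above sets bit 2 of 'v' and writes 0x5 into 'prio' bits
 * [8:6] (slot * 3 = 6).
 */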
1906
1907 static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
1908                                               struct dlb2_ldb_queue *queue,
1909                                               struct dlb2_cmd_response *resp)
1910 {
1911         enum dlb2_qid_map_state state;
1912         int i;
1913
1914         /* Unused slot available? */
1915         if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
1916                 return 0;
1917
1918         /*
1919          * If the queue is already mapped (from the application's perspective),
1920          * this is simply a priority update.
1921          */
1922         state = DLB2_QUEUE_MAPPED;
1923         if (dlb2_port_find_slot_queue(port, state, queue, &i))
1924                 return 0;
1925
1926         state = DLB2_QUEUE_MAP_IN_PROG;
1927         if (dlb2_port_find_slot_queue(port, state, queue, &i))
1928                 return 0;
1929
1930         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
1931                 return 0;
1932
1933         /*
1934          * If the slot contains an unmap in progress, it's considered
1935          * available.
1936          */
1937         state = DLB2_QUEUE_UNMAP_IN_PROG;
1938         if (dlb2_port_find_slot(port, state, &i))
1939                 return 0;
1940
1941         state = DLB2_QUEUE_UNMAPPED;
1942         if (dlb2_port_find_slot(port, state, &i))
1943                 return 0;
1944
1945         resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
1946         return -EINVAL;
1947 }
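
/*
 * For reference, the per-slot states tested above and in the map/unmap paths
 * below follow this lifecycle (sketch):
 *
 *      UNMAPPED -> MAP_IN_PROG -> MAPPED -> UNMAP_IN_PROG -> UNMAPPED
 *                                                |
 *                                                v
 *                             UNMAP_IN_PROG_PENDING_MAP -> MAP_IN_PROG
 *
 * A slot is considered available if it is unused, UNMAPPED, or has an unmap
 * in progress; if the requested queue already occupies a slot, the request
 * is treated as a priority update instead.
 */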
1948
1949 static struct dlb2_ldb_queue *
1950 dlb2_get_domain_ldb_queue(u32 id,
1951                           bool vdev_req,
1952                           struct dlb2_hw_domain *domain)
1953 {
1954         struct dlb2_list_entry *iter;
1955         struct dlb2_ldb_queue *queue;
1956         RTE_SET_USED(iter);
1957
1958         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1959                 return NULL;
1960
1961         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
1962                 if ((!vdev_req && queue->id.phys_id == id) ||
1963                     (vdev_req && queue->id.virt_id == id))
1964                         return queue;
1965
1966         return NULL;
1967 }
1968
1969 static struct dlb2_ldb_port *
1970 dlb2_get_domain_used_ldb_port(u32 id,
1971                               bool vdev_req,
1972                               struct dlb2_hw_domain *domain)
1973 {
1974         struct dlb2_list_entry *iter;
1975         struct dlb2_ldb_port *port;
1976         int i;
1977         RTE_SET_USED(iter);
1978
1979         if (id >= DLB2_MAX_NUM_LDB_PORTS)
1980                 return NULL;
1981
1982         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1983                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
1984                         if ((!vdev_req && port->id.phys_id == id) ||
1985                             (vdev_req && port->id.virt_id == id))
1986                                 return port;
1987
1988                 DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter)
1989                         if ((!vdev_req && port->id.phys_id == id) ||
1990                             (vdev_req && port->id.virt_id == id))
1991                                 return port;
1992         }
1993
1994         return NULL;
1995 }
1996
1997 static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
1998                                     u32 domain_id,
1999                                     struct dlb2_map_qid_args *args,
2000                                     struct dlb2_cmd_response *resp,
2001                                     bool vdev_req,
2002                                     unsigned int vdev_id)
2003 {
2004         struct dlb2_hw_domain *domain;
2005         struct dlb2_ldb_port *port;
2006         struct dlb2_ldb_queue *queue;
2007         int id;
2008
2009         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2010
2011         if (domain == NULL) {
2012                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2013                 return -EINVAL;
2014         }
2015
2016         if (!domain->configured) {
2017                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
2018                 return -EINVAL;
2019         }
2020
2021         id = args->port_id;
2022
2023         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
2024
2025         if (port == NULL || !port->configured) {
2026                 resp->status = DLB2_ST_INVALID_PORT_ID;
2027                 return -EINVAL;
2028         }
2029
2030         if (args->priority >= DLB2_QID_PRIORITIES) {
2031                 resp->status = DLB2_ST_INVALID_PRIORITY;
2032                 return -EINVAL;
2033         }
2034
2035         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
2036
2037         if (queue == NULL || !queue->configured) {
2038                 resp->status = DLB2_ST_INVALID_QID;
2039                 return -EINVAL;
2040         }
2041
2042         if (queue->domain_id.phys_id != domain->id.phys_id) {
2043                 resp->status = DLB2_ST_INVALID_QID;
2044                 return -EINVAL;
2045         }
2046
2047         if (port->domain_id.phys_id != domain->id.phys_id) {
2048                 resp->status = DLB2_ST_INVALID_PORT_ID;
2049                 return -EINVAL;
2050         }
2051
2052         return 0;
2053 }
2054
2055 static void dlb2_log_map_qid(struct dlb2_hw *hw,
2056                              u32 domain_id,
2057                              struct dlb2_map_qid_args *args,
2058                              bool vdev_req,
2059                              unsigned int vdev_id)
2060 {
2061         DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
2062         if (vdev_req)
2063                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2064         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
2065                     domain_id);
2066         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
2067                     args->port_id);
2068         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
2069                     args->qid);
2070         DLB2_HW_DBG(hw, "\tPriority:  %d\n",
2071                     args->priority);
2072 }
2073
2074 int dlb2_hw_map_qid(struct dlb2_hw *hw,
2075                     u32 domain_id,
2076                     struct dlb2_map_qid_args *args,
2077                     struct dlb2_cmd_response *resp,
2078                     bool vdev_req,
2079                     unsigned int vdev_id)
2080 {
2081         struct dlb2_hw_domain *domain;
2082         struct dlb2_ldb_queue *queue;
2083         enum dlb2_qid_map_state st;
2084         struct dlb2_ldb_port *port;
2085         int ret, i, id;
2086         u8 prio;
2087
2088         dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
2089
2090         /*
2091          * Verify that hardware resources are available before attempting to
2092          * satisfy the request. This simplifies the error unwinding code.
2093          */
2094         ret = dlb2_verify_map_qid_args(hw,
2095                                        domain_id,
2096                                        args,
2097                                        resp,
2098                                        vdev_req,
2099                                        vdev_id);
2100         if (ret)
2101                 return ret;
2102
2103         prio = args->priority;
2104
2105         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2106         if (domain == NULL) {
2107                 DLB2_HW_ERR(hw,
2108                             "[%s():%d] Internal error: domain not found\n",
2109                             __func__, __LINE__);
2110                 return -EFAULT;
2111         }
2112
2113         id = args->port_id;
2114
2115         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
2116         if (port == NULL) {
2117                 DLB2_HW_ERR(hw,
2118                             "[%s():%d] Internal error: port not found\n",
2119                             __func__, __LINE__);
2120                 return -EFAULT;
2121         }
2122
2123         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
2124         if (queue == NULL) {
2125                 DLB2_HW_ERR(hw,
2126                             "[%s():%d] Internal error: queue not found\n",
2127                             __func__, __LINE__);
2128                 return -EFAULT;
2129         }
2130
2131         /*
2132          * If there are any outstanding detach operations for this port,
2133          * attempt to complete them. This may be necessary to free up a QID
2134          * slot for this requested mapping.
2135          */
2136         if (port->num_pending_removals)
2137                 dlb2_domain_finish_unmap_port(hw, domain, port);
2138
2139         ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
2140         if (ret)
2141                 return ret;
2142
2143         /* Hardware requires disabling the CQ before mapping QIDs. */
2144         if (port->enabled)
2145                 dlb2_ldb_port_cq_disable(hw, port);
2146
2147         /*
2148          * If this is only a priority change, don't perform the full QID->CQ
2149          * mapping procedure.
2150          */
2151         st = DLB2_QUEUE_MAPPED;
2152         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
2153                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2154                         DLB2_HW_ERR(hw,
2155                                     "[%s():%d] Internal error: port slot tracking failed\n",
2156                                     __func__, __LINE__);
2157                         return -EFAULT;
2158                 }
2159
2160                 if (prio != port->qid_map[i].priority) {
2161                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
2162                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
2163                 }
2164
2165                 st = DLB2_QUEUE_MAPPED;
2166                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
2167                 if (ret)
2168                         return ret;
2169
2170                 goto map_qid_done;
2171         }
2172
2173         st = DLB2_QUEUE_UNMAP_IN_PROG;
2174         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
2175                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2176                         DLB2_HW_ERR(hw,
2177                                     "[%s():%d] Internal error: port slot tracking failed\n",
2178                                     __func__, __LINE__);
2179                         return -EFAULT;
2180                 }
2181
2182                 if (prio != port->qid_map[i].priority) {
2183                         dlb2_ldb_port_change_qid_priority(hw, port, i, args);
2184                         DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
2185                 }
2186
2187                 st = DLB2_QUEUE_MAPPED;
2188                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
2189                 if (ret)
2190                         return ret;
2191
2192                 goto map_qid_done;
2193         }
2194
2195         /*
2196          * If this is a priority change on an in-progress mapping, don't
2197          * perform the full QID->CQ mapping procedure.
2198          */
2199         st = DLB2_QUEUE_MAP_IN_PROG;
2200         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
2201                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2202                         DLB2_HW_ERR(hw,
2203                                     "[%s():%d] Internal error: port slot tracking failed\n",
2204                                     __func__, __LINE__);
2205                         return -EFAULT;
2206                 }
2207
2208                 port->qid_map[i].priority = prio;
2209
2210                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
2211
2212                 goto map_qid_done;
2213         }
2214
2215         /*
2216          * If this is a priority change on a pending mapping, update the
2217          * pending priority.
2218          */
2219         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
2220                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2221                         DLB2_HW_ERR(hw,
2222                                     "[%s():%d] Internal error: port slot tracking failed\n",
2223                                     __func__, __LINE__);
2224                         return -EFAULT;
2225                 }
2226
2227                 port->qid_map[i].pending_priority = prio;
2228
2229                 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
2230
2231                 goto map_qid_done;
2232         }
2233
2234         /*
2235          * If all the CQ's slots are in use, then there's an unmap in progress
2236          * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
2237          * mapping to pending_map and return. When the removal is completed for
2238          * the slot's current occupant, this mapping will be performed.
2239          */
2240         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
2241                 if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
2244                         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2245                                 DLB2_HW_ERR(hw,
2246                                             "[%s():%d] Internal error: port slot tracking failed\n",
2247                                             __func__, __LINE__);
2248                                 return -EFAULT;
2249                         }
2250
2251                         port->qid_map[i].pending_qid = queue->id.phys_id;
2252                         port->qid_map[i].pending_priority = prio;
2253
2254                         st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
2255
2256                         ret = dlb2_port_slot_state_transition(hw, port, queue,
2257                                                               i, st);
2258                         if (ret)
2259                                 return ret;
2260
2261                         DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
2262
2263                         goto map_qid_done;
2264                 }
2265         }
2266
2267         /*
2268          * If the domain has started, a special "dynamic" CQ->queue mapping
2269          * procedure is required in order to safely update the CQ<->QID tables.
2270          * The "static" procedure cannot be used when traffic is flowing,
2271          * because the CQ<->QID tables cannot be updated atomically and the
2272          * scheduler won't see the new mapping unless the queue's if_status
2273          * changes, which isn't guaranteed.
2274          */
2275         ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
2276
2277         /* If ret is less than zero, it's due to an internal error */
2278         if (ret < 0)
2279                 return ret;
2280
2281 map_qid_done:
2282         if (port->enabled)
2283                 dlb2_ldb_port_cq_enable(hw, port);
2284
2285         resp->status = 0;
2286
2287         return 0;
2288 }
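
/*
 * Usage sketch (illustrative): remapping an already-mapped queue at a new
 * priority exercises only the fast paths above; no CQ drain is required:
 *
 *      struct dlb2_map_qid_args args = { 0 };
 *
 *      args.port_id = port_id;
 *      args.qid = qid;
 *      args.priority = 0;      // valid range: 0 .. DLB2_QID_PRIORITIES - 1
 *
 *      ret = dlb2_hw_map_qid(hw, domain_id, &args, &resp, false, 0);
 */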
2289
2290 static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
2291                                u32 domain_id,
2292                                struct dlb2_unmap_qid_args *args,
2293                                bool vdev_req,
2294                                unsigned int vdev_id)
2295 {
2296         DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
2297         if (vdev_req)
2298                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2299         DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
2300                     domain_id);
2301         DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
2302                     args->port_id);
2303         DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
2304                     args->qid);
2305         if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
2306                 DLB2_HW_DBG(hw, "\tQueue's num mappings:  %d\n",
2307                             hw->rsrcs.ldb_queues[args->qid].num_mappings);
2308 }
2309
2310 static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
2311                                       u32 domain_id,
2312                                       struct dlb2_unmap_qid_args *args,
2313                                       struct dlb2_cmd_response *resp,
2314                                       bool vdev_req,
2315                                       unsigned int vdev_id)
2316 {
2317         enum dlb2_qid_map_state state;
2318         struct dlb2_hw_domain *domain;
2319         struct dlb2_ldb_queue *queue;
2320         struct dlb2_ldb_port *port;
2321         int slot;
2322         int id;
2323
2324         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2325
2326         if (domain == NULL) {
2327                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2328                 return -EINVAL;
2329         }
2330
2331         if (!domain->configured) {
2332                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
2333                 return -EINVAL;
2334         }
2335
2336         id = args->port_id;
2337
2338         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
2339
2340         if (port == NULL || !port->configured) {
2341                 resp->status = DLB2_ST_INVALID_PORT_ID;
2342                 return -EINVAL;
2343         }
2344
2345         if (port->domain_id.phys_id != domain->id.phys_id) {
2346                 resp->status = DLB2_ST_INVALID_PORT_ID;
2347                 return -EINVAL;
2348         }
2349
2350         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
2351
2352         if (queue == NULL || !queue->configured) {
2353                 DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
2354                             __func__, args->qid);
2355                 resp->status = DLB2_ST_INVALID_QID;
2356                 return -EINVAL;
2357         }
2358
2359         /*
2360          * Verify that the port has the queue mapped. From the application's
2361          * perspective a queue is mapped if it is actually mapped, the map is
2362          * in progress, or the map is blocked pending an unmap.
2363          */
2364         state = DLB2_QUEUE_MAPPED;
2365         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
2366                 return 0;
2367
2368         state = DLB2_QUEUE_MAP_IN_PROG;
2369         if (dlb2_port_find_slot_queue(port, state, queue, &slot))
2370                 return 0;
2371
2372         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
2373                 return 0;
2374
2375         resp->status = DLB2_ST_INVALID_QID;
2376         return -EINVAL;
2377 }
2378
2379 int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
2380                       u32 domain_id,
2381                       struct dlb2_unmap_qid_args *args,
2382                       struct dlb2_cmd_response *resp,
2383                       bool vdev_req,
2384                       unsigned int vdev_id)
2385 {
2386         struct dlb2_hw_domain *domain;
2387         struct dlb2_ldb_queue *queue;
2388         enum dlb2_qid_map_state st;
2389         struct dlb2_ldb_port *port;
2390         bool unmap_complete;
2391         int i, ret, id;
2392
2393         dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
2394
2395         /*
2396          * Verify that hardware resources are available before attempting to
2397          * satisfy the request. This simplifies the error unwinding code.
2398          */
2399         ret = dlb2_verify_unmap_qid_args(hw,
2400                                          domain_id,
2401                                          args,
2402                                          resp,
2403                                          vdev_req,
2404                                          vdev_id);
2405         if (ret)
2406                 return ret;
2407
2408         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2409         if (domain == NULL) {
2410                 DLB2_HW_ERR(hw,
2411                             "[%s():%d] Internal error: domain not found\n",
2412                             __func__, __LINE__);
2413                 return -EFAULT;
2414         }
2415
2416         id = args->port_id;
2417
2418         port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
2419         if (port == NULL) {
2420                 DLB2_HW_ERR(hw,
2421                             "[%s():%d] Internal error: port not found\n",
2422                             __func__, __LINE__);
2423                 return -EFAULT;
2424         }
2425
2426         queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
2427         if (queue == NULL) {
2428                 DLB2_HW_ERR(hw,
2429                             "[%s():%d] Internal error: queue not found\n",
2430                             __func__, __LINE__);
2431                 return -EFAULT;
2432         }
2433
2434         /*
2435          * If the queue hasn't been mapped yet, we need to update the slot's
2436          * state and re-enable the queue's inflights.
2437          */
2438         st = DLB2_QUEUE_MAP_IN_PROG;
2439         if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
2440                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2441                         DLB2_HW_ERR(hw,
2442                                     "[%s():%d] Internal error: port slot tracking failed\n",
2443                                     __func__, __LINE__);
2444                         return -EFAULT;
2445                 }
2446
2447                 /*
2448                  * Since the in-progress map was aborted, re-enable the QID's
2449                  * inflights.
2450                  */
2451                 if (queue->num_pending_additions == 0)
2452                         dlb2_ldb_queue_set_inflight_limit(hw, queue);
2453
2454                 st = DLB2_QUEUE_UNMAPPED;
2455                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
2456                 if (ret)
2457                         return ret;
2458
2459                 goto unmap_qid_done;
2460         }
2461
2462         /*
2463          * If the queue mapping is on hold pending an unmap, we simply need to
2464          * update the slot's state.
2465          */
2466         if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
2467                 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2468                         DLB2_HW_ERR(hw,
2469                                     "[%s():%d] Internal error: port slot tracking failed\n",
2470                                     __func__, __LINE__);
2471                         return -EFAULT;
2472                 }
2473
2474                 st = DLB2_QUEUE_UNMAP_IN_PROG;
2475                 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
2476                 if (ret)
2477                         return ret;
2478
2479                 goto unmap_qid_done;
2480         }
2481
2482         st = DLB2_QUEUE_MAPPED;
2483         if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
2484                 DLB2_HW_ERR(hw,
2485                             "[%s()] Internal error: no available CQ slots\n",
2486                             __func__);
2487                 return -EFAULT;
2488         }
2489
2490         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2491                 DLB2_HW_ERR(hw,
2492                             "[%s():%d] Internal error: port slot tracking failed\n",
2493                             __func__, __LINE__);
2494                 return -EFAULT;
2495         }
2496
2497         /*
2498          * QID->CQ mapping removal is an asynchronous procedure. It requires
2499          * stopping the DLB2 from scheduling this CQ, draining all inflights
2500          * from the CQ, then unmapping the queue from the CQ. This function
2501          * simply marks the port as needing the queue unmapped, and (if
2502          * necessary) starts the unmapping worker thread.
2503          */
2504         dlb2_ldb_port_cq_disable(hw, port);
2505
2506         st = DLB2_QUEUE_UNMAP_IN_PROG;
2507         ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
2508         if (ret)
2509                 return ret;
2510
2511         /*
2512          * Attempt to finish the unmapping now, in case the port has no
2513          * outstanding inflights. If that's not the case, this will fail and
2514          * the unmapping will be completed at a later time.
2515          */
2516         unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
2517
2518         /*
2519          * If the unmapping couldn't complete immediately, launch the worker
2520          * thread (if it isn't already launched) to finish it later.
2521          */
2522         if (!unmap_complete && !os_worker_active(hw))
2523                 os_schedule_work(hw);
2524
2525 unmap_qid_done:
2526         resp->status = 0;
2527
2528         return 0;
2529 }
2530
2531 static void
2532 dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
2533                                   struct dlb2_pending_port_unmaps_args *args,
2534                                   bool vdev_req,
2535                                   unsigned int vdev_id)
2536 {
2537         DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
2538         if (vdev_req)
2539                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
2540         DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
2541 }
2542
2543 int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
2544                                 u32 domain_id,
2545                                 struct dlb2_pending_port_unmaps_args *args,
2546                                 struct dlb2_cmd_response *resp,
2547                                 bool vdev_req,
2548                                 unsigned int vdev_id)
2549 {
2550         struct dlb2_hw_domain *domain;
2551         struct dlb2_ldb_port *port;
2552
2553         dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
2554
2555         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2556
2557         if (domain == NULL) {
2558                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2559                 return -EINVAL;
2560         }
2561
2562         port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
2563         if (port == NULL || !port->configured) {
2564                 resp->status = DLB2_ST_INVALID_PORT_ID;
2565                 return -EINVAL;
2566         }
2567
2568         resp->id = port->num_pending_removals;
2569
2570         return 0;
2571 }
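
/*
 * Polling sketch (illustrative): since QID unmapping is asynchronous, a
 * caller that must know when a detach has finished can poll the query above:
 *
 *      do {
 *              struct dlb2_pending_port_unmaps_args args = { .port_id = id };
 *
 *              ret = dlb2_hw_pending_port_unmaps(hw, domain_id, &args,
 *                                                &resp, false, 0);
 *      } while (ret == 0 && resp.id > 0);
 *
 * resp.id reports the number of unmap operations still in flight for the
 * port.
 */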
2572
2573 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
2574                                          u32 domain_id,
2575                                          struct dlb2_cmd_response *resp,
2576                                          bool vdev_req,
2577                                          unsigned int vdev_id)
2578 {
2579         struct dlb2_hw_domain *domain;
2580
2581         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2582
2583         if (domain == NULL) {
2584                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2585                 return -EINVAL;
2586         }
2587
2588         if (!domain->configured) {
2589                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
2590                 return -EINVAL;
2591         }
2592
2593         if (domain->started) {
2594                 resp->status = DLB2_ST_DOMAIN_STARTED;
2595                 return -EINVAL;
2596         }
2597
2598         return 0;
2599 }
2600
2601 static void dlb2_log_start_domain(struct dlb2_hw *hw,
2602                                   u32 domain_id,
2603                                   bool vdev_req,
2604                                   unsigned int vdev_id)
2605 {
2606         DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
2607         if (vdev_req)
2608                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2609         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2610 }
2611
2612 /**
2613  * dlb2_hw_start_domain() - Lock the domain configuration
2614  * @hw: Contains the current state of the DLB2 hardware.
2615  * @domain_id: Domain ID
2616  * @arg: User-provided arguments (unused, here for ioctl callback template).
2617  * @resp: Response to user.
2618  * @vdev_req: Request came from a virtual device.
2619  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
2620  *
2621  * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
2622  * satisfy a request, resp->status will be set accordingly.
2623  */
2624 int
2625 dlb2_hw_start_domain(struct dlb2_hw *hw,
2626                      u32 domain_id,
2627                      struct dlb2_start_domain_args *arg,
2628                      struct dlb2_cmd_response *resp,
2629                      bool vdev_req,
2630                      unsigned int vdev_id)
2631 {
2632         struct dlb2_list_entry *iter;
2633         struct dlb2_dir_pq_pair *dir_queue;
2634         struct dlb2_ldb_queue *ldb_queue;
2635         struct dlb2_hw_domain *domain;
2636         int ret;
2637         RTE_SET_USED(arg);
2638         RTE_SET_USED(iter);
2639
2640         dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
2641
2642         ret = dlb2_verify_start_domain_args(hw,
2643                                             domain_id,
2644                                             resp,
2645                                             vdev_req,
2646                                             vdev_id);
2647         if (ret)
2648                 return ret;
2649
2650         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2651         if (domain == NULL) {
2652                 DLB2_HW_ERR(hw,
2653                             "[%s():%d] Internal error: domain not found\n",
2654                             __func__, __LINE__);
2655                 return -EFAULT;
2656         }
2657
2658         /*
2659          * Enable load-balanced and directed queue write permissions for the
2660          * queues this domain owns. Without this, the DLB2 will drop all
2661          * incoming traffic to those queues.
2662          */
2663         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
2664                 union dlb2_sys_ldb_vasqid_v r0 = { {0} };
2665                 unsigned int offs;
2666
2667                 r0.field.vasqid_v = 1;
2668
2669                 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
2670                         ldb_queue->id.phys_id;
2671
2672                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
2673         }
2674
2675         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
2676                 union dlb2_sys_dir_vasqid_v r0 = { {0} };
2677                 unsigned int offs;
2678
2679                 r0.field.vasqid_v = 1;
2680
2681                 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
2682                         dir_queue->id.phys_id;
2683
2684                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
2685         }
2686
2687         dlb2_flush_csr(hw);
2688
2689         domain->started = true;
2690
2691         resp->status = 0;
2692
2693         return 0;
2694 }
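
/*
 * Illustrative note on the VASQID_V indexing above: write permissions form a
 * flat per-(domain, queue) array. Assuming, e.g., 96 directed ports/queues
 * per device (DLB 2.5), starting domain 2 enables entry 2 * 96 + 5 = 197 for
 * its directed queue with physical ID 5.
 */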
2695
2696 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
2697                                          u32 domain_id,
2698                                          u32 queue_id,
2699                                          bool vdev_req,
2700                                          unsigned int vf_id)
2701 {
2702         DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
2703         if (vdev_req)
2704                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
2705         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2706         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
2707 }
2708
2709 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
2710                                 u32 domain_id,
2711                                 struct dlb2_get_dir_queue_depth_args *args,
2712                                 struct dlb2_cmd_response *resp,
2713                                 bool vdev_req,
2714                                 unsigned int vdev_id)
2715 {
2716         struct dlb2_dir_pq_pair *queue;
2717         struct dlb2_hw_domain *domain;
2718         int id;
2719
2720         id = domain_id;
2721
2722         dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
2723                                      vdev_req, vdev_id);
2724
2725         domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
2726         if (domain == NULL) {
2727                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2728                 return -EINVAL;
2729         }
2730
2731         id = args->queue_id;
2732
2733         queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
2734         if (queue == NULL) {
2735                 resp->status = DLB2_ST_INVALID_QID;
2736                 return -EINVAL;
2737         }
2738
2739         resp->id = dlb2_dir_queue_depth(hw, queue);
2740
2741         return 0;
2742 }
2743
2744 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
2745                                          u32 domain_id,
2746                                          u32 queue_id,
2747                                          bool vdev_req,
2748                                          unsigned int vf_id)
2749 {
2750         DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
2751         if (vdev_req)
2752                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
2753         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2754         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
2755 }
2756
2757 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
2758                                 u32 domain_id,
2759                                 struct dlb2_get_ldb_queue_depth_args *args,
2760                                 struct dlb2_cmd_response *resp,
2761                                 bool vdev_req,
2762                                 unsigned int vdev_id)
2763 {
2764         struct dlb2_hw_domain *domain;
2765         struct dlb2_ldb_queue *queue;
2766
2767         dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
2768                                      vdev_req, vdev_id);
2769
2770         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
2771         if (domain == NULL) {
2772                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
2773                 return -EINVAL;
2774         }
2775
2776         queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
2777         if (queue == NULL) {
2778                 resp->status = DLB2_ST_INVALID_QID;
2779                 return -EINVAL;
2780         }
2781
2782         resp->id = dlb2_ldb_queue_depth(hw, queue);
2783
2784         return 0;
2785 }