drivers/event/dlb2/pf/base/dlb2_resource.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include "dlb2_user.h"
6
7 #include "dlb2_hw_types.h"
8 #include "dlb2_osdep.h"
9 #include "dlb2_osdep_bitmap.h"
10 #include "dlb2_osdep_types.h"
11 #include "dlb2_regs.h"
12 #include "dlb2_resource.h"
13
14 #include "../../dlb2_priv.h"
15 #include "../../dlb2_inline_fns.h"
16
17 #define DLB2_DOM_LIST_HEAD(head, type) \
18         DLB2_LIST_HEAD((head), type, domain_list)
19
20 #define DLB2_FUNC_LIST_HEAD(head, type) \
21         DLB2_LIST_HEAD((head), type, func_list)
22
23 #define DLB2_DOM_LIST_FOR(head, ptr, iter) \
24         DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
25
26 #define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
27         DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
28
29 #define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
30         DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
31
32 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
33         DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
34
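/*
 * Sparse CQ mode: the device writes each directed-CQ QE in a full 64-byte
 * cache line rather than packing multiple QEs per line.
 */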
35 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
36 {
37         union dlb2_chp_cfg_chp_csr_ctrl r0;
38
39         r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
40
41         r0.field.cfg_64bytes_qe_dir_cq_mode = 1;
42
43         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
44 }
45
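/* Sparse CQ mode for load-balanced CQs: one 64-byte cache line per QE. */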
46 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
47 {
48         union dlb2_chp_cfg_chp_csr_ctrl r0;
49
50         r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
51
52         r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;
53
54         DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
55 }
56
57 /*
58  * The PF driver cannot assume that a register write will affect subsequent HCW
59  * writes. To ensure a write completes, the driver must read back a CSR. This
60  * function need only be called for configuration that can occur after the
61  * domain has started; prior to starting, applications can't send HCWs.
62  */
63 static inline void dlb2_flush_csr(struct dlb2_hw *hw)
64 {
65         DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
66 }
67
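/* Return the number of QEs currently enqueued to a directed queue. */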
68 static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
69                                 struct dlb2_dir_pq_pair *queue)
70 {
71         union dlb2_lsp_qid_dir_enqueue_cnt r0;
72
73         r0.val = DLB2_CSR_RD(hw,
74                              DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));
75
76         return r0.field.count;
77 }
78
79 static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
80                                     struct dlb2_ldb_port *port)
81 {
82         union dlb2_lsp_cq_ldb_dsbl reg;
83
84         /*
85          * Don't re-enable the port if a removal is pending. The caller should
86          * mark this port as enabled (if it isn't already), and when the
87          * removal completes the port will be enabled.
88          */
89         if (port->num_pending_removals)
90                 return;
91
92         reg.field.disabled = 0;
93
94         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
95
96         dlb2_flush_csr(hw);
97 }
98
99 static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
100                                      struct dlb2_ldb_port *port)
101 {
102         union dlb2_lsp_cq_ldb_dsbl reg;
103
104         reg.field.disabled = 1;
105
106         DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
107
108         dlb2_flush_csr(hw);
109 }
110
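/*
 * A load-balanced queue's depth is the sum of its AQED (atomic-buffered),
 * atomic-active, and enqueued entry counts.
 */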
111 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
112                                 struct dlb2_ldb_queue *queue)
113 {
114         union dlb2_lsp_qid_aqed_active_cnt r0;
115         union dlb2_lsp_qid_atm_active r1;
116         union dlb2_lsp_qid_ldb_enqueue_cnt r2;
117
118         r0.val = DLB2_CSR_RD(hw,
119                              DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
120         r1.val = DLB2_CSR_RD(hw,
121                              DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));
122
123         r2.val = DLB2_CSR_RD(hw,
124                              DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
125
126         return r0.field.count + r1.field.count + r2.field.count;
127 }
128
129 static struct dlb2_ldb_queue *
130 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
131                            u32 id,
132                            bool vdev_req,
133                            unsigned int vdev_id)
134 {
135         struct dlb2_list_entry *iter1;
136         struct dlb2_list_entry *iter2;
137         struct dlb2_function_resources *rsrcs;
138         struct dlb2_hw_domain *domain;
139         struct dlb2_ldb_queue *queue;
140         RTE_SET_USED(iter1);
141         RTE_SET_USED(iter2);
142
143         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
144                 return NULL;
145
146         rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
147
148         if (!vdev_req)
149                 return &hw->rsrcs.ldb_queues[id];
150
151         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
152                 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
153                         if (queue->id.virt_id == id)
154                                 return queue;
155         }
156
157         DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
158                 if (queue->id.virt_id == id)
159                         return queue;
160
161         return NULL;
162 }
163
164 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
165                                                       u32 id,
166                                                       bool vdev_req,
167                                                       unsigned int vdev_id)
168 {
169         struct dlb2_list_entry *iteration;
170         struct dlb2_function_resources *rsrcs;
171         struct dlb2_hw_domain *domain;
172         RTE_SET_USED(iteration);
173
174         if (id >= DLB2_MAX_NUM_DOMAINS)
175                 return NULL;
176
177         if (!vdev_req)
178                 return &hw->domains[id];
179
180         rsrcs = &hw->vdev[vdev_id];
181
182         DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
183                 if (domain->id.virt_id == id)
184                         return domain;
185
186         return NULL;
187 }
188
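/*
 * Validate and apply a slot's QID map state transition, updating the mapping
 * and pending-operation counters on the port, queue, and domain. Returns
 * -EFAULT for an illegal transition.
 */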
189 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
190                                            struct dlb2_ldb_port *port,
191                                            struct dlb2_ldb_queue *queue,
192                                            int slot,
193                                            enum dlb2_qid_map_state new_state)
194 {
195         enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
196         struct dlb2_hw_domain *domain;
197         int domain_id;
198
199         domain_id = port->domain_id.phys_id;
200
201         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
202         if (domain == NULL) {
203                 DLB2_HW_ERR(hw,
204                             "[%s()] Internal error: unable to find domain %d\n",
205                             __func__, domain_id);
206                 return -EINVAL;
207         }
208
209         switch (curr_state) {
210         case DLB2_QUEUE_UNMAPPED:
211                 switch (new_state) {
212                 case DLB2_QUEUE_MAPPED:
213                         queue->num_mappings++;
214                         port->num_mappings++;
215                         break;
216                 case DLB2_QUEUE_MAP_IN_PROG:
217                         queue->num_pending_additions++;
218                         domain->num_pending_additions++;
219                         break;
220                 default:
221                         goto error;
222                 }
223                 break;
224         case DLB2_QUEUE_MAPPED:
225                 switch (new_state) {
226                 case DLB2_QUEUE_UNMAPPED:
227                         queue->num_mappings--;
228                         port->num_mappings--;
229                         break;
230                 case DLB2_QUEUE_UNMAP_IN_PROG:
231                         port->num_pending_removals++;
232                         domain->num_pending_removals++;
233                         break;
234                 case DLB2_QUEUE_MAPPED:
235                         /* Priority change, nothing to update */
236                         break;
237                 default:
238                         goto error;
239                 }
240                 break;
241         case DLB2_QUEUE_MAP_IN_PROG:
242                 switch (new_state) {
243                 case DLB2_QUEUE_UNMAPPED:
244                         queue->num_pending_additions--;
245                         domain->num_pending_additions--;
246                         break;
247                 case DLB2_QUEUE_MAPPED:
248                         queue->num_mappings++;
249                         port->num_mappings++;
250                         queue->num_pending_additions--;
251                         domain->num_pending_additions--;
252                         break;
253                 default:
254                         goto error;
255                 }
256                 break;
257         case DLB2_QUEUE_UNMAP_IN_PROG:
258                 switch (new_state) {
259                 case DLB2_QUEUE_UNMAPPED:
260                         port->num_pending_removals--;
261                         domain->num_pending_removals--;
262                         queue->num_mappings--;
263                         port->num_mappings--;
264                         break;
265                 case DLB2_QUEUE_MAPPED:
266                         port->num_pending_removals--;
267                         domain->num_pending_removals--;
268                         break;
269                 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
270                         /* Nothing to update */
271                         break;
272                 default:
273                         goto error;
274                 }
275                 break;
276         case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
277                 switch (new_state) {
278                 case DLB2_QUEUE_UNMAP_IN_PROG:
279                         /* Nothing to update */
280                         break;
281                 case DLB2_QUEUE_UNMAPPED:
282                         /*
283                          * An UNMAP_IN_PROG_PENDING_MAP slot briefly
284                          * becomes UNMAPPED before it transitions to
285                          * MAP_IN_PROG.
286                          */
287                         queue->num_mappings--;
288                         port->num_mappings--;
289                         port->num_pending_removals--;
290                         domain->num_pending_removals--;
291                         break;
292                 default:
293                         goto error;
294                 }
295                 break;
296         default:
297                 goto error;
298         }
299
300         port->qid_map[slot].state = new_state;
301
302         DLB2_HW_DBG(hw,
303                     "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
304                     __func__, queue->id.phys_id, port->id.phys_id,
305                     curr_state, new_state);
306         return 0;
307
308 error:
309         DLB2_HW_ERR(hw,
310                     "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
311                     __func__, queue->id.phys_id, port->id.phys_id,
312                     curr_state, new_state);
313         return -EFAULT;
314 }
315
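/*
 * Find the first slot on the port in the given state. The slot index is
 * written to *slot; returns true if such a slot exists, false otherwise.
 */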
316 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
317                                 enum dlb2_qid_map_state state,
318                                 int *slot)
319 {
320         int i;
321
322         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
323                 if (port->qid_map[i].state == state)
324                         break;
325         }
326
327         *slot = i;
328
329         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
330 }
331
332 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
333                                       enum dlb2_qid_map_state state,
334                                       struct dlb2_ldb_queue *queue,
335                                       int *slot)
336 {
337         int i;
338
339         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
340                 if (port->qid_map[i].state == state &&
341                     port->qid_map[i].qid == queue->id.phys_id)
342                         break;
343         }
344
345         *slot = i;
346
347         return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
348 }
349
350 /*
351  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
352  * their function names imply, and should only be called by the dynamic CQ
353  * mapping code.
354  */
355 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
356                                               struct dlb2_hw_domain *domain,
357                                               struct dlb2_ldb_queue *queue)
358 {
359         struct dlb2_list_entry *iter;
360         struct dlb2_ldb_port *port;
361         int slot, i;
362         RTE_SET_USED(iter);
363
364         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
365                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
366                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
367
368                         if (!dlb2_port_find_slot_queue(port, state,
369                                                        queue, &slot))
370                                 continue;
371
372                         if (port->enabled)
373                                 dlb2_ldb_port_cq_disable(hw, port);
374                 }
375         }
376 }
377
378 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
379                                              struct dlb2_hw_domain *domain,
380                                              struct dlb2_ldb_queue *queue)
381 {
382         struct dlb2_list_entry *iter;
383         struct dlb2_ldb_port *port;
384         int slot, i;
385         RTE_SET_USED(iter);
386
387         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
388                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
389                         enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
390
391                         if (!dlb2_port_find_slot_queue(port, state,
392                                                        queue, &slot))
393                                 continue;
394
395                         if (port->enabled)
396                                 dlb2_ldb_port_cq_enable(hw, port);
397                 }
398         }
399 }
400
401 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
402                                                 struct dlb2_ldb_port *port,
403                                                 int slot)
404 {
405         union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
406
407         r0.field.cq = port->id.phys_id;
408         r0.field.qidix = slot;
409         r0.field.value = 0;
410         r0.field.inflight_ok_v = 1;
411
412         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
413
414         dlb2_flush_csr(hw);
415 }
416
417 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
418                                               struct dlb2_ldb_port *port,
419                                               int slot)
420 {
421         union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
422
423         r0.field.cq = port->id.phys_id;
424         r0.field.qidix = slot;
425         r0.field.value = 1;
426         r0.field.inflight_ok_v = 1;
427
428         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
429
430         dlb2_flush_csr(hw);
431 }
432
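/*
 * "Static" mapping writes the CQ2PRIOV, CQ2QID, and QID2CQIDX registers
 * directly. It is used before the domain is started, when the queue cannot
 * have inflight entries (and by the dynamic-map code once the queue has been
 * quiesced).
 */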
433 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
434                                         struct dlb2_ldb_port *p,
435                                         struct dlb2_ldb_queue *q,
436                                         u8 priority)
437 {
438         union dlb2_lsp_cq2priov r0;
439         union dlb2_lsp_cq2qid0 r1;
440         union dlb2_atm_qid2cqidix_00 r2;
441         union dlb2_lsp_qid2cqidix_00 r3;
442         union dlb2_lsp_qid2cqidix2_00 r4;
443         enum dlb2_qid_map_state state;
444         int i;
445
446         /* Look for a pending or already mapped slot, else an unused slot */
447         if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
448             !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
449             !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
450                 DLB2_HW_ERR(hw,
451                             "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
452                             __func__, __LINE__);
453                 return -EFAULT;
454         }
455
456         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
457                 DLB2_HW_ERR(hw,
458                             "[%s():%d] Internal error: port slot tracking failed\n",
459                             __func__, __LINE__);
460                 return -EFAULT;
461         }
462
463         /* Read-modify-write the priority and valid bit register */
464         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));
465
466         r0.field.v |= 1 << i;
467         r0.field.prio |= (priority & 0x7) << i * 3;
468
469         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);
470
471         /* Read-modify-write the QID map register */
472         if (i < 4)
473                 r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
474         else
475                 r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));
476
477         if (i == 0 || i == 4)
478                 r1.field.qid_p0 = q->id.phys_id;
479         if (i == 1 || i == 5)
480                 r1.field.qid_p1 = q->id.phys_id;
481         if (i == 2 || i == 6)
482                 r1.field.qid_p2 = q->id.phys_id;
483         if (i == 3 || i == 7)
484                 r1.field.qid_p3 = q->id.phys_id;
485
486         if (i < 4)
487                 DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
488         else
489                 DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);
490
491         r2.val = DLB2_CSR_RD(hw,
492                              DLB2_ATM_QID2CQIDIX(q->id.phys_id,
493                                                  p->id.phys_id / 4));
494
495         r3.val = DLB2_CSR_RD(hw,
496                              DLB2_LSP_QID2CQIDIX(q->id.phys_id,
497                                                  p->id.phys_id / 4));
498
499         r4.val = DLB2_CSR_RD(hw,
500                              DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
501                                                   p->id.phys_id / 4));
502
503         switch (p->id.phys_id % 4) {
504         case 0:
505                 r2.field.cq_p0 |= 1 << i;
506                 r3.field.cq_p0 |= 1 << i;
507                 r4.field.cq_p0 |= 1 << i;
508                 break;
509
510         case 1:
511                 r2.field.cq_p1 |= 1 << i;
512                 r3.field.cq_p1 |= 1 << i;
513                 r4.field.cq_p1 |= 1 << i;
514                 break;
515
516         case 2:
517                 r2.field.cq_p2 |= 1 << i;
518                 r3.field.cq_p2 |= 1 << i;
519                 r4.field.cq_p2 |= 1 << i;
520                 break;
521
522         case 3:
523                 r2.field.cq_p3 |= 1 << i;
524                 r3.field.cq_p3 |= 1 << i;
525                 r4.field.cq_p3 |= 1 << i;
526                 break;
527         }
528
529         DLB2_CSR_WR(hw,
530                     DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
531                     r2.val);
532
533         DLB2_CSR_WR(hw,
534                     DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
535                     r3.val);
536
537         DLB2_CSR_WR(hw,
538                     DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
539                     r4.val);
540
541         dlb2_flush_csr(hw);
542
543         p->qid_map[i].qid = q->id.phys_id;
544         p->qid_map[i].priority = priority;
545
546         state = DLB2_QUEUE_MAPPED;
547
548         return dlb2_port_slot_state_transition(hw, p, q, i, state);
549 }
550
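/*
 * Set the slot's atomic (rlist) and non-atomic (nalb) haswork bits if the
 * queue currently holds entries of the corresponding type, so the newly
 * mapped queue is eligible for scheduling.
 */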
551 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
552                                            struct dlb2_ldb_port *port,
553                                            struct dlb2_ldb_queue *queue,
554                                            int slot)
555 {
556         union dlb2_lsp_qid_aqed_active_cnt r0;
557         union dlb2_lsp_qid_ldb_enqueue_cnt r1;
558         union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
559
560         /* Set the atomic scheduling haswork bit */
561         r0.val = DLB2_CSR_RD(hw,
562                              DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
563
564         r2.field.cq = port->id.phys_id;
565         r2.field.qidix = slot;
566         r2.field.value = 1;
567         r2.field.rlist_haswork_v = r0.field.count > 0;
568
569         /* Set the non-atomic scheduling haswork bit */
570         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
571
572         r1.val = DLB2_CSR_RD(hw,
573                              DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
574
575         memset(&r2, 0, sizeof(r2));
576
577         r2.field.cq = port->id.phys_id;
578         r2.field.qidix = slot;
579         r2.field.value = 1;
580         r2.field.nalb_haswork_v = (r1.field.count > 0);
581
582         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
583
584         dlb2_flush_csr(hw);
585
586         return 0;
587 }
588
589 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
590                                               struct dlb2_ldb_port *port,
591                                               u8 slot)
592 {
593         union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
594
595         r2.field.cq = port->id.phys_id;
596         r2.field.qidix = slot;
597         r2.field.value = 0;
598         r2.field.rlist_haswork_v = 1;
599
600         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
601
602         memset(&r2, 0, sizeof(r2));
603
604         r2.field.cq = port->id.phys_id;
605         r2.field.qidix = slot;
606         r2.field.value = 0;
607         r2.field.nalb_haswork_v = 1;
608
609         DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
610
611         dlb2_flush_csr(hw);
612 }
613
614 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
615                                               struct dlb2_ldb_queue *queue)
616 {
617         union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };
618
619         r0.field.limit = queue->num_qid_inflights;
620
621         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
622 }
623
624 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
625                                                 struct dlb2_ldb_queue *queue)
626 {
627         DLB2_CSR_WR(hw,
628                     DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
629                     DLB2_LSP_QID_LDB_INFL_LIM_RST);
630 }
631
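/*
 * Complete a dynamic map once the queue's inflight count has reached zero:
 * perform the static map, set the haswork bits, restore the queue's inflight
 * limit, and re-enable the CQs mapped to this queue.
 */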
632 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
633                                                 struct dlb2_hw_domain *domain,
634                                                 struct dlb2_ldb_port *port,
635                                                 struct dlb2_ldb_queue *queue)
636 {
637         struct dlb2_list_entry *iter;
638         union dlb2_lsp_qid_ldb_infl_cnt r0;
639         enum dlb2_qid_map_state state;
640         int slot, ret, i;
641         u8 prio;
642         RTE_SET_USED(iter);
643
644         r0.val = DLB2_CSR_RD(hw,
645                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
646
647         if (r0.field.count) {
648                 DLB2_HW_ERR(hw,
649                             "[%s()] Internal error: non-zero QID inflight count\n",
650                             __func__);
651                 return -EINVAL;
652         }
653
654         /*
655          * Statically map the queue to the port and set the slot's has_work bits.
656          */
657         state = DLB2_QUEUE_MAP_IN_PROG;
658         if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
659                 return -EINVAL;
660
661         if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
662                 DLB2_HW_ERR(hw,
663                             "[%s():%d] Internal error: port slot tracking failed\n",
664                             __func__, __LINE__);
665                 return -EFAULT;
666         }
667
668         prio = port->qid_map[slot].priority;
669
670         /*
671          * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
672          * the port's qid_map state.
673          */
674         ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
675         if (ret)
676                 return ret;
677
678         ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
679         if (ret)
680                 return ret;
681
682         /*
683          * Ensure IF_status(cq,qid) is 0 before enabling the port to
684          * prevent spurious schedules from causing the queue's inflight
685          * count to increase.
686          */
687         dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
688
689         /* Reset the queue's inflight status */
690         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
691                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
692                         state = DLB2_QUEUE_MAPPED;
693                         if (!dlb2_port_find_slot_queue(port, state,
694                                                        queue, &slot))
695                                 continue;
696
697                         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
698                 }
699         }
700
701         dlb2_ldb_queue_set_inflight_limit(hw, queue);
702
703         /* Re-enable CQs mapped to this queue */
704         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
705
706         /* If this queue has other mappings pending, clear its inflight limit */
707         if (queue->num_pending_additions > 0)
708                 dlb2_ldb_queue_clear_inflight_limit(hw, queue);
709
710         return 0;
711 }
712
713 /**
714  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
715  * @hw: dlb2_hw handle for a particular device.
716  * @port: load-balanced port
717  * @queue: load-balanced queue
718  * @priority: queue servicing priority
719  *
720  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
721  * at a later point, and <0 if an error occurred.
722  */
723 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
724                                          struct dlb2_ldb_port *port,
725                                          struct dlb2_ldb_queue *queue,
726                                          u8 priority)
727 {
728         union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
729         enum dlb2_qid_map_state state;
730         struct dlb2_hw_domain *domain;
731         int domain_id, slot, ret;
732
733         domain_id = port->domain_id.phys_id;
734
735         domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
736         if (domain == NULL) {
737                 DLB2_HW_ERR(hw,
738                             "[%s()] Internal error: unable to find domain %d\n",
739                             __func__, port->domain_id.phys_id);
740                 return -EINVAL;
741         }
742
743         /*
744          * Set the QID inflight limit to 0 to prevent further scheduling of the
745          * queue.
746          */
747         DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);
748
749         if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
750                 DLB2_HW_ERR(hw,
751                             "Internal error: No available unmapped slots\n");
752                 return -EFAULT;
753         }
754
755         if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
756                 DLB2_HW_ERR(hw,
757                             "[%s():%d] Internal error: port slot tracking failed\n",
758                             __func__, __LINE__);
759                 return -EFAULT;
760         }
761
762         port->qid_map[slot].qid = queue->id.phys_id;
763         port->qid_map[slot].priority = priority;
764
765         state = DLB2_QUEUE_MAP_IN_PROG;
766         ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
767         if (ret)
768                 return ret;
769
770         r0.val = DLB2_CSR_RD(hw,
771                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
772
773         if (r0.field.count) {
774                 /*
775                  * The queue is owed completions so it's not safe to map it
776                  * yet. Schedule a kernel thread to complete the mapping later,
777                  * once software has completed all the queue's inflight events.
778                  */
779                 if (!os_worker_active(hw))
780                         os_schedule_work(hw);
781
782                 return 1;
783         }
784
785         /*
786          * Disable the affected CQ, and the CQs already mapped to the QID,
787          * before reading the QID's inflight count a second time. There is an
788          * unlikely race in which the QID may schedule one more QE after we
789          * read an inflight count of 0, and disabling the CQs guarantees that
790          * the race will not occur after a re-read of the inflight count
791          * register.
792          */
793         if (port->enabled)
794                 dlb2_ldb_port_cq_disable(hw, port);
795
796         dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
797
798         r0.val = DLB2_CSR_RD(hw,
799                              DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
800
801         if (r0.field.count) {
802                 if (port->enabled)
803                         dlb2_ldb_port_cq_enable(hw, port);
804
805                 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
806
807                 /*
808                  * The queue is owed completions so it's not safe to map it
809                  * yet. Schedule a kernel thread to complete the mapping later,
810                  * once software has completed all the queue's inflight events.
811                  */
812                 if (!os_worker_active(hw))
813                         os_schedule_work(hw);
814
815                 return 1;
816         }
817
818         return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
819 }
820
821 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
822                                         struct dlb2_hw_domain *domain,
823                                         struct dlb2_ldb_port *port)
824 {
825         int i;
826
827         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
828                 union dlb2_lsp_qid_ldb_infl_cnt r0;
829                 struct dlb2_ldb_queue *queue;
830                 int qid;
831
832                 if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
833                         continue;
834
835                 qid = port->qid_map[i].qid;
836
837                 queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
838
839                 if (queue == NULL) {
840                         DLB2_HW_ERR(hw,
841                                     "[%s()] Internal error: unable to find queue %d\n",
842                                     __func__, qid);
843                         continue;
844                 }
845
846                 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
847
848                 if (r0.field.count)
849                         continue;
850
851                 /*
852                  * Disable the affected CQ, and the CQs already mapped to the
853                  * QID, before reading the QID's inflight count a second time.
854                  * There is an unlikely race in which the QID may schedule one
855                  * more QE after we read an inflight count of 0, and disabling
856                  * the CQs guarantees that the race will not occur after a
857                  * re-read of the inflight count register.
858                  */
859                 if (port->enabled)
860                         dlb2_ldb_port_cq_disable(hw, port);
861
862                 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
863
864                 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
865
866                 if (r0.field.count) {
867                         if (port->enabled)
868                                 dlb2_ldb_port_cq_enable(hw, port);
869
870                         dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
871
872                         continue;
873                 }
874
875                 dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
876         }
877 }
878
879 static unsigned int
880 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
881                                       struct dlb2_hw_domain *domain)
882 {
883         struct dlb2_list_entry *iter;
884         struct dlb2_ldb_port *port;
885         int i;
886         RTE_SET_USED(iter);
887
888         if (!domain->configured || domain->num_pending_additions == 0)
889                 return 0;
890
891         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
892                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
893                         dlb2_domain_finish_map_port(hw, domain, port);
894         }
895
896         return domain->num_pending_additions;
897 }
898
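/*
 * Clear the slot's valid bit in CQ2PRIOV and remove the CQ from the queue's
 * QID2CQIDX registers, then mark the slot unmapped.
 */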
899 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
900                                    struct dlb2_ldb_port *port,
901                                    struct dlb2_ldb_queue *queue)
902 {
903         enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
904         union dlb2_lsp_cq2priov r0;
905         union dlb2_atm_qid2cqidix_00 r1;
906         union dlb2_lsp_qid2cqidix_00 r2;
907         union dlb2_lsp_qid2cqidix2_00 r3;
908         u32 queue_id;
909         u32 port_id;
910         int i;
911
912         /* Find the queue's slot */
913         mapped = DLB2_QUEUE_MAPPED;
914         in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
915         pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
916
917         if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
918             !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
919             !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
920                 DLB2_HW_ERR(hw,
921                             "[%s():%d] Internal error: QID %d isn't mapped\n",
922                             __func__, __LINE__, queue->id.phys_id);
923                 return -EFAULT;
924         }
925
926         if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
927                 DLB2_HW_ERR(hw,
928                             "[%s():%d] Internal error: port slot tracking failed\n",
929                             __func__, __LINE__);
930                 return -EFAULT;
931         }
932
933         port_id = port->id.phys_id;
934         queue_id = queue->id.phys_id;
935
936         /* Read-modify-write the priority and valid bit register */
937         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));
938
939         r0.field.v &= ~(1 << i);
940
941         DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);
942
943         r1.val = DLB2_CSR_RD(hw,
944                              DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));
945
946         r2.val = DLB2_CSR_RD(hw,
947                              DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));
948
949         r3.val = DLB2_CSR_RD(hw,
950                              DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));
951
952         switch (port_id % 4) {
953         case 0:
954                 r1.field.cq_p0 &= ~(1 << i);
955                 r2.field.cq_p0 &= ~(1 << i);
956                 r3.field.cq_p0 &= ~(1 << i);
957                 break;
958
959         case 1:
960                 r1.field.cq_p1 &= ~(1 << i);
961                 r2.field.cq_p1 &= ~(1 << i);
962                 r3.field.cq_p1 &= ~(1 << i);
963                 break;
964
965         case 2:
966                 r1.field.cq_p2 &= ~(1 << i);
967                 r2.field.cq_p2 &= ~(1 << i);
968                 r3.field.cq_p2 &= ~(1 << i);
969                 break;
970
971         case 3:
972                 r1.field.cq_p3 &= ~(1 << i);
973                 r2.field.cq_p3 &= ~(1 << i);
974                 r3.field.cq_p3 &= ~(1 << i);
975                 break;
976         }
977
978         DLB2_CSR_WR(hw,
979                     DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
980                     r1.val);
981
982         DLB2_CSR_WR(hw,
983                     DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
984                     r2.val);
985
986         DLB2_CSR_WR(hw,
987                     DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
988                     r3.val);
989
990         dlb2_flush_csr(hw);
991
992         unmapped = DLB2_QUEUE_UNMAPPED;
993
994         return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
995 }
996
997 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
998                                  struct dlb2_hw_domain *domain,
999                                  struct dlb2_ldb_port *port,
1000                                  struct dlb2_ldb_queue *queue,
1001                                  u8 prio)
1002 {
1003         if (domain->started)
1004                 return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
1005         else
1006                 return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
1007 }
1008
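/*
 * Complete a single slot's pending unmap: update the mapping registers, clear
 * the slot's haswork bits, reset its IF_status to the default, re-enable the
 * CQ, and start any map that was waiting on this unmap.
 */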
1009 static void
1010 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
1011                                    struct dlb2_hw_domain *domain,
1012                                    struct dlb2_ldb_port *port,
1013                                    int slot)
1014 {
1015         enum dlb2_qid_map_state state;
1016         struct dlb2_ldb_queue *queue;
1017
1018         queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
1019
1020         state = port->qid_map[slot].state;
1021
1022         /* Update the QID2CQIDX and CQ2QID vectors */
1023         dlb2_ldb_port_unmap_qid(hw, port, queue);
1024
1025         /*
1026          * Ensure the QID will not be serviced by this {CQ, slot} by clearing
1027          * the has_work bits
1028          */
1029         dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
1030
1031         /* Reset the {CQ, slot} to its default state */
1032         dlb2_ldb_port_set_queue_if_status(hw, port, slot);
1033
1034         /* Re-enable the CQ if it wasn't manually disabled by the user */
1035         if (port->enabled)
1036                 dlb2_ldb_port_cq_enable(hw, port);
1037
1038         /*
1039          * If there is a mapping that is pending this slot's removal, perform
1040          * the mapping now.
1041          */
1042         if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
1043                 struct dlb2_ldb_port_qid_map *map;
1044                 struct dlb2_ldb_queue *map_queue;
1045                 u8 prio;
1046
1047                 map = &port->qid_map[slot];
1048
1049                 map->qid = map->pending_qid;
1050                 map->priority = map->pending_priority;
1051
1052                 map_queue = &hw->rsrcs.ldb_queues[map->qid];
1053                 prio = map->priority;
1054
1055                 dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
1056         }
1057 }
1058
1059 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
1060                                           struct dlb2_hw_domain *domain,
1061                                           struct dlb2_ldb_port *port)
1062 {
1063         union dlb2_lsp_cq_ldb_infl_cnt r0;
1064         int i;
1065
1066         if (port->num_pending_removals == 0)
1067                 return false;
1068
1069         /*
1070          * The unmap requires all the CQ's outstanding inflights to be
1071          * completed.
1072          */
1073         r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
1074         if (r0.field.count > 0)
1075                 return false;
1076
1077         for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1078                 struct dlb2_ldb_port_qid_map *map;
1079
1080                 map = &port->qid_map[i];
1081
1082                 if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
1083                     map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
1084                         continue;
1085
1086                 dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
1087         }
1088
1089         return true;
1090 }
1091
1092 static unsigned int
1093 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
1094                                         struct dlb2_hw_domain *domain)
1095 {
1096         struct dlb2_list_entry *iter;
1097         struct dlb2_ldb_port *port;
1098         int i;
1099         RTE_SET_USED(iter);
1100
1101         if (!domain->configured || domain->num_pending_removals == 0)
1102                 return 0;
1103
1104         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1105                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
1106                         dlb2_domain_finish_unmap_port(hw, domain, port);
1107         }
1108
1109         return domain->num_pending_removals;
1110 }
1111
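/**
 * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * Attempts to complete the outstanding unmap procedures of every configured
 * domain.
 *
 * Return: the number of unmap procedures still outstanding.
 */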
1112 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
1113 {
1114         int i, num = 0;
1115
1116         /* Finish queue unmap jobs for any domain that needs it */
1117         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
1118                 struct dlb2_hw_domain *domain = &hw->domains[i];
1119
1120                 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
1121         }
1122
1123         return num;
1124 }
1125
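/**
 * dlb2_finish_map_qid_procedures() - finish any pending map procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * Attempts to complete the outstanding map procedures of every configured
 * domain.
 *
 * Return: the number of map procedures still outstanding.
 */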
1126 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
1127 {
1128         int i, num = 0;
1129
1130         /* Finish queue map jobs for any domain that needs it */
1131         for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
1132                 struct dlb2_hw_domain *domain = &hw->domains[i];
1133
1134                 num += dlb2_domain_finish_map_qid_procedures(hw, domain);
1135         }
1136
1137         return num;
1138 }
1139
1140 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)
1141 {
1142         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
1143                 return -EINVAL;
1144
1145         return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
1146 }
1147
1148 int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw,
1149                                              unsigned int group_id)
1150 {
1151         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
1152                 return -EINVAL;
1153
1154         return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
1155 }
1156
1157 static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
1158                                                 unsigned int group_id,
1159                                                 unsigned long val)
1160 {
1161         DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
1162         DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
1163         DLB2_HW_DBG(hw, "\tValue:    %lu\n", val);
1164 }
1165
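/**
 * dlb2_set_group_sequence_numbers() - assign a group's sequence numbers per queue
 * @hw: dlb2_hw handle for a particular device.
 * @group_id: sequence number group ID.
 * @val: requested sequence numbers per queue (64, 128, 256, 512, or 1024).
 *
 * Return: 0 on success, -EINVAL if the group ID or value is invalid, and
 * -EPERM if the group is already in use.
 */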
1166 int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
1167                                     unsigned int group_id,
1168                                     unsigned long val)
1169 {
1170         u32 valid_allocations[] = {64, 128, 256, 512, 1024};
1171         union dlb2_ro_pipe_grp_sn_mode r0 = { {0} };
1172         struct dlb2_sn_group *group;
1173         int mode;
1174
1175         if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
1176                 return -EINVAL;
1177
1178         group = &hw->rsrcs.sn_groups[group_id];
1179
1180         /*
1181          * Once the first load-balanced queue using an SN group is configured,
1182          * the group cannot be changed.
1183          */
1184         if (group->slot_use_bitmap != 0)
1185                 return -EPERM;
1186
1187         for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
1188                 if (val == valid_allocations[mode])
1189                         break;
1190
1191         if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
1192                 return -EINVAL;
1193
1194         group->mode = mode;
1195         group->sequence_numbers_per_queue = val;
1196
1197         r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
1198         r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
1199
1200         DLB2_CSR_WR(hw, DLB2_RO_PIPE_GRP_SN_MODE, r0.val);
1201
1202         dlb2_log_set_group_sequence_numbers(hw, group_id, val);
1203
1204         return 0;
1205 }
1206
1207 static struct dlb2_dir_pq_pair *
1208 dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
1209                             u32 id,
1210                             bool vdev_req,
1211                             struct dlb2_hw_domain *domain)
1212 {
1213         struct dlb2_list_entry *iter;
1214         struct dlb2_dir_pq_pair *port;
1215         RTE_SET_USED(iter);
1216
1217         if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
1218                 return NULL;
1219
1220         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
1221                 if ((!vdev_req && port->id.phys_id == id) ||
1222                     (vdev_req && port->id.virt_id == id))
1223                         return port;
1224
1225         return NULL;
1226 }
1227
1228 static struct dlb2_ldb_queue *
1229 dlb2_get_domain_ldb_queue(u32 id,
1230                           bool vdev_req,
1231                           struct dlb2_hw_domain *domain)
1232 {
1233         struct dlb2_list_entry *iter;
1234         struct dlb2_ldb_queue *queue;
1235         RTE_SET_USED(iter);
1236
1237         if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1238                 return NULL;
1239
1240         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
1241                 if ((!vdev_req && queue->id.phys_id == id) ||
1242                     (vdev_req && queue->id.virt_id == id))
1243                         return queue;
1244
1245         return NULL;
1246 }
1247
1248 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
1249                                          u32 domain_id,
1250                                          struct dlb2_cmd_response *resp,
1251                                          bool vdev_req,
1252                                          unsigned int vdev_id)
1253 {
1254         struct dlb2_hw_domain *domain;
1255
1256         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
1257
1258         if (domain == NULL) {
1259                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
1260                 return -EINVAL;
1261         }
1262
1263         if (!domain->configured) {
1264                 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
1265                 return -EINVAL;
1266         }
1267
1268         if (domain->started) {
1269                 resp->status = DLB2_ST_DOMAIN_STARTED;
1270                 return -EINVAL;
1271         }
1272
1273         return 0;
1274 }
1275
1276 static void dlb2_log_start_domain(struct dlb2_hw *hw,
1277                                   u32 domain_id,
1278                                   bool vdev_req,
1279                                   unsigned int vdev_id)
1280 {
1281         DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
1282         if (vdev_req)
1283                 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
1284         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
1285 }
1286
1287 /**
1288  * dlb2_hw_start_domain() - Lock the domain configuration
1289  * @hw: Contains the current state of the DLB2 hardware.
1290  * @domain_id: Domain ID
1291  * @arg: User-provided arguments (unused, here for ioctl callback template).
1292  * @resp: Response to user.
1293  * @vdev_req: Request came from a virtual device.
1294  * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
1295  *
1296  * Return: 0 on success, < 0 on error. If the driver is unable to
1297  * satisfy a request, resp->status will be set accordingly.
1298  */
1299 int
1300 dlb2_hw_start_domain(struct dlb2_hw *hw,
1301                      u32 domain_id,
1302                      struct dlb2_start_domain_args *arg,
1303                      struct dlb2_cmd_response *resp,
1304                      bool vdev_req,
1305                      unsigned int vdev_id)
1306 {
1307         struct dlb2_list_entry *iter;
1308         struct dlb2_dir_pq_pair *dir_queue;
1309         struct dlb2_ldb_queue *ldb_queue;
1310         struct dlb2_hw_domain *domain;
1311         int ret;
1312         RTE_SET_USED(arg);
1313         RTE_SET_USED(iter);
1314
1315         dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
1316
1317         ret = dlb2_verify_start_domain_args(hw,
1318                                             domain_id,
1319                                             resp,
1320                                             vdev_req,
1321                                             vdev_id);
1322         if (ret)
1323                 return ret;
1324
1325         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
1326         if (domain == NULL) {
1327                 DLB2_HW_ERR(hw,
1328                             "[%s():%d] Internal error: domain not found\n",
1329                             __func__, __LINE__);
1330                 return -EFAULT;
1331         }
1332
1333         /*
1334          * Enable load-balanced and directed queue write permissions for the
1335          * queues this domain owns. Without this, the DLB2 will drop all
1336          * incoming traffic to those queues.
1337          */
1338         DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
1339                 union dlb2_sys_ldb_vasqid_v r0 = { {0} };
1340                 unsigned int offs;
1341
1342                 r0.field.vasqid_v = 1;
1343
1344                 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
1345                         ldb_queue->id.phys_id;
1346
1347                 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
1348         }
1349
1350         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
1351                 union dlb2_sys_dir_vasqid_v r0 = { {0} };
1352                 unsigned int offs;
1353
1354                 r0.field.vasqid_v = 1;
1355
1356                 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
1357                         dir_queue->id.phys_id;
1358
1359                 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
1360         }
1361
1362         dlb2_flush_csr(hw);
1363
1364         domain->started = true;
1365
1366         resp->status = 0;
1367
1368         return 0;
1369 }
1370
1371 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
1372                                          u32 domain_id,
1373                                          u32 queue_id,
1374                                          bool vdev_req,
1375                                          unsigned int vf_id)
1376 {
1377         DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
1378         if (vdev_req)
1379                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
1380         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
1381         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
1382 }
1383
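/**
 * dlb2_hw_get_dir_queue_depth() - return the depth of a directed queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: Domain ID
 * @args: queue depth arguments.
 * @resp: Response to user; on success resp->id holds the queue depth.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: 0 on success, < 0 on error. If the driver is unable to satisfy a
 * request, resp->status will be set accordingly.
 */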
1384 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
1385                                 u32 domain_id,
1386                                 struct dlb2_get_dir_queue_depth_args *args,
1387                                 struct dlb2_cmd_response *resp,
1388                                 bool vdev_req,
1389                                 unsigned int vdev_id)
1390 {
1391         struct dlb2_dir_pq_pair *queue;
1392         struct dlb2_hw_domain *domain;
1393         int id;
1394
1395         id = domain_id;
1396
1397         dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
1398                                      vdev_req, vdev_id);
1399
1400         domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
1401         if (domain == NULL) {
1402                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
1403                 return -EINVAL;
1404         }
1405
1406         id = args->queue_id;
1407
1408         queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
1409         if (queue == NULL) {
1410                 resp->status = DLB2_ST_INVALID_QID;
1411                 return -EINVAL;
1412         }
1413
1414         resp->id = dlb2_dir_queue_depth(hw, queue);
1415
1416         return 0;
1417 }
1418
1419 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
1420                                          u32 domain_id,
1421                                          u32 queue_id,
1422                                          bool vdev_req,
1423                                          unsigned int vf_id)
1424 {
1425         DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
1426         if (vdev_req)
1427                 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
1428         DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
1429         DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
1430 }
1431
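/**
 * dlb2_hw_get_ldb_queue_depth() - return the depth of a load-balanced queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: Domain ID
 * @args: queue depth arguments.
 * @resp: Response to user; on success resp->id holds the queue depth.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: 0 on success, < 0 on error. If the driver is unable to satisfy a
 * request, resp->status will be set accordingly.
 */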
1432 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
1433                                 u32 domain_id,
1434                                 struct dlb2_get_ldb_queue_depth_args *args,
1435                                 struct dlb2_cmd_response *resp,
1436                                 bool vdev_req,
1437                                 unsigned int vdev_id)
1438 {
1439         struct dlb2_hw_domain *domain;
1440         struct dlb2_ldb_queue *queue;
1441
1442         dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
1443                                      vdev_req, vdev_id);
1444
1445         domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
1446         if (domain == NULL) {
1447                 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
1448                 return -EINVAL;
1449         }
1450
1451         queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
1452         if (queue == NULL) {
1453                 resp->status = DLB2_ST_INVALID_QID;
1454                 return -EINVAL;
1455         }
1456
1457         resp->id = dlb2_ldb_queue_depth(hw, queue);
1458
1459         return 0;
1460 }