1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
7 #include "dlb2_hw_types.h"
9 #include "dlb2_osdep.h"
10 #include "dlb2_osdep_bitmap.h"
11 #include "dlb2_osdep_types.h"
12 #include "dlb2_regs.h"
13 #include "dlb2_resource.h"
15 #include "../../dlb2_priv.h"
16 #include "../../dlb2_inline_fns.h"
18 #define DLB2_DOM_LIST_HEAD(head, type) \
19 DLB2_LIST_HEAD((head), type, domain_list)
21 #define DLB2_FUNC_LIST_HEAD(head, type) \
22 DLB2_LIST_HEAD((head), type, func_list)
24 #define DLB2_DOM_LIST_FOR(head, ptr, iter) \
25 DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
27 #define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
28 DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
30 #define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
31 DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
33 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
34 DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
36 static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
40 dlb2_list_init_head(&domain->used_ldb_queues);
41 dlb2_list_init_head(&domain->used_dir_pq_pairs);
42 dlb2_list_init_head(&domain->avail_ldb_queues);
43 dlb2_list_init_head(&domain->avail_dir_pq_pairs);
45 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
46 dlb2_list_init_head(&domain->used_ldb_ports[i]);
47 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
48 dlb2_list_init_head(&domain->avail_ldb_ports[i]);
51 static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
55 dlb2_list_init_head(&rsrc->avail_domains);
56 dlb2_list_init_head(&rsrc->used_domains);
57 dlb2_list_init_head(&rsrc->avail_ldb_queues);
58 dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);
60 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
61 dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
64 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
66 union dlb2_chp_cfg_chp_csr_ctrl r0;
68 r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
70 r0.field.cfg_64bytes_qe_dir_cq_mode = 1;
72 DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
75 int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
76 struct dlb2_get_num_resources_args *arg,
80 struct dlb2_function_resources *rsrcs;
81 struct dlb2_bitmap *map;
84 if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
88 rsrcs = &hw->vdev[vdev_id];
92 arg->num_sched_domains = rsrcs->num_avail_domains;
94 arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
96 arg->num_ldb_ports = 0;
97 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
98 arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];
100 arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
101 arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
102 arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
103 arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];
105 arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
107 arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;
109 map = rsrcs->avail_hist_list_entries;
111 arg->num_hist_list_entries = dlb2_bitmap_count(map);
113 arg->max_contiguous_hist_list_entries =
114 dlb2_bitmap_longest_set_range(map);
116 arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
118 arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
123 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
125 union dlb2_chp_cfg_chp_csr_ctrl r0;
127 r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
129 r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;
131 DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
134 void dlb2_resource_free(struct dlb2_hw *hw)
138 if (hw->pf.avail_hist_list_entries)
139 dlb2_bitmap_free(hw->pf.avail_hist_list_entries);
141 for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
142 if (hw->vdev[i].avail_hist_list_entries)
143 dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
147 int dlb2_resource_init(struct dlb2_hw *hw)
149 struct dlb2_list_entry *list;
154 * For optimal load-balancing, ports that map to one or more QIDs in
155 * common should not be in numerical sequence. This is application
156 * dependent, but the driver interleaves port IDs as much as possible
157 * to reduce the likelihood of this. This initial allocation maximizes
158 * the average distance between an ID and its immediate neighbors (i.e.
159 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
162 u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
163 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,
164 16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
165 32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
166 48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
169 /* Zero-out resource tracking data structures */
170 memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
171 memset(&hw->pf, 0, sizeof(hw->pf));
173 dlb2_init_fn_rsrc_lists(&hw->pf);
175 for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
176 memset(&hw->vdev[i], 0, sizeof(hw->vdev[i]));
177 dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
180 for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
181 memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
182 dlb2_init_domain_rsrc_lists(&hw->domains[i]);
183 hw->domains[i].parent_func = &hw->pf;
186 /* Give all resources to the PF driver */
187 hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
188 for (i = 0; i < hw->pf.num_avail_domains; i++) {
189 list = &hw->domains[i].func_list;
191 dlb2_list_add(&hw->pf.avail_domains, list);
194 hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
195 for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
196 list = &hw->rsrcs.ldb_queues[i].func_list;
198 dlb2_list_add(&hw->pf.avail_ldb_queues, list);
201 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
202 hw->pf.num_avail_ldb_ports[i] =
203 DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;
205 for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
206 int cos_id = i >> DLB2_NUM_COS_DOMAINS;
207 struct dlb2_ldb_port *port;
209 port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
211 dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
215 hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS;
216 for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
217 list = &hw->rsrcs.dir_pq_pairs[i].func_list;
219 dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
222 hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
223 hw->pf.num_avail_dqed_entries = DLB2_MAX_NUM_DIR_CREDITS;
224 hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
226 ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
227 DLB2_MAX_NUM_HIST_LIST_ENTRIES);
231 ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
235 for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
236 ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
237 DLB2_MAX_NUM_HIST_LIST_ENTRIES);
241 ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
246 /* Initialize the hardware resource IDs */
247 for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
248 hw->domains[i].id.phys_id = i;
249 hw->domains[i].id.vdev_owned = false;
252 for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
253 hw->rsrcs.ldb_queues[i].id.phys_id = i;
254 hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
257 for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
258 hw->rsrcs.ldb_ports[i].id.phys_id = i;
259 hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
262 for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS; i++) {
263 hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
264 hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
267 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
268 hw->rsrcs.sn_groups[i].id = i;
269 /* Default mode (0) is 64 sequence numbers per queue */
270 hw->rsrcs.sn_groups[i].mode = 0;
271 hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
272 hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
275 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
276 hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;
281 dlb2_resource_free(hw);
286 void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
288 union dlb2_cfg_mstr_cfg_pm_pmcsr_disable r0;
290 r0.val = DLB2_CSR_RD(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE);
292 r0.field.disable = 0;
294 DLB2_CSR_WR(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE, r0.val);
297 static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
298 struct dlb2_hw_domain *domain)
300 union dlb2_chp_cfg_ldb_vas_crd r0 = { {0} };
301 union dlb2_chp_cfg_dir_vas_crd r1 = { {0} };
303 r0.field.count = domain->num_ldb_credits;
305 DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), r0.val);
307 r1.field.count = domain->num_dir_credits;
309 DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), r1.val);
/*
 * dlb2_get_next_ldb_port() - pick the best available LDB port in cos_id for
 * allocation to domain_id, preferring ports whose physical neighbors belong
 * to other domains (reduces scheduling interference).
 * NOTE(review): this listing is fragmented — the next/prev neighbor-index
 * computations and the continue/return statements are missing lines; the
 * code is left untouched and only annotated.
 */
312 static struct dlb2_ldb_port *
313 dlb2_get_next_ldb_port(struct dlb2_hw *hw,
314 struct dlb2_function_resources *rsrcs,
318 struct dlb2_list_entry *iter;
319 struct dlb2_ldb_port *port;
/* Pass 1: both neighbors owned by a different domain. */
322 * To reduce the odds of consecutive load-balanced ports mapping to the
323 * same queue(s), the driver attempts to allocate ports whose neighbors
324 * are owned by a different domain.
326 DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
330 phys_id = port->id.phys_id;
/* Neighbor indices wrap around the [0, MAX_LDB_PORTS) range. */
334 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
337 prev = DLB2_MAX_NUM_LDB_PORTS - 1;
339 if (!hw->rsrcs.ldb_ports[next].owned ||
340 hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
343 if (!hw->rsrcs.ldb_ports[prev].owned ||
344 hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
/* Pass 2: one neighbor foreign-owned, the other unallocated. */
351 * Failing that, the driver looks for a port with one neighbor owned by
352 * a different domain and the other unallocated.
354 DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
358 phys_id = port->id.phys_id;
362 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
365 prev = DLB2_MAX_NUM_LDB_PORTS - 1;
367 if (!hw->rsrcs.ldb_ports[prev].owned &&
368 hw->rsrcs.ldb_ports[next].owned &&
369 hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
372 if (!hw->rsrcs.ldb_ports[next].owned &&
373 hw->rsrcs.ldb_ports[prev].owned &&
374 hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
/* Pass 3: both neighbors unallocated. */
379 * Failing that, the driver looks for a port with both neighbors
382 DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
386 phys_id = port->id.phys_id;
390 if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
393 prev = DLB2_MAX_NUM_LDB_PORTS - 1;
395 if (!hw->rsrcs.ldb_ports[prev].owned &&
396 !hw->rsrcs.ldb_ports[next].owned)
400 /* If all else fails, the driver returns the next available port. */
401 return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
405 static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
406 struct dlb2_function_resources *rsrcs,
407 struct dlb2_hw_domain *domain,
410 struct dlb2_cmd_response *resp)
414 if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
415 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
419 for (i = 0; i < num_ports; i++) {
420 struct dlb2_ldb_port *port;
422 port = dlb2_get_next_ldb_port(hw, rsrcs,
423 domain->id.phys_id, cos_id);
426 "[%s()] Internal error: domain validation failed\n",
431 dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
434 port->domain_id = domain->id;
437 dlb2_list_add(&domain->avail_ldb_ports[cos_id],
441 rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;
446 static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
447 struct dlb2_function_resources *rsrcs,
448 struct dlb2_hw_domain *domain,
449 struct dlb2_create_sched_domain_args *args,
450 struct dlb2_cmd_response *resp)
455 if (args->cos_strict) {
456 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
457 u32 num = args->num_cos_ldb_ports[i];
459 /* Allocate ports from specific classes-of-service */
460 ret = __dlb2_attach_ldb_ports(hw,
474 * Attempt to allocate from specific class-of-service, but
475 * fallback to the other classes if that fails.
477 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
478 for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
479 for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
480 cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;
482 ret = __dlb2_attach_ldb_ports(hw,
498 /* Allocate num_ldb_ports from any class-of-service */
499 for (i = 0; i < args->num_ldb_ports; i++) {
500 for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
501 ret = __dlb2_attach_ldb_ports(hw,
518 static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
519 struct dlb2_function_resources *rsrcs,
520 struct dlb2_hw_domain *domain,
522 struct dlb2_cmd_response *resp)
526 if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
527 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
531 for (i = 0; i < num_ports; i++) {
532 struct dlb2_dir_pq_pair *port;
534 port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
538 "[%s()] Internal error: domain validation failed\n",
543 dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
545 port->domain_id = domain->id;
548 dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
551 rsrcs->num_avail_dir_pq_pairs -= num_ports;
556 static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
557 struct dlb2_hw_domain *domain,
559 struct dlb2_cmd_response *resp)
561 if (rsrcs->num_avail_qed_entries < num_credits) {
562 resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
566 rsrcs->num_avail_qed_entries -= num_credits;
567 domain->num_ldb_credits += num_credits;
571 static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
572 struct dlb2_hw_domain *domain,
574 struct dlb2_cmd_response *resp)
576 if (rsrcs->num_avail_dqed_entries < num_credits) {
577 resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
581 rsrcs->num_avail_dqed_entries -= num_credits;
582 domain->num_dir_credits += num_credits;
586 static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
587 struct dlb2_hw_domain *domain,
588 u32 num_atomic_inflights,
589 struct dlb2_cmd_response *resp)
591 if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
592 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
596 rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
597 domain->num_avail_aqed_entries += num_atomic_inflights;
602 dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
603 struct dlb2_hw_domain *domain,
604 u32 num_hist_list_entries,
605 struct dlb2_cmd_response *resp)
607 struct dlb2_bitmap *bitmap;
610 if (num_hist_list_entries) {
611 bitmap = rsrcs->avail_hist_list_entries;
613 base = dlb2_bitmap_find_set_bit_range(bitmap,
614 num_hist_list_entries);
618 domain->total_hist_list_entries = num_hist_list_entries;
619 domain->avail_hist_list_entries = num_hist_list_entries;
620 domain->hist_list_entry_base = base;
621 domain->hist_list_entry_offset = 0;
623 dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
628 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
632 static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
633 struct dlb2_function_resources *rsrcs,
634 struct dlb2_hw_domain *domain,
636 struct dlb2_cmd_response *resp)
640 if (rsrcs->num_avail_ldb_queues < num_queues) {
641 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
645 for (i = 0; i < num_queues; i++) {
646 struct dlb2_ldb_queue *queue;
648 queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
652 "[%s()] Internal error: domain validation failed\n",
657 dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
659 queue->domain_id = domain->id;
662 dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
665 rsrcs->num_avail_ldb_queues -= num_queues;
671 dlb2_domain_attach_resources(struct dlb2_hw *hw,
672 struct dlb2_function_resources *rsrcs,
673 struct dlb2_hw_domain *domain,
674 struct dlb2_create_sched_domain_args *args,
675 struct dlb2_cmd_response *resp)
679 ret = dlb2_attach_ldb_queues(hw,
682 args->num_ldb_queues,
687 ret = dlb2_attach_ldb_ports(hw,
695 ret = dlb2_attach_dir_ports(hw,
703 ret = dlb2_attach_ldb_credits(rsrcs,
705 args->num_ldb_credits,
710 ret = dlb2_attach_dir_credits(rsrcs,
712 args->num_dir_credits,
717 ret = dlb2_attach_domain_hist_list_entries(rsrcs,
719 args->num_hist_list_entries,
724 ret = dlb2_attach_atomic_inflights(rsrcs,
726 args->num_atomic_inflights,
731 dlb2_configure_domain_credits(hw, domain);
733 domain->configured = true;
735 domain->started = false;
737 rsrcs->num_avail_domains--;
743 dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
744 struct dlb2_create_sched_domain_args *args,
745 struct dlb2_cmd_response *resp)
747 u32 num_avail_ldb_ports, req_ldb_ports;
748 struct dlb2_bitmap *avail_hl_entries;
749 unsigned int max_contig_hl_range;
752 avail_hl_entries = rsrcs->avail_hist_list_entries;
754 max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);
756 num_avail_ldb_ports = 0;
758 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
759 num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];
761 req_ldb_ports += args->num_cos_ldb_ports[i];
764 req_ldb_ports += args->num_ldb_ports;
766 if (rsrcs->num_avail_domains < 1) {
767 resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
771 if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
772 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
776 if (req_ldb_ports > num_avail_ldb_ports) {
777 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
781 for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
782 if (args->num_cos_ldb_ports[i] >
783 rsrcs->num_avail_ldb_ports[i]) {
784 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
789 if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
790 resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
794 if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
795 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
799 if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
800 resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
804 if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
805 resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
809 if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
810 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
814 if (max_contig_hl_range < args->num_hist_list_entries) {
815 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
823 dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
824 struct dlb2_create_sched_domain_args *args,
826 unsigned int vdev_id)
828 DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
830 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
831 DLB2_HW_DBG(hw, "\tNumber of LDB queues: %d\n",
832 args->num_ldb_queues);
833 DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
834 args->num_ldb_ports);
835 DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0): %d\n",
836 args->num_cos_ldb_ports[0]);
837 DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1): %d\n",
838 args->num_cos_ldb_ports[1]);
839 DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2): %d\n",
840 args->num_cos_ldb_ports[1]);
841 DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3): %d\n",
842 args->num_cos_ldb_ports[1]);
843 DLB2_HW_DBG(hw, "\tStrict CoS allocation: %d\n",
845 DLB2_HW_DBG(hw, "\tNumber of DIR ports: %d\n",
846 args->num_dir_ports);
847 DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
848 args->num_atomic_inflights);
849 DLB2_HW_DBG(hw, "\tNumber of hist list entries: %d\n",
850 args->num_hist_list_entries);
851 DLB2_HW_DBG(hw, "\tNumber of LDB credits: %d\n",
852 args->num_ldb_credits);
853 DLB2_HW_DBG(hw, "\tNumber of DIR credits: %d\n",
854 args->num_dir_credits);
858 * dlb2_hw_create_sched_domain() - Allocate and initialize a DLB scheduling
859 * domain and its resources.
860 * @hw: Contains the current state of the DLB2 hardware.
861 * @args: User-provided arguments.
862 * @resp: Response to user.
863 * @vdev_req: Request came from a virtual device.
864 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
866 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
867 * satisfy a request, resp->status will be set accordingly.
869 int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
870 struct dlb2_create_sched_domain_args *args,
871 struct dlb2_cmd_response *resp,
873 unsigned int vdev_id)
875 struct dlb2_function_resources *rsrcs;
876 struct dlb2_hw_domain *domain;
879 rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
881 dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);
884 * Verify that hardware resources are available before attempting to
885 * satisfy the request. This simplifies the error unwinding code.
887 ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp);
891 domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
892 if (domain == NULL) {
894 "[%s():%d] Internal error: no available domains\n",
899 if (domain->configured) {
901 "[%s()] Internal error: avail_domains contains configured domains.\n",
906 dlb2_init_domain_rsrc_lists(domain);
908 ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
911 "[%s()] Internal error: failed to verify args.\n",
917 dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);
919 dlb2_list_add(&rsrcs->used_domains, &domain->func_list);
921 resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
928 * The PF driver cannot assume that a register write will affect subsequent HCW
929 * writes. To ensure a write completes, the driver must read back a CSR. This
930 * function only need be called for configuration that can occur after the
931 * domain has started; prior to starting, applications can't send HCWs.
933 static inline void dlb2_flush_csr(struct dlb2_hw *hw)
935 DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
938 static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
939 struct dlb2_dir_pq_pair *port)
941 union dlb2_lsp_cq_dir_dsbl reg;
943 reg.field.disabled = 1;
945 DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);
950 static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
951 struct dlb2_dir_pq_pair *port)
953 union dlb2_lsp_cq_dir_tkn_cnt r0;
955 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id));
958 * Account for the initial token count, which is used in order to
959 * provide a CQ with depth less than 8.
962 return r0.field.count - port->init_tkn_cnt;
/*
 * dlb2_drain_dir_cq() - return any outstanding CQ tokens for a directed
 * port by issuing a batch token-return HCW through the producer port.
 * NOTE(review): this listing is fragmented (declarations of cnt/pp_addr and
 * the enclosing if/return are missing lines); code left untouched.
 */
965 static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
966 struct dlb2_dir_pq_pair *port)
968 unsigned int port_id = port->id.phys_id;
971 /* Return any outstanding tokens */
972 cnt = dlb2_dir_cq_token_count(hw, port);
975 struct dlb2_hcw hcw_mem[8], *hcw;
978 pp_addr = os_map_producer_port(hw, port_id, false);
980 /* Point hcw to a 64B-aligned location */
981 hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
/* First HCW is a batch token return; the rest of the 64B are NOOPs. */
984 * Program the first HCW for a batch token return and
987 memset(hcw, 0, 4 * sizeof(*hcw));
/* lock_id carries (token count - 1) for a batch return. */
989 hcw->lock_id = cnt - 1;
991 dlb2_movdir64b(pp_addr, hcw);
/* Ensure the 64B store reached the device before unmapping. */
993 os_fence_hcw(hw, pp_addr);
995 os_unmap_producer_port(hw, pp_addr);
1001 static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
1002 struct dlb2_dir_pq_pair *port)
1004 union dlb2_lsp_cq_dir_dsbl reg;
1006 reg.field.disabled = 0;
1008 DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);
/*
 * dlb2_domain_drain_dir_cqs() - drain every configured directed CQ in the
 * domain, optionally toggling (disable/drain/enable) each port around the
 * drain.
 * NOTE(review): fragmented listing — the toggle parameter, continue, error
 * return, and closing braces are missing lines; code left untouched.
 */
1013 static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
1014 struct dlb2_hw_domain *domain,
1017 struct dlb2_list_entry *iter;
1018 struct dlb2_dir_pq_pair *port;
1022 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
1024 * Can't drain a port if it's not configured, and there's
1025 * nothing to drain if its queue is unconfigured.
1027 if (!port->port_configured || !port->queue_configured)
1031 dlb2_dir_port_cq_disable(hw, port);
1033 ret = dlb2_drain_dir_cq(hw, port);
1038 dlb2_dir_port_cq_enable(hw, port);
1044 static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
1045 struct dlb2_dir_pq_pair *queue)
1047 union dlb2_lsp_qid_dir_enqueue_cnt r0;
1049 r0.val = DLB2_CSR_RD(hw,
1050 DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));
1052 return r0.field.count;
1055 static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
1056 struct dlb2_dir_pq_pair *queue)
1058 return dlb2_dir_queue_depth(hw, queue) == 0;
1061 static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
1062 struct dlb2_hw_domain *domain)
1064 struct dlb2_list_entry *iter;
1065 struct dlb2_dir_pq_pair *queue;
1068 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
1069 if (!dlb2_dir_queue_is_empty(hw, queue))
/*
 * dlb2_domain_drain_dir_queues() - repeatedly drain the domain's directed
 * CQs until all its directed queues are empty, bounded by
 * DLB2_MAX_QID_EMPTY_CHECK_LOOPS iterations.
 * NOTE(review): fragmented listing — declarations of i/ret, early returns,
 * and closing braces are missing lines; code left untouched.
 */
1076 static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
1077 struct dlb2_hw_domain *domain)
1081 /* If the domain hasn't been started, there's no traffic to drain */
1082 if (!domain->started)
1085 for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1086 ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
1090 if (dlb2_domain_dir_queues_empty(hw, domain))
/* Loop exhausted without the queues emptying: internal error. */
1094 if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1096 "[%s()] Internal error: failed to empty queues\n",
1102 * Drain the CQs one more time. For the queues to go empty, they would
1103 * have scheduled one or more QEs.
1105 ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
/*
 * dlb2_ldb_port_cq_enable() - re-enable scheduling to an LDB port's CQ,
 * unless a QID unmap is still pending for the port.
 * NOTE(review): fragmented listing — the early return and trailing CSR
 * flush appear to be missing lines; code left untouched.
 */
1112 static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
1113 struct dlb2_ldb_port *port)
1115 union dlb2_lsp_cq_ldb_dsbl reg;
1118 * Don't re-enable the port if a removal is pending. The caller should
1119 * mark this port as enabled (if it isn't already), and when the
1120 * removal completes the port will be enabled.
1122 if (port->num_pending_removals)
1125 reg.field.disabled = 0;
1127 DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
1132 static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
1133 struct dlb2_ldb_port *port)
1135 union dlb2_lsp_cq_ldb_dsbl reg;
1137 reg.field.disabled = 1;
1139 DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
1144 static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
1145 struct dlb2_ldb_port *port)
1147 union dlb2_lsp_cq_ldb_infl_cnt r0;
1149 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
1151 return r0.field.count;
1154 static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
1155 struct dlb2_ldb_port *port)
1157 union dlb2_lsp_cq_ldb_tkn_cnt r0;
1159 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id));
1162 * Account for the initial token count, which is used in order to
1163 * provide a CQ with depth less than 8.
1166 return r0.field.token_count - port->init_tkn_cnt;
/*
 * dlb2_drain_ldb_cq() - complete all in-flight QEs and return all tokens
 * for an LDB port by issuing HCWs through the producer port.
 * NOTE(review): fragmented listing — declarations of pp_addr/i and the
 * final return are missing lines; code left untouched.
 */
1169 static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
1171 u32 infl_cnt, tkn_cnt;
1174 infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
1175 tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
1177 if (infl_cnt || tkn_cnt) {
1178 struct dlb2_hcw hcw_mem[8], *hcw;
1181 pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
1183 /* Point hcw to a 64B-aligned location */
1184 hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1187 * Program the first HCW for a completion and token return and
1188 * the other HCWs as NOOPS
1191 memset(hcw, 0, 4 * sizeof(*hcw));
1192 hcw->qe_comp = (infl_cnt > 0);
1193 hcw->cq_token = (tkn_cnt > 0);
/* lock_id carries (token count - 1) for a batch token return. */
1194 hcw->lock_id = tkn_cnt - 1;
1196 /* Return tokens in the first HCW */
1197 dlb2_movdir64b(pp_addr, hcw);
1201 /* Issue remaining completions (if any) */
1202 for (i = 1; i < infl_cnt; i++)
1203 dlb2_movdir64b(pp_addr, hcw);
/* Ensure all 64B stores reached the device before unmapping. */
1205 os_fence_hcw(hw, pp_addr);
1207 os_unmap_producer_port(hw, pp_addr);
/*
 * dlb2_domain_drain_ldb_cqs() - drain every LDB CQ in the domain (across
 * all classes-of-service), optionally toggling each port's CQ around the
 * drain. No-op if the domain was never started.
 * NOTE(review): fragmented listing — the toggle parameter, declarations,
 * error return, and closing braces are missing lines; code left untouched.
 */
1213 static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
1214 struct dlb2_hw_domain *domain,
1217 struct dlb2_list_entry *iter;
1218 struct dlb2_ldb_port *port;
1222 /* If the domain hasn't been started, there's no traffic to drain */
1223 if (!domain->started)
1226 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1227 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1229 dlb2_ldb_port_cq_disable(hw, port);
1231 ret = dlb2_drain_ldb_cq(hw, port);
1236 dlb2_ldb_port_cq_enable(hw, port);
1243 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
1244 struct dlb2_ldb_queue *queue)
1246 union dlb2_lsp_qid_aqed_active_cnt r0;
1247 union dlb2_lsp_qid_atm_active r1;
1248 union dlb2_lsp_qid_ldb_enqueue_cnt r2;
1250 r0.val = DLB2_CSR_RD(hw,
1251 DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
1252 r1.val = DLB2_CSR_RD(hw,
1253 DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));
1255 r2.val = DLB2_CSR_RD(hw,
1256 DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
1258 return r0.field.count + r1.field.count + r2.field.count;
1261 static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
1262 struct dlb2_ldb_queue *queue)
1264 return dlb2_ldb_queue_depth(hw, queue) == 0;
1267 static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
1268 struct dlb2_hw_domain *domain)
1270 struct dlb2_list_entry *iter;
1271 struct dlb2_ldb_queue *queue;
1274 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
1275 if (queue->num_mappings == 0)
1278 if (!dlb2_ldb_queue_is_empty(hw, queue))
/*
 * dlb2_domain_drain_mapped_queues() - repeatedly drain the domain's LDB CQs
 * until all mapped LDB queues are empty, bounded by
 * DLB2_MAX_QID_EMPTY_CHECK_LOOPS. Fails if queue unmaps are still pending.
 * NOTE(review): fragmented listing — declarations, early returns, and
 * closing braces are missing lines; code left untouched.
 */
1285 static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
1286 struct dlb2_hw_domain *domain)
1290 /* If the domain hasn't been started, there's no traffic to drain */
1291 if (!domain->started)
/* Draining requires all pending unmaps to have completed first. */
1294 if (domain->num_pending_removals > 0) {
1296 "[%s()] Internal error: failed to unmap domain queues\n",
1301 for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1302 ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
1306 if (dlb2_domain_mapped_queues_empty(hw, domain))
1310 if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1312 "[%s()] Internal error: failed to empty queues\n",
1318 * Drain the CQs one more time. For the queues to go empty, they would
1319 * have scheduled one or more QEs.
1321 ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
1328 static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
1329 struct dlb2_hw_domain *domain)
1331 struct dlb2_list_entry *iter;
1332 struct dlb2_ldb_port *port;
1336 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1337 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1338 port->enabled = true;
1340 dlb2_ldb_port_cq_enable(hw, port);
/*
 * dlb2_get_ldb_queue_from_id() - translate an LDB queue ID to its software
 * state. PF requests index directly by physical ID; vdev requests search
 * the vdev's used and available queues by virtual ID.
 * NOTE(review): fragmented listing — parameters id/vdev_req, return
 * statements, and closing braces are missing lines; code left untouched.
 */
1345 static struct dlb2_ldb_queue *
1346 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
1349 unsigned int vdev_id)
1351 struct dlb2_list_entry *iter1;
1352 struct dlb2_list_entry *iter2;
1353 struct dlb2_function_resources *rsrcs;
1354 struct dlb2_hw_domain *domain;
1355 struct dlb2_ldb_queue *queue;
1356 RTE_SET_USED(iter1);
1357 RTE_SET_USED(iter2);
1359 if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1362 rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
/* PF path: the ID is the physical index. */
1365 return &hw->rsrcs.ldb_queues[id];
/* vdev path: search by virtual ID across owned domains... */
1367 DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
1368 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
1369 if (queue->id.virt_id == id)
/* ...and the vdev's not-yet-assigned queues. */
1373 DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
1374 if (queue->id.virt_id == id)
/*
 * dlb2_get_domain_from_id() - translate a domain ID to its software state.
 * PF requests index directly by physical ID; vdev requests search the
 * vdev's used domains by virtual ID.
 * NOTE(review): fragmented listing — parameters id/vdev_req and the return
 * statements are missing lines; code left untouched.
 */
1380 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
1383 unsigned int vdev_id)
1385 struct dlb2_list_entry *iteration;
1386 struct dlb2_function_resources *rsrcs;
1387 struct dlb2_hw_domain *domain;
1388 RTE_SET_USED(iteration);
1390 if (id >= DLB2_MAX_NUM_DOMAINS)
/* PF path: the ID is the physical index. */
1394 return &hw->domains[id];
1396 rsrcs = &hw->vdev[vdev_id];
/* vdev path: search the vdev's in-use domains by virtual ID. */
1398 DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
1399 if (domain->id.virt_id == id)
/*
 * dlb2_port_slot_state_transition() - move a port's QID-map slot between
 * states, updating the mapping/pending counters on the queue, port, and
 * owning domain to match. Invalid transitions are logged and rejected.
 * NOTE(review): fragmented listing — slot parameter, break/return
 * statements, and closing braces are missing lines; code left untouched.
 */
1405 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
1406 struct dlb2_ldb_port *port,
1407 struct dlb2_ldb_queue *queue,
1409 enum dlb2_qid_map_state new_state)
1411 enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
1412 struct dlb2_hw_domain *domain;
1415 domain_id = port->domain_id.phys_id;
1417 domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1418 if (domain == NULL) {
1420 "[%s()] Internal error: unable to find domain %d\n",
1421 __func__, domain_id);
/* Each (curr_state, new_state) pair adjusts a distinct counter set. */
1425 switch (curr_state) {
1426 case DLB2_QUEUE_UNMAPPED:
1427 switch (new_state) {
1428 case DLB2_QUEUE_MAPPED:
1429 queue->num_mappings++;
1430 port->num_mappings++;
1432 case DLB2_QUEUE_MAP_IN_PROG:
1433 queue->num_pending_additions++;
1434 domain->num_pending_additions++;
1440 case DLB2_QUEUE_MAPPED:
1441 switch (new_state) {
1442 case DLB2_QUEUE_UNMAPPED:
1443 queue->num_mappings--;
1444 port->num_mappings--;
1446 case DLB2_QUEUE_UNMAP_IN_PROG:
1447 port->num_pending_removals++;
1448 domain->num_pending_removals++;
1450 case DLB2_QUEUE_MAPPED:
1451 /* Priority change, nothing to update */
1457 case DLB2_QUEUE_MAP_IN_PROG:
1458 switch (new_state) {
1459 case DLB2_QUEUE_UNMAPPED:
1460 queue->num_pending_additions--;
1461 domain->num_pending_additions--;
1463 case DLB2_QUEUE_MAPPED:
1464 queue->num_mappings++;
1465 port->num_mappings++;
1466 queue->num_pending_additions--;
1467 domain->num_pending_additions--;
1473 case DLB2_QUEUE_UNMAP_IN_PROG:
1474 switch (new_state) {
1475 case DLB2_QUEUE_UNMAPPED:
1476 port->num_pending_removals--;
1477 domain->num_pending_removals--;
1478 queue->num_mappings--;
1479 port->num_mappings--;
1481 case DLB2_QUEUE_MAPPED:
1482 port->num_pending_removals--;
1483 domain->num_pending_removals--;
1485 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1486 /* Nothing to update */
1492 case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1493 switch (new_state) {
1494 case DLB2_QUEUE_UNMAP_IN_PROG:
1495 /* Nothing to update */
1497 case DLB2_QUEUE_UNMAPPED:
1499 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
1500 * becomes UNMAPPED before it transitions to
1503 queue->num_mappings--;
1504 port->num_mappings--;
1505 port->num_pending_removals--;
1506 domain->num_pending_removals--;
/* Commit the new state and trace the transition. */
1516 port->qid_map[slot].state = new_state;
1519 "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
1520 __func__, queue->id.phys_id, port->id.phys_id,
1521 curr_state, new_state);
/* Unreachable transitions fall through to this error path. */
1526 "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
1527 __func__, queue->id.phys_id, port->id.phys_id,
1528 curr_state, new_state);
/* Find the first qid_map slot of the port in the given state. On success the
 * slot index is reported through the (elided) output parameter; returns true
 * iff a matching slot exists.
 */
1532 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
1533 enum dlb2_qid_map_state state,
1538 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1539 if (port->qid_map[i].state == state)
/* i ran off the end iff no slot matched. */
1545 return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
/* Like dlb2_port_find_slot(), but the slot must both be in the given state
 * and be mapped to the given queue's physical QID.
 */
1548 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
1549 enum dlb2_qid_map_state state,
1550 struct dlb2_ldb_queue *queue,
1555 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1556 if (port->qid_map[i].state == state &&
1557 port->qid_map[i].qid == queue->id.phys_id)
/* i ran off the end iff no slot matched. */
1563 return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1567 * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
1568 * their function names imply, and should only be called by the dynamic CQ
/* Disable every CQ in the domain that currently has a MAPPED slot for this
 * queue. Per the comment above, this is a helper for the dynamic map/unmap
 * flow only and is not a general-purpose disable.
 */
1571 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
1572 struct dlb2_hw_domain *domain,
1573 struct dlb2_ldb_queue *queue)
1575 struct dlb2_list_entry *iter;
1576 struct dlb2_ldb_port *port;
/* Walk the used LDB ports of every class-of-service. */
1580 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1581 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1582 enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
/* Skip ports with no MAPPED slot for this queue (continue elided). */
1584 if (!dlb2_port_find_slot_queue(port, state,
1589 dlb2_ldb_port_cq_disable(hw, port);
/* Re-enable every CQ in the domain that has a MAPPED slot for this queue;
 * inverse of dlb2_ldb_queue_disable_mapped_cqs(). Dynamic map/unmap helper
 * only (see comment above these functions).
 */
1594 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
1595 struct dlb2_hw_domain *domain,
1596 struct dlb2_ldb_queue *queue)
1598 struct dlb2_list_entry *iter;
1599 struct dlb2_ldb_port *port;
/* Walk the used LDB ports of every class-of-service. */
1603 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1604 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1605 enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
/* Skip ports with no MAPPED slot for this queue (continue elided). */
1607 if (!dlb2_port_find_slot_queue(port, state,
1612 dlb2_ldb_port_cq_enable(hw, port);
/* Clear the inflight-OK (IF) status for one {CQ, slot} via the LSP scheduler
 * control register. NOTE(review): the visible code is identical to the "set"
 * variant below; the value bit that distinguishes clear from set appears to
 * be on an elided line — confirm against the full source.
 */
1617 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
1618 struct dlb2_ldb_port *port,
1621 union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
/* Target CQ and per-CQ QID index (slot) for this control write. */
1623 r0.field.cq = port->id.phys_id;
1624 r0.field.qidix = slot;
/* Select the inflight-OK bit as the field being written. */
1626 r0.field.inflight_ok_v = 1;
1628 DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
/* Set the inflight-OK (IF) status for one {CQ, slot} via the LSP scheduler
 * control register. NOTE(review): visibly identical to the "clear" variant
 * above; the distinguishing value bit appears to be elided — confirm.
 */
1633 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
1634 struct dlb2_ldb_port *port,
1637 union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
/* Target CQ and per-CQ QID index (slot) for this control write. */
1639 r0.field.cq = port->id.phys_id;
1640 r0.field.qidix = slot;
/* Select the inflight-OK bit as the field being written. */
1642 r0.field.inflight_ok_v = 1;
1644 DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
/* Statically map queue q to port p at the given priority by programming the
 * CQ2PRIOV (priority/valid), CQ2QID (slot -> QID), and the three QID2CQIDX
 * bit-vector registers, then recording the mapping in software state via
 * dlb2_port_slot_state_transition(DLB2_QUEUE_MAPPED).
 */
1649 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
1650 struct dlb2_ldb_port *p,
1651 struct dlb2_ldb_queue *q,
1654 union dlb2_lsp_cq2priov r0;
1655 union dlb2_lsp_cq2qid0 r1;
1656 union dlb2_atm_qid2cqidix_00 r2;
1657 union dlb2_lsp_qid2cqidix_00 r3;
1658 union dlb2_lsp_qid2cqidix2_00 r4;
1659 enum dlb2_qid_map_state state;
1662 /* Look for a pending or already mapped slot, else an unused slot */
1663 if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
1664 !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
1665 !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
1667 "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
1668 __func__, __LINE__);
/* Defensive check: the find helpers must yield an in-range slot. */
1672 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1674 "[%s():%d] Internal error: port slot tracking failed\n",
1675 __func__, __LINE__);
1679 /* Read-modify-write the priority and valid bit register */
1680 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));
/* One valid bit per slot; 3 priority bits per slot. */
1682 r0.field.v |= 1 << i;
1683 r0.field.prio |= (priority & 0x7) << i * 3;
1685 DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);
1687 /* Read-modify-write the QID map register */
/* Slots 0-3 live in CQ2QID0, slots 4-7 in CQ2QID1 (selection elided). */
1689 r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
1691 r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));
1693 if (i == 0 || i == 4)
1694 r1.field.qid_p0 = q->id.phys_id;
1695 if (i == 1 || i == 5)
1696 r1.field.qid_p1 = q->id.phys_id;
1697 if (i == 2 || i == 6)
1698 r1.field.qid_p2 = q->id.phys_id;
1699 if (i == 3 || i == 7)
1700 r1.field.qid_p3 = q->id.phys_id;
1703 DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
1705 DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);
/* Each QID2CQIDX register covers 4 CQs; select by phys_id / 4. */
1707 r2.val = DLB2_CSR_RD(hw,
1708 DLB2_ATM_QID2CQIDIX(q->id.phys_id,
1709 p->id.phys_id / 4));
1711 r3.val = DLB2_CSR_RD(hw,
1712 DLB2_LSP_QID2CQIDIX(q->id.phys_id,
1713 p->id.phys_id / 4));
1715 r4.val = DLB2_CSR_RD(hw,
1716 DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
1717 p->id.phys_id / 4));
/* Within the register, pick the sub-field for this CQ (phys_id % 4). */
1719 switch (p->id.phys_id % 4) {
1721 r2.field.cq_p0 |= 1 << i;
1722 r3.field.cq_p0 |= 1 << i;
1723 r4.field.cq_p0 |= 1 << i;
1727 r2.field.cq_p1 |= 1 << i;
1728 r3.field.cq_p1 |= 1 << i;
1729 r4.field.cq_p1 |= 1 << i;
1733 r2.field.cq_p2 |= 1 << i;
1734 r3.field.cq_p2 |= 1 << i;
1735 r4.field.cq_p2 |= 1 << i;
1739 r2.field.cq_p3 |= 1 << i;
1740 r3.field.cq_p3 |= 1 << i;
1741 r4.field.cq_p3 |= 1 << i;
1746 DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1750 DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
1754 DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
/* Record the mapping in software and run the state machine. */
1759 p->qid_map[i].qid = q->id.phys_id;
1760 p->qid_map[i].priority = priority;
1762 state = DLB2_QUEUE_MAPPED;
1764 return dlb2_port_slot_state_transition(hw, p, q, i, state);
/* Set the scheduler "has work" indications for one {port, slot}: the atomic
 * (replay-list) haswork bit is derived from the queue's AQED active count,
 * and the non-atomic haswork bit from the queue's LDB enqueue count.
 */
1767 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
1768 struct dlb2_ldb_port *port,
1769 struct dlb2_ldb_queue *queue,
1772 union dlb2_lsp_qid_aqed_active_cnt r0;
1773 union dlb2_lsp_qid_ldb_enqueue_cnt r1;
1774 union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
1776 /* Set the atomic scheduling haswork bit */
1777 r0.val = DLB2_CSR_RD(hw,
1778 DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
1780 r2.field.cq = port->id.phys_id;
1781 r2.field.qidix = slot;
/* haswork only if atomic QEs are actually outstanding. */
1783 r2.field.rlist_haswork_v = r0.field.count > 0;
1785 /* Set the non-atomic scheduling haswork bit */
1786 DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
1788 r1.val = DLB2_CSR_RD(hw,
1789 DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
/* Reuse r2 for the second control write; clear previous fields first. */
1791 memset(&r2, 0, sizeof(r2));
1793 r2.field.cq = port->id.phys_id;
1794 r2.field.qidix = slot;
1796 r2.field.nalb_haswork_v = (r1.field.count > 0);
1798 DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
/* Clear both the atomic (replay-list) and non-atomic "has work" indications
 * for one {port, slot} with two writes to the LSP scheduler control register.
 */
1805 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
1806 struct dlb2_ldb_port *port,
1809 union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
1811 r2.field.cq = port->id.phys_id;
1812 r2.field.qidix = slot;
/* First write: select the replay-list haswork bit. */
1814 r2.field.rlist_haswork_v = 1;
1816 DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
/* Second write: reset r2 and select the non-atomic haswork bit. */
1818 memset(&r2, 0, sizeof(r2));
1820 r2.field.cq = port->id.phys_id;
1821 r2.field.qidix = slot;
1823 r2.field.nalb_haswork_v = 1;
1825 DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
/* Program the queue's configured QID inflight limit into hardware. */
1830 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
1831 struct dlb2_ldb_queue *queue)
1833 union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };
1835 r0.field.limit = queue->num_qid_inflights;
1837 DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
/* Restore the queue's QID inflight limit register to its reset default,
 * effectively removing any software-imposed limit.
 */
1840 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
1841 struct dlb2_ldb_queue *queue)
1844 DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
1845 DLB2_LSP_QID_LDB_INFL_LIM_RST);
/* Complete a dynamic QID -> CQ map once the queue's inflight count has
 * drained to zero: perform the static map, set haswork bits, clear then
 * re-propagate IF status across all mapped ports, restore the inflight
 * limit, and re-enable the CQs mapped to the queue.
 */
1848 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
1849 struct dlb2_hw_domain *domain,
1850 struct dlb2_ldb_port *port,
1851 struct dlb2_ldb_queue *queue)
1853 struct dlb2_list_entry *iter;
1854 union dlb2_lsp_qid_ldb_infl_cnt r0;
1855 enum dlb2_qid_map_state state;
/* Precondition: the dynamic-map flow only calls this at zero inflights. */
1860 r0.val = DLB2_CSR_RD(hw,
1861 DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
1863 if (r0.field.count) {
1865 "[%s()] Internal error: non-zero QID inflight count\n",
1871 * Static map the port and set its corresponding has_work bits.
1873 state = DLB2_QUEUE_MAP_IN_PROG;
1874 if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
1877 if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1879 "[%s():%d] Internal error: port slot tracking failed\n",
1880 __func__, __LINE__);
/* The priority was stashed in the slot when the map was requested. */
1884 prio = port->qid_map[slot].priority;
1887 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
1888 * the port's qid_map state.
1890 ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
1894 ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
1899 * Ensure IF_status(cq,qid) is 0 before enabling the port to
1900 * prevent spurious schedules to cause the queue's inflight
1901 * count to increase.
1903 dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
1905 /* Reset the queue's inflight status */
/* NOTE(review): this loop reuses 'port' and 'slot' as iteration scratch. */
1906 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1907 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1908 state = DLB2_QUEUE_MAPPED;
1909 if (!dlb2_port_find_slot_queue(port, state,
1913 dlb2_ldb_port_set_queue_if_status(hw, port, slot);
1917 dlb2_ldb_queue_set_inflight_limit(hw, queue);
1919 /* Re-enable CQs mapped to this queue */
1920 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
1922 /* If this queue has other mappings pending, clear its inflight limit */
1923 if (queue->num_pending_additions > 0)
1924 dlb2_ldb_queue_clear_inflight_limit(hw, queue);
1930 * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
1931 * @hw: dlb2_hw handle for a particular device.
1932 * @port: load-balanced port
1933 * @queue: load-balanced queue
1934 * @priority: queue servicing priority
1936 * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
1937 * at a later point, and <0 if an error occurred.
/* Dynamic QID -> CQ map (see kernel-doc above): zero the QID's inflight
 * limit, reserve an unmapped slot as MAP_IN_PROG, and either finish the map
 * now (inflight count drained to zero after quiescing the CQs) or defer it
 * to the background worker.
 */
1939 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
1940 struct dlb2_ldb_port *port,
1941 struct dlb2_ldb_queue *queue,
1944 union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
1945 enum dlb2_qid_map_state state;
1946 struct dlb2_hw_domain *domain;
1947 int domain_id, slot, ret;
1949 domain_id = port->domain_id.phys_id;
1951 domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1952 if (domain == NULL) {
1954 "[%s()] Internal error: unable to find domain %d\n",
1955 __func__, port->domain_id.phys_id);
1960 * Set the QID inflight limit to 0 to prevent further scheduling of the
1963 DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);
1965 if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
1967 "Internal error: No available unmapped slots\n");
1971 if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1973 "[%s():%d] Internal error: port slot tracking failed\n",
1974 __func__, __LINE__);
/* Stash the mapping parameters; finish_map reads them back later. */
1978 port->qid_map[slot].qid = queue->id.phys_id;
1979 port->qid_map[slot].priority = priority;
1981 state = DLB2_QUEUE_MAP_IN_PROG;
1982 ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
/* First inflight check, with CQs still running. */
1986 r0.val = DLB2_CSR_RD(hw,
1987 DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
1989 if (r0.field.count) {
1991 * The queue is owed completions so it's not safe to map it
1992 * yet. Schedule a kernel thread to complete the mapping later,
1993 * once software has completed all the queue's inflight events.
1995 if (!os_worker_active(hw))
1996 os_schedule_work(hw);
2002 * Disable the affected CQ, and the CQs already mapped to the QID,
2003 * before reading the QID's inflight count a second time. There is an
2004 * unlikely race in which the QID may schedule one more QE after we
2005 * read an inflight count of 0, and disabling the CQs guarantees that
2006 * the race will not occur after a re-read of the inflight count
2010 dlb2_ldb_port_cq_disable(hw, port);
2012 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
/* Second inflight check, now race-free with the CQs quiesced. */
2014 r0.val = DLB2_CSR_RD(hw,
2015 DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
2017 if (r0.field.count) {
/* Undo the quiesce and fall back to the deferred path. */
2019 dlb2_ldb_port_cq_enable(hw, port);
2021 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2024 * The queue is owed completions so it's not safe to map it
2025 * yet. Schedule a kernel thread to complete the mapping later,
2026 * once software has completed all the queue's inflight events.
2028 if (!os_worker_active(hw))
2029 os_schedule_work(hw);
2034 return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
/* Attempt to complete every MAP_IN_PROG slot on this port: for each such
 * slot, re-check the target QID's inflight count (quiescing the CQs to
 * close the re-read race) and finish the dynamic map once it reads zero.
 */
2037 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
2038 struct dlb2_hw_domain *domain,
2039 struct dlb2_ldb_port *port)
2043 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2044 union dlb2_lsp_qid_ldb_infl_cnt r0;
2045 struct dlb2_ldb_queue *queue;
/* Only slots with a map still in progress are of interest. */
2048 if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
2051 qid = port->qid_map[i].qid;
2053 queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
2055 if (queue == NULL) {
2057 "[%s()] Internal error: unable to find queue %d\n",
/* First inflight check (skip-if-nonzero logic partly elided). */
2062 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
2068 * Disable the affected CQ, and the CQs already mapped to the
2069 * QID, before reading the QID's inflight count a second time.
2070 * There is an unlikely race in which the QID may schedule one
2071 * more QE after we read an inflight count of 0, and disabling
2072 * the CQs guarantees that the race will not occur after a
2073 * re-read of the inflight count register.
2076 dlb2_ldb_port_cq_disable(hw, port);
2078 dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2080 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
2082 if (r0.field.count) {
/* Still draining: undo the quiesce and try again later. */
2084 dlb2_ldb_port_cq_enable(hw, port);
2086 dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2091 dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
/* Drive all in-progress QID map procedures for a configured domain and
 * return the number of additions still pending afterwards.
 */
2096 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
2097 struct dlb2_hw_domain *domain)
2099 struct dlb2_list_entry *iter;
2100 struct dlb2_ldb_port *port;
/* Nothing to do for unconfigured domains or when no maps are pending. */
2104 if (!domain->configured || domain->num_pending_additions == 0)
2107 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2108 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2109 dlb2_domain_finish_map_port(hw, domain, port);
2112 return domain->num_pending_additions;
/* Unmap a queue from a port in hardware: clear the slot's valid bit in
 * CQ2PRIOV and clear this CQ's bit in the three QID2CQIDX vectors, then
 * move the slot's software state to UNMAPPED.
 */
2115 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
2116 struct dlb2_ldb_port *port,
2117 struct dlb2_ldb_queue *queue)
2119 enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
2120 union dlb2_lsp_cq2priov r0;
2121 union dlb2_atm_qid2cqidix_00 r1;
2122 union dlb2_lsp_qid2cqidix_00 r2;
2123 union dlb2_lsp_qid2cqidix2_00 r3;
2128 /* Find the queue's slot */
2129 mapped = DLB2_QUEUE_MAPPED;
2130 in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
2131 pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
/* The slot may be in any of the three "currently mapped" states. */
2133 if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
2134 !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
2135 !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
2137 "[%s():%d] Internal error: QID %d isn't mapped\n",
2138 __func__, __LINE__, queue->id.phys_id);
2142 if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2144 "[%s():%d] Internal error: port slot tracking failed\n",
2145 __func__, __LINE__);
2149 port_id = port->id.phys_id;
2150 queue_id = queue->id.phys_id;
2152 /* Read-modify-write the priority and valid bit register */
2153 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));
/* Invalidate this slot; priority bits are left in place. */
2155 r0.field.v &= ~(1 << i);
2157 DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);
/* Each QID2CQIDX register covers 4 CQs; select by port_id / 4. */
2159 r1.val = DLB2_CSR_RD(hw,
2160 DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));
2162 r2.val = DLB2_CSR_RD(hw,
2163 DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));
2165 r3.val = DLB2_CSR_RD(hw,
2166 DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));
/* Clear this CQ's slot bit in the matching sub-field (port_id % 4). */
2168 switch (port_id % 4) {
2170 r1.field.cq_p0 &= ~(1 << i);
2171 r2.field.cq_p0 &= ~(1 << i);
2172 r3.field.cq_p0 &= ~(1 << i);
2176 r1.field.cq_p1 &= ~(1 << i);
2177 r2.field.cq_p1 &= ~(1 << i);
2178 r3.field.cq_p1 &= ~(1 << i);
2182 r1.field.cq_p2 &= ~(1 << i);
2183 r2.field.cq_p2 &= ~(1 << i);
2184 r3.field.cq_p2 &= ~(1 << i);
2188 r1.field.cq_p3 &= ~(1 << i);
2189 r2.field.cq_p3 &= ~(1 << i);
2190 r3.field.cq_p3 &= ~(1 << i);
2195 DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
2199 DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
2203 DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
/* Finally update the software bookkeeping for this slot. */
2208 unmapped = DLB2_QUEUE_UNMAPPED;
2210 return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
/* Map a queue to a port, choosing the dynamic procedure once the domain has
 * been started (traffic may be inflight) and the simpler static procedure
 * before that.
 */
2213 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
2214 struct dlb2_hw_domain *domain,
2215 struct dlb2_ldb_port *port,
2216 struct dlb2_ldb_queue *queue,
2219 if (domain->started)
2220 return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
2222 return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
/* Finish unmapping one {port, slot}: update the hardware map vectors, clear
 * the slot's haswork bits, restore the IF status, re-enable the CQ, and —
 * if a map was queued behind the unmap — kick off that pending map.
 */
2226 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
2227 struct dlb2_hw_domain *domain,
2228 struct dlb2_ldb_port *port,
2231 enum dlb2_qid_map_state state;
2232 struct dlb2_ldb_queue *queue;
/* Resolve the queue currently recorded in this slot. */
2234 queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
/* Capture the state before unmap rewrites it. */
2236 state = port->qid_map[slot].state;
2238 /* Update the QID2CQIDX and CQ2QID vectors */
2239 dlb2_ldb_port_unmap_qid(hw, port, queue);
2242 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
2245 dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
2247 /* Reset the {CQ, slot} to its default state */
2248 dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2250 /* Re-enable the CQ if it wasn't manually disabled by the user */
2252 dlb2_ldb_port_cq_enable(hw, port);
2255 * If there is a mapping that is pending this slot's removal, perform
2258 if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
2259 struct dlb2_ldb_port_qid_map *map;
2260 struct dlb2_ldb_queue *map_queue;
2263 map = &port->qid_map[slot];
/* Promote the queued (pending) mapping into the active fields. */
2265 map->qid = map->pending_qid;
2266 map->priority = map->pending_priority;
2268 map_queue = &hw->rsrcs.ldb_queues[map->qid];
2269 prio = map->priority;
2271 dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
/* Finish all pending unmaps on a port, but only once the CQ has drained its
 * outstanding inflights (return values for the early-out paths are elided).
 */
2275 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
2276 struct dlb2_hw_domain *domain,
2277 struct dlb2_ldb_port *port)
2279 union dlb2_lsp_cq_ldb_infl_cnt r0;
/* Fast path: nothing pending on this port. */
2282 if (port->num_pending_removals == 0)
2286 * The unmap requires all the CQ's outstanding inflights to be
2289 r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
2290 if (r0.field.count > 0)
/* Complete every slot still marked as an in-progress unmap. */
2293 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2294 struct dlb2_ldb_port_qid_map *map;
2296 map = &port->qid_map[i];
2298 if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
2299 map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
2302 dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
/* Drive all in-progress QID unmap procedures for a configured domain and
 * return the number of removals still pending afterwards.
 */
2309 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
2310 struct dlb2_hw_domain *domain)
2312 struct dlb2_list_entry *iter;
2313 struct dlb2_ldb_port *port;
/* Nothing to do for unconfigured domains or when no unmaps are pending. */
2317 if (!domain->configured || domain->num_pending_removals == 0)
2320 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2321 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2322 dlb2_domain_finish_unmap_port(hw, domain, port);
2325 return domain->num_pending_removals;
/* Disable every in-use load-balanced CQ in the domain, clearing each port's
 * software 'enabled' flag before the hardware disable.
 */
2328 static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
2329 struct dlb2_hw_domain *domain)
2331 struct dlb2_list_entry *iter;
2332 struct dlb2_ldb_port *port;
2336 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2337 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2338 port->enabled = false;
2340 dlb2_ldb_port_cq_disable(hw, port);
/* Debug-log the parameters of a domain reset request. */
2345 static void dlb2_log_reset_domain(struct dlb2_hw *hw,
2348 unsigned int vdev_id)
2350 DLB2_HW_DBG(hw, "DLB2 reset domain:\n")
2352 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2353 DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
/* Invalidate the vdev's virtual producer-port mappings for every directed
 * port in the domain (r1 is written with its zero-initialized value; the
 * initialization line appears elided).
 */
2356 static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
2357 struct dlb2_hw_domain *domain,
2358 unsigned int vdev_id)
2360 struct dlb2_list_entry *iter;
2361 union dlb2_sys_vf_dir_vpp_v r1;
2362 struct dlb2_dir_pq_pair *port;
2367 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
/* SR-IOV exposes virtual IDs; Scalable IOV uses the physical ID. */
2371 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2372 virt_id = port->id.virt_id;
2374 virt_id = port->id.phys_id;
/* Per-vdev register stride: vdev_id * max directed ports. */
2376 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
2378 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);
/* Invalidate the vdev's virtual producer-port mappings for every
 * load-balanced port in the domain, across all classes of service.
 */
2382 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2383 struct dlb2_hw_domain *domain,
2384 unsigned int vdev_id)
2386 struct dlb2_list_entry *iter;
2387 union dlb2_sys_vf_ldb_vpp_v r1;
2388 struct dlb2_ldb_port *port;
2394 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2395 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
/* SR-IOV exposes virtual IDs; Scalable IOV uses the physical ID. */
2399 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2400 virt_id = port->id.virt_id;
2402 virt_id = port->id.phys_id;
/* Per-vdev register stride: vdev_id * max LDB ports. */
2404 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2406 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r1.val);
/* Disable CQ interrupts (timer and depth thresholds) and the watchdog for
 * every load-balanced port in the domain.
 */
2412 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2413 struct dlb2_hw_domain *domain)
2415 struct dlb2_list_entry *iter;
2416 union dlb2_chp_ldb_cq_int_enb r0 = { {0} };
2417 union dlb2_chp_ldb_cq_wd_enb r1 = { {0} };
2418 struct dlb2_ldb_port *port;
/* Explicitly zero both enable bits (already zero from the initializer). */
2422 r0.field.en_tim = 0;
2423 r0.field.en_depth = 0;
2425 r1.field.wd_enable = 0;
2427 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2428 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2430 DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
2434 DLB2_CHP_LDB_CQ_WD_ENB(port->id.phys_id),
/* Disable CQ interrupts (timer and depth thresholds) and the watchdog for
 * every directed port in the domain.
 */
2441 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2442 struct dlb2_hw_domain *domain)
2444 struct dlb2_list_entry *iter;
2445 union dlb2_chp_dir_cq_int_enb r0 = { {0} };
2446 union dlb2_chp_dir_cq_wd_enb r1 = { {0} };
2447 struct dlb2_dir_pq_pair *port;
/* Explicitly zero both enable bits (already zero from the initializer). */
2450 r0.field.en_tim = 0;
2451 r0.field.en_depth = 0;
2453 r1.field.wd_enable = 0;
2455 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2457 DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
2461 DLB2_CHP_DIR_CQ_WD_ENB(port->id.phys_id),
/* Revoke enqueue permission for every LDB queue in the domain by clearing
 * its VAS-QID valid entry, and for vdev-owned queues also tear down the
 * virtual-QID translation entries.
 */
2467 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2468 struct dlb2_hw_domain *domain)
/* Per-domain base into the flat VAS-QID permission table. */
2470 int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2471 struct dlb2_list_entry *iter;
2472 struct dlb2_ldb_queue *queue;
2475 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2476 union dlb2_sys_ldb_vasqid_v r0 = { {0} };
2477 union dlb2_sys_ldb_qid2vqid r1 = { {0} };
2478 union dlb2_sys_vf_ldb_vqid_v r2 = { {0} };
2479 union dlb2_sys_vf_ldb_vqid2qid r3 = { {0} };
2482 idx = domain_offset + queue->id.phys_id;
/* Writing the zeroed union clears the queue's VAS permission. */
2484 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), r0.val);
2486 if (queue->id.vdev_owned) {
2488 DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
/* Re-index into the owning vdev's virtual-QID table. */
2491 idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2495 DLB2_SYS_VF_LDB_VQID_V(idx),
2499 DLB2_SYS_VF_LDB_VQID2QID(idx),
/* Revoke enqueue permission for every directed queue in the domain by
 * clearing its VAS-QID valid entry, and for vdev-owned queues also tear
 * down the virtual-QID translation entries.
 */
2506 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2507 struct dlb2_hw_domain *domain)
/* Per-domain base into the flat directed VAS-QID permission table. */
2509 int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS;
2510 struct dlb2_list_entry *iter;
2511 struct dlb2_dir_pq_pair *queue;
2514 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2515 union dlb2_sys_dir_vasqid_v r0 = { {0} };
2516 union dlb2_sys_vf_dir_vqid_v r1 = { {0} };
2517 union dlb2_sys_vf_dir_vqid2qid r2 = { {0} };
2520 idx = domain_offset + queue->id.phys_id;
/* Writing the zeroed union clears the queue's VAS permission. */
2522 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);
2524 if (queue->id.vdev_owned) {
/* Re-index into the owning vdev's virtual-QID table. */
2525 idx = queue->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS +
2529 DLB2_SYS_VF_DIR_VQID_V(idx),
2533 DLB2_SYS_VF_DIR_VQID2QID(idx),
/* Disable sequence-number checking on every load-balanced port in the
 * domain (r1's field setup appears on elided lines).
 */
2539 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2540 struct dlb2_hw_domain *domain)
2542 struct dlb2_list_entry *iter;
2543 union dlb2_chp_sn_chk_enbl r1;
2544 struct dlb2_ldb_port *port;
2550 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2551 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2553 DLB2_CHP_SN_CHK_ENBL(port->id.phys_id),
/* Poll each in-use LDB CQ until its inflight count reaches zero, bounded by
 * DLB2_MAX_CQ_COMP_CHECK_LOOPS iterations; log an error when a port fails
 * to drain. NOTE(review): the inner loop reuses 'i' — an inner declaration
 * shadowing the outer COS index appears to be elided here; confirm, since
 * without it the outer loop would be corrupted.
 */
2558 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2559 struct dlb2_hw_domain *domain)
2561 struct dlb2_list_entry *iter;
2562 struct dlb2_ldb_port *port;
2566 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2567 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
/* Bounded busy-poll on the CQ inflight count. */
2570 for (i = 0; i < DLB2_MAX_CQ_COMP_CHECK_LOOPS; i++) {
2571 if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2575 if (i == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2577 "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2578 __func__, port->id.phys_id);
/* Disable every in-use directed CQ in the domain, clearing each port's
 * software 'enabled' flag before the hardware disable.
 */
2587 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2588 struct dlb2_hw_domain *domain)
2590 struct dlb2_list_entry *iter;
2591 struct dlb2_dir_pq_pair *port;
2594 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2595 port->enabled = false;
2597 dlb2_dir_port_cq_disable(hw, port);
/* Invalidate the PP_V (producer-port valid) entry of every directed port in
 * the domain (r1's field setup appears on elided lines).
 */
2602 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2603 struct dlb2_hw_domain *domain)
2605 struct dlb2_list_entry *iter;
2606 struct dlb2_dir_pq_pair *port;
2607 union dlb2_sys_dir_pp_v r1;
2612 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2614 DLB2_SYS_DIR_PP_V(port->id.phys_id),
/* Invalidate the PP_V (producer-port valid) entry of every load-balanced
 * port in the domain, across all classes of service.
 */
2619 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2620 struct dlb2_hw_domain *domain)
2622 struct dlb2_list_entry *iter;
2623 union dlb2_sys_ldb_pp_v r1;
2624 struct dlb2_ldb_port *port;
2630 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2631 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2633 DLB2_SYS_LDB_PP_V(port->id.phys_id),
/* Verify a completed domain reset: every LDB queue must be empty, every LDB
 * CQ must have zero inflights and zero tokens, and every directed queue/CQ
 * must likewise be drained. Each failure is logged as an internal error.
 */
2638 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2639 struct dlb2_hw_domain *domain)
2641 struct dlb2_list_entry *iter;
2642 struct dlb2_dir_pq_pair *dir_port;
2643 struct dlb2_ldb_port *ldb_port;
2644 struct dlb2_ldb_queue *queue;
2649 * Confirm that all the domain's queue's inflight counts and AQED
2650 * active counts are 0.
2652 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2653 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2655 "[%s()] Internal error: failed to empty ldb queue %d\n",
2656 __func__, queue->id.phys_id);
2661 /* Confirm that all the domain's CQs inflight and token counts are 0. */
2662 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2663 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2664 if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2665 dlb2_ldb_cq_token_count(hw, ldb_port)) {
2667 "[%s()] Internal error: failed to empty ldb port %d\n",
2668 __func__, ldb_port->id.phys_id);
/* Directed side: queue emptiness and CQ token count per PQ pair. */
2674 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2675 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2677 "[%s()] Internal error: failed to empty dir queue %d\n",
2678 __func__, dir_port->id.phys_id);
2682 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2684 "[%s()] Internal error: failed to empty dir port %d\n",
2685 __func__, dir_port->id.phys_id);
/* Restore every per-port register of one load-balanced port to its reset
 * default: VAS/vdev translation, CQ disable/depth/inflight limit, history
 * list bounds and pointers, interrupt thresholds, CQ address/attributes,
 * scheduling counters, and the CQ2QID/CQ2PRIOV map registers. For
 * vdev-owned ports the virtual producer-port entries are reset as well.
 */
2693 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2694 struct dlb2_ldb_port *port)
2697 DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2698 DLB2_SYS_LDB_PP2VAS_RST);
2701 DLB2_CHP_LDB_CQ2VAS(port->id.phys_id),
2702 DLB2_CHP_LDB_CQ2VAS_RST);
2705 DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2706 DLB2_SYS_LDB_PP2VDEV_RST);
2708 if (port->id.vdev_owned) {
2713 * DLB uses producer port address bits 17:12 to determine the
2714 * producer port ID. In Scalable IOV mode, PP accesses come
2715 * through the PF MMIO window for the physical producer port,
2716 * so for translation purposes the virtual and physical port
2719 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2720 virt_id = port->id.virt_id;
2722 virt_id = port->id.phys_id;
/* Index into the owning vdev's virtual-PP table. */
2724 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2727 DLB2_SYS_VF_LDB_VPP2PP(offs),
2728 DLB2_SYS_VF_LDB_VPP2PP_RST);
2731 DLB2_SYS_VF_LDB_VPP_V(offs),
2732 DLB2_SYS_VF_LDB_VPP_V_RST);
2736 DLB2_SYS_LDB_PP_V(port->id.phys_id),
2737 DLB2_SYS_LDB_PP_V_RST);
2740 DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id),
2741 DLB2_LSP_CQ_LDB_DSBL_RST);
2744 DLB2_CHP_LDB_CQ_DEPTH(port->id.phys_id),
2745 DLB2_CHP_LDB_CQ_DEPTH_RST);
2748 DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id),
2749 DLB2_LSP_CQ_LDB_INFL_LIM_RST);
/* History-list window and its pop/push cursors. */
2752 DLB2_CHP_HIST_LIST_LIM(port->id.phys_id),
2753 DLB2_CHP_HIST_LIST_LIM_RST);
2756 DLB2_CHP_HIST_LIST_BASE(port->id.phys_id),
2757 DLB2_CHP_HIST_LIST_BASE_RST);
2760 DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id),
2761 DLB2_CHP_HIST_LIST_POP_PTR_RST);
2764 DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
2765 DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
/* Interrupt thresholds, enables, and the CQ ISR. */
2768 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id),
2769 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2772 DLB2_CHP_LDB_CQ_TMR_THRSH(port->id.phys_id),
2773 DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2776 DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
2777 DLB2_CHP_LDB_CQ_INT_ENB_RST);
2780 DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2781 DLB2_SYS_LDB_CQ_ISR_RST);
2784 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
2785 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2788 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2789 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2792 DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
2793 DLB2_CHP_LDB_CQ_WPTR_RST);
2796 DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
2797 DLB2_LSP_CQ_LDB_TKN_CNT_RST);
/* CQ base address, address type, PASID, and ordering attributes. */
2800 DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2801 DLB2_SYS_LDB_CQ_ADDR_L_RST);
2804 DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2805 DLB2_SYS_LDB_CQ_ADDR_U_RST);
2808 DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2809 DLB2_SYS_LDB_CQ_AT_RST);
2812 DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
2813 DLB2_SYS_LDB_CQ_PASID_RST);
2816 DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2817 DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
/* Scheduling statistics counters. */
2820 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(port->id.phys_id),
2821 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2824 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(port->id.phys_id),
2825 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
/* QID-map and priority/valid registers programmed by map_qid_static. */
2828 DLB2_LSP_CQ2QID0(port->id.phys_id),
2829 DLB2_LSP_CQ2QID0_RST);
2832 DLB2_LSP_CQ2QID1(port->id.phys_id),
2833 DLB2_LSP_CQ2QID1_RST);
2836 DLB2_LSP_CQ2PRIOV(port->id.phys_id),
2837 DLB2_LSP_CQ2PRIOV_RST);
/* Reset the per-port registers of every load-balanced port in the domain,
 * across all classes of service.
 */
2840 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2841 struct dlb2_hw_domain *domain)
2843 struct dlb2_list_entry *iter;
2844 struct dlb2_ldb_port *port;
2848 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2849 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2850 __dlb2_domain_reset_ldb_port_registers(hw, port);
/* Restore every per-port register of one directed port/queue pair to its
 * reset default: CQ2VAS, CQ disable, opt-clear, depth, interrupt thresholds
 * and enables, token depth/count, CQ address/attributes, scheduling
 * counters, and the producer-port translation entries. For vdev-owned
 * ports the virtual producer-port entries are reset as well.
 */
2855 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2856 struct dlb2_dir_pq_pair *port)
2859 DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
2860 DLB2_CHP_DIR_CQ2VAS_RST);
2863 DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id),
2864 DLB2_LSP_CQ_DIR_DSBL_RST);
/* OPT_CLR takes the CQ's physical ID directly rather than a reset value. */
2866 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2869 DLB2_CHP_DIR_CQ_DEPTH(port->id.phys_id),
2870 DLB2_CHP_DIR_CQ_DEPTH_RST);
/* Interrupt thresholds, enables, and the CQ ISR. */
2873 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id),
2874 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2877 DLB2_CHP_DIR_CQ_TMR_THRSH(port->id.phys_id),
2878 DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2881 DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
2882 DLB2_CHP_DIR_CQ_INT_ENB_RST);
2885 DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2886 DLB2_SYS_DIR_CQ_ISR_RST);
2889 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
2890 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2893 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2894 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2897 DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
2898 DLB2_CHP_DIR_CQ_WPTR_RST);
2901 DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
2902 DLB2_LSP_CQ_DIR_TKN_CNT_RST);
/* CQ base address, address type, PASID, format, and ordering. */
2905 DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2906 DLB2_SYS_DIR_CQ_ADDR_L_RST);
2909 DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2910 DLB2_SYS_DIR_CQ_ADDR_U_RST);
2913 DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2914 DLB2_SYS_DIR_CQ_AT_RST);
2917 DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
2918 DLB2_SYS_DIR_CQ_PASID_RST);
2921 DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2922 DLB2_SYS_DIR_CQ_FMT_RST);
2925 DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2926 DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
/* Scheduling statistics counters. */
2929 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(port->id.phys_id),
2930 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
2933 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(port->id.phys_id),
2934 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
/* Producer-port translation entries (CQ2VAS written a second time here). */
2937 DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
2938 DLB2_SYS_DIR_PP2VAS_RST);
2941 DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
2942 DLB2_CHP_DIR_CQ2VAS_RST);
2945 DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
2946 DLB2_SYS_DIR_PP2VDEV_RST);
2948 if (port->id.vdev_owned) {
2953 * DLB uses producer port address bits 17:12 to determine the
2954 * producer port ID. In Scalable IOV mode, PP accesses come
2955 * through the PF MMIO window for the physical producer port,
2956 * so for translation purposes the virtual and physical port
2959 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2960 virt_id = port->id.virt_id;
2962 virt_id = port->id.phys_id;
/* Index into the owning vdev's virtual-PP table. */
2964 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
2967 DLB2_SYS_VF_DIR_VPP2PP(offs),
2968 DLB2_SYS_VF_DIR_VPP2PP_RST);
2971 DLB2_SYS_VF_DIR_VPP_V(offs),
2972 DLB2_SYS_VF_DIR_VPP_V_RST);
2976 DLB2_SYS_DIR_PP_V(port->id.phys_id),
2977 DLB2_SYS_DIR_PP_V_RST);
/* Reset the per-port registers of every directed port/queue pair owned by
 * this domain, delegating the CSR writes to the per-port helper above.
 */
2980 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2981 struct dlb2_hw_domain *domain)
2983 struct dlb2_list_entry *iter;
2984 struct dlb2_dir_pq_pair *port;
2987 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2988 __dlb2_domain_reset_dir_port_registers(hw, port);
/* Reset every CSR associated with each load-balanced queue owned by this
 * domain: enqueue counters, depth limits/thresholds, inflight limits,
 * sequence-number state, validity bits, and the QID->CQ index tables.
 * NOTE(review): the DLB2_CSR_WR() call heads for these argument pairs are
 * elided in this listing — each (register, *_RST) pair below is one write.
 */
2991 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
2992 struct dlb2_hw_domain *domain)
2994 struct dlb2_list_entry *iter;
2995 struct dlb2_ldb_queue *queue;
2998 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2999 unsigned int queue_id = queue->id.phys_id;
/* Non-atomic and atomic total-enqueue counters (low/high words) */
3003 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(queue_id),
3004 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3007 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(queue_id),
3008 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3011 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(queue_id),
3012 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3015 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(queue_id),
3016 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
/* Depth limits, inflight limits, and depth thresholds */
3019 DLB2_LSP_QID_NALDB_MAX_DEPTH(queue_id),
3020 DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3023 DLB2_LSP_QID_LDB_INFL_LIM(queue_id),
3024 DLB2_LSP_QID_LDB_INFL_LIM_RST);
3027 DLB2_LSP_QID_AQED_ACTIVE_LIM(queue_id),
3028 DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3031 DLB2_LSP_QID_ATM_DEPTH_THRSH(queue_id),
3032 DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3035 DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue_id),
3036 DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3039 DLB2_SYS_LDB_QID_ITS(queue_id),
3040 DLB2_SYS_LDB_QID_ITS_RST);
/* Ordered-queue sequence-number state */
3043 DLB2_CHP_ORD_QID_SN(queue_id),
3044 DLB2_CHP_ORD_QID_SN_RST);
3047 DLB2_CHP_ORD_QID_SN_MAP(queue_id),
3048 DLB2_CHP_ORD_QID_SN_MAP_RST);
/* QID validity and configuration-valid bits */
3051 DLB2_SYS_LDB_QID_V(queue_id),
3052 DLB2_SYS_LDB_QID_V_RST);
3055 DLB2_SYS_LDB_QID_CFG_V(queue_id),
3056 DLB2_SYS_LDB_QID_CFG_V_RST);
/* If this queue claimed a sequence-number slot, reset the slot-shift
 * register of its group (group selects which of the two offsets is used).
 */
3058 if (queue->sn_cfg_valid) {
3061 offs[0] = DLB2_RO_PIPE_GRP_0_SLT_SHFT(queue->sn_slot);
3062 offs[1] = DLB2_RO_PIPE_GRP_1_SLT_SHFT(queue->sn_slot);
3065 offs[queue->sn_group],
3066 DLB2_RO_PIPE_GRP_0_SLT_SHFT_RST);
/* Clear all QID->CQ index-table entries for this queue */
3069 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3071 DLB2_LSP_QID2CQIDIX(queue_id, i),
3072 DLB2_LSP_QID2CQIDIX_00_RST);
3075 DLB2_LSP_QID2CQIDIX2(queue_id, i),
3076 DLB2_LSP_QID2CQIDIX2_00_RST);
3079 DLB2_ATM_QID2CQIDIX(queue_id, i),
3080 DLB2_ATM_QID2CQIDIX_00_RST);
/* Reset every queue-side CSR of each directed port/queue pair owned by
 * this domain: max depth, total-enqueue counters, depth threshold, and
 * the QID validity bits.
 * NOTE(review): the DLB2_CSR_WR() call heads are elided in this listing —
 * each (register, *_RST) pair below is one write.
 */
3085 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3086 struct dlb2_hw_domain *domain)
3088 struct dlb2_list_entry *iter;
3089 struct dlb2_dir_pq_pair *queue;
3092 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3094 DLB2_LSP_QID_DIR_MAX_DEPTH(queue->id.phys_id),
3095 DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3098 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(queue->id.phys_id),
3099 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3102 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(queue->id.phys_id),
3103 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3106 DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
3107 DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3110 DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3111 DLB2_SYS_DIR_QID_ITS_RST);
3114 DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3115 DLB2_SYS_DIR_QID_V_RST);
/* Reset all hardware registers belonging to a domain: per-port and
 * per-queue CSRs (via the helpers above), then the domain's own
 * credit-pool registers.
 */
3119 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3120 struct dlb2_hw_domain *domain)
3122 dlb2_domain_reset_ldb_port_registers(hw, domain);
3124 dlb2_domain_reset_dir_port_registers(hw, domain);
3126 dlb2_domain_reset_ldb_queue_registers(hw, domain);
3128 dlb2_domain_reset_dir_queue_registers(hw, domain);
/* Reset the domain's LDB and DIR credit (VAS) registers */
3131 DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3132 DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3135 DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3136 DLB2_CHP_CFG_DIR_VAS_CRD_RST);
/* Tear down the driver's software state for a reset domain: return all of
 * its queues, ports, credits, and history-list entries to the parent
 * function's avail lists/pools, clear the domain's bookkeeping fields, and
 * move the domain itself back to the function's avail_domains list so it
 * can be reconfigured. Returns 0 on success, < 0 on internal error
 * (e.g. the hist-list bitmap restore fails).
 */
3139 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3140 struct dlb2_hw_domain *domain)
3142 struct dlb2_dir_pq_pair *tmp_dir_port;
3143 struct dlb2_ldb_queue *tmp_ldb_queue;
3144 struct dlb2_ldb_port *tmp_ldb_port;
3145 struct dlb2_list_entry *iter1;
3146 struct dlb2_list_entry *iter2;
3147 struct dlb2_function_resources *rsrcs;
3148 struct dlb2_dir_pq_pair *dir_port;
3149 struct dlb2_ldb_queue *ldb_queue;
3150 struct dlb2_ldb_port *ldb_port;
3151 struct dlb2_list_head *list;
/* The safe-iteration temporaries are only consumed by the list macros;
 * RTE_SET_USED() silences unused-variable warnings in some builds.
 */
3153 RTE_SET_USED(tmp_dir_port);
3154 RTE_SET_USED(tmp_ldb_queue);
3155 RTE_SET_USED(tmp_ldb_port);
3156 RTE_SET_USED(iter1);
3157 RTE_SET_USED(iter2);
3159 rsrcs = domain->parent_func;
3161 /* Move the domain's ldb queues to the function's avail list */
3162 list = &domain->used_ldb_queues;
3163 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
/* Release the queue's sequence-number slot back to its SN group */
3164 if (ldb_queue->sn_cfg_valid) {
3165 struct dlb2_sn_group *grp;
3167 grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3169 dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3170 ldb_queue->sn_cfg_valid = false;
3173 ldb_queue->owned = false;
3174 ldb_queue->num_mappings = 0;
3175 ldb_queue->num_pending_additions = 0;
3177 dlb2_list_del(&domain->used_ldb_queues,
3178 &ldb_queue->domain_list);
3179 dlb2_list_add(&rsrcs->avail_ldb_queues,
3180 &ldb_queue->func_list);
3181 rsrcs->num_avail_ldb_queues++;
/* Queues the domain reserved but never configured go back too */
3184 list = &domain->avail_ldb_queues;
3185 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3186 ldb_queue->owned = false;
3188 dlb2_list_del(&domain->avail_ldb_queues,
3189 &ldb_queue->domain_list);
3190 dlb2_list_add(&rsrcs->avail_ldb_queues,
3191 &ldb_queue->func_list);
3192 rsrcs->num_avail_ldb_queues++;
3195 /* Move the domain's ldb ports to the function's avail list */
3196 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3197 list = &domain->used_ldb_ports[i];
3198 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
/* Clear per-port state and mark every CQ->QID map slot unmapped */
3202 ldb_port->owned = false;
3203 ldb_port->configured = false;
3204 ldb_port->num_pending_removals = 0;
3205 ldb_port->num_mappings = 0;
3206 ldb_port->init_tkn_cnt = 0;
3207 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3208 ldb_port->qid_map[j].state =
3209 DLB2_QUEUE_UNMAPPED;
3211 dlb2_list_del(&domain->used_ldb_ports[i],
3212 &ldb_port->domain_list);
3213 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3214 &ldb_port->func_list);
3215 rsrcs->num_avail_ldb_ports[i]++;
3218 list = &domain->avail_ldb_ports[i];
3219 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3221 ldb_port->owned = false;
3223 dlb2_list_del(&domain->avail_ldb_ports[i],
3224 &ldb_port->domain_list);
3225 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3226 &ldb_port->func_list);
3227 rsrcs->num_avail_ldb_ports[i]++;
3231 /* Move the domain's dir ports to the function's avail list */
3232 list = &domain->used_dir_pq_pairs;
3233 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3234 dir_port->owned = false;
3235 dir_port->port_configured = false;
3236 dir_port->init_tkn_cnt = 0;
3238 dlb2_list_del(&domain->used_dir_pq_pairs,
3239 &dir_port->domain_list);
3241 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3242 &dir_port->func_list);
3243 rsrcs->num_avail_dir_pq_pairs++;
3246 list = &domain->avail_dir_pq_pairs;
3247 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3248 dir_port->owned = false;
3250 dlb2_list_del(&domain->avail_dir_pq_pairs,
3251 &dir_port->domain_list);
3253 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3254 &dir_port->func_list);
3255 rsrcs->num_avail_dir_pq_pairs++;
3258 /* Return hist list entries to the function */
3259 ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3260 domain->hist_list_entry_base,
3261 domain->total_hist_list_entries);
/* A failure here means the domain's range was not marked used in the
 * function's bitmap — an internal accounting inconsistency.
 */
3264 "[%s()] Internal error: domain hist list base doesn't match the function's bitmap.\n",
3269 domain->total_hist_list_entries = 0;
3270 domain->avail_hist_list_entries = 0;
3271 domain->hist_list_entry_base = 0;
3272 domain->hist_list_entry_offset = 0;
/* Return LDB (QED) and DIR (DQED) credits to the function's pools */
3274 rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3275 domain->num_ldb_credits = 0;
3277 rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3278 domain->num_dir_credits = 0;
/* Both the unused and in-use atomic QE storage go back to the pool */
3280 rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3281 rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3282 domain->num_avail_aqed_entries = 0;
3283 domain->num_used_aqed_entries = 0;
3285 domain->num_pending_removals = 0;
3286 domain->num_pending_additions = 0;
3287 domain->configured = false;
3288 domain->started = false;
3291 * Move the domain out of the used_domains list and back to the
3292 * function's avail_domains list.
3294 dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3295 dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3296 rsrcs->num_avail_domains++;
/* Drain a non-empty LDB queue that has no CQ mapped to it: temporarily
 * map it to one of the domain's configured LDB ports (evicting an
 * existing mapping if the port's QID slots are full), then drain via
 * dlb2_domain_drain_mapped_queues(). Returns 0 on success, < 0 on error.
 */
3301 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3302 struct dlb2_hw_domain *domain,
3303 struct dlb2_ldb_queue *queue)
3305 struct dlb2_ldb_port *port;
3308 /* If a domain has LDB queues, it must have LDB ports */
3309 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3310 if (!dlb2_list_empty(&domain->used_ldb_ports[i]))
/* No CoS had a configured port — internal accounting error */
3314 if (i == DLB2_NUM_COS_DOMAINS) {
3316 "[%s()] Internal error: No configured LDB ports\n",
/* Use the first port of the first non-empty CoS list */
3321 port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i], typeof(*port));
3323 /* If necessary, free up a QID slot in this CQ */
3324 if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3325 struct dlb2_ldb_queue *mapped_queue;
3327 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3329 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
/* Map the unmapped queue (priority 0) so its QEs can be scheduled */
3334 ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3338 return dlb2_domain_drain_mapped_queues(hw, domain);
/* Drain every LDB queue in the domain that still holds QEs but has no CQ
 * mapping. No-op if the domain was never started. Returns 0 on success,
 * < 0 if draining a queue fails.
 */
3341 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3342 struct dlb2_hw_domain *domain)
3344 struct dlb2_list_entry *iter;
3345 struct dlb2_ldb_queue *queue;
3349 /* If the domain hasn't been started, there's no traffic to drain */
3350 if (!domain->started)
3354 * Pre-condition: the unattached queue must not have any outstanding
3355 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3356 * prior to this in dlb2_domain_drain_mapped_queues().
3358 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
/* Skip queues that are mapped (already drained) or already empty */
3359 if (queue->num_mappings != 0 ||
3360 dlb2_ldb_queue_is_empty(hw, queue))
3363 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3372 * dlb2_reset_domain() - Reset a DLB scheduling domain and its associated
3373 * hardware resources.
3374 * @hw: Contains the current state of the DLB2 hardware.
3375 * @domain_id: Domain ID
3376 * @vdev_req: Request came from a virtual device.
3377 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
3379 * Note: User software *must* stop sending to this domain's producer ports
3380 * before invoking this function, otherwise undefined behavior will result.
3382 * Return: returns < 0 on error, 0 otherwise.
3384 int dlb2_reset_domain(struct dlb2_hw *hw,
3387 unsigned int vdev_id)
3389 struct dlb2_hw_domain *domain;
3392 dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3394 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
/* Reject unknown or never-configured domains */
3396 if (domain == NULL || !domain->configured)
/* For vdev requests, first disable the domain's virtual producer ports */
3401 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3403 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3406 /* Disable CQ interrupts */
3407 dlb2_domain_disable_dir_port_interrupts(hw, domain);
3409 dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3412 * For each queue owned by this domain, disable its write permissions to
3413 * cause any traffic sent to it to be dropped. Well-behaved software
3414 * should not be sending QEs at this point.
3416 dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3418 dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3420 /* Turn off completion tracking on all the domain's PPs. */
3421 dlb2_domain_disable_ldb_seq_checks(hw, domain);
3424 * Disable the LDB CQs and drain them in order to complete the map and
3425 * unmap procedures, which require zero CQ inflights and zero QID
3426 * inflights respectively.
3428 dlb2_domain_disable_ldb_cqs(hw, domain);
3430 ret = dlb2_domain_drain_ldb_cqs(hw, domain, false);
3434 ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
/* Complete any in-flight QID unmap, then map, procedures */
3438 ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3442 ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3446 /* Re-enable the CQs in order to drain the mapped queues. */
3447 dlb2_domain_enable_ldb_cqs(hw, domain);
3449 ret = dlb2_domain_drain_mapped_queues(hw, domain);
3453 ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3457 /* Done draining LDB QEs, so disable the CQs. */
3458 dlb2_domain_disable_ldb_cqs(hw, domain);
3460 dlb2_domain_drain_dir_queues(hw, domain);
3462 /* Done draining DIR QEs, so disable the CQs. */
3463 dlb2_domain_disable_dir_cqs(hw, domain);
/* Disable all producer ports before verifying the hardware is idle */
3466 dlb2_domain_disable_dir_producer_ports(hw, domain);
3468 dlb2_domain_disable_ldb_producer_ports(hw, domain);
3470 ret = dlb2_domain_verify_reset_success(hw, domain);
3474 /* Reset the QID and port state. */
3475 dlb2_domain_reset_registers(hw, domain);
3477 /* Hardware reset complete. Reset the domain's software state */
3478 ret = dlb2_domain_reset_software_state(hw, domain);
/* Walk every domain on the device and complete any pending QID unmap
 * procedures, accumulating the per-domain counts into num.
 */
3485 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
3489 /* Finish queue unmap jobs for any domain that needs it */
3490 for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
3491 struct dlb2_hw_domain *domain = &hw->domains[i];
3493 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3499 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
3503 /* Finish queue map jobs for any domain that needs it */
3504 for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
3505 struct dlb2_hw_domain *domain = &hw->domains[i];
3507 num += dlb2_domain_finish_map_qid_procedures(hw, domain);