/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb2_user.h"

#include "dlb2_hw_types.h"
#include "dlb2_mbox.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"
#include "dlb2_resource.h"

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"
#define DLB2_DOM_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
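
/*
 * Illustrative note (not driver logic): these wrappers only pin the embedded
 * list-entry member name (domain_list vs. func_list). A typical traversal,
 * as used throughout this file, looks like:
 *
 *	struct dlb2_list_entry *iter;
 *	struct dlb2_dir_pq_pair *port;
 *	RTE_SET_USED(iter);
 *
 *	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
 *		dlb2_dir_port_cq_disable(hw, port);
 *
 * The RTE_SET_USED(iter) pattern below suggests some DLB2_LIST_FOR_EACH
 * implementations do not reference the iterator argument.
 */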
static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
	int i;

	dlb2_list_init_head(&domain->used_ldb_queues);
	dlb2_list_init_head(&domain->used_dir_pq_pairs);
	dlb2_list_init_head(&domain->avail_ldb_queues);
	dlb2_list_init_head(&domain->avail_dir_pq_pairs);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&domain->used_ldb_ports[i]);
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}
static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
{
	int i;

	dlb2_list_init_head(&rsrc->avail_domains);
	dlb2_list_init_head(&rsrc->used_domains);
	dlb2_list_init_head(&rsrc->avail_ldb_queues);
	dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
}
void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
{
	union dlb2_chp_cfg_chp_csr_ctrl r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

	r0.field.cfg_64bytes_qe_dir_cq_mode = 1;

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
			      struct dlb2_get_num_resources_args *arg,
			      bool vdev_req,
			      unsigned int vdev_id)
{
	struct dlb2_function_resources *rsrcs;
	struct dlb2_bitmap *map;
	int i;

	if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
		return -EINVAL;

	if (vdev_req)
		rsrcs = &hw->vdev[vdev_id];
	else
		rsrcs = &hw->pf;

	arg->num_sched_domains = rsrcs->num_avail_domains;

	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;

	arg->num_ldb_ports = 0;
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];

	arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
	arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
	arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
	arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];

	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;

	arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;

	map = rsrcs->avail_hist_list_entries;

	arg->num_hist_list_entries = dlb2_bitmap_count(map);

	arg->max_contiguous_hist_list_entries =
		dlb2_bitmap_longest_set_range(map);

	arg->num_ldb_credits = rsrcs->num_avail_qed_entries;

	arg->num_dir_credits = rsrcs->num_avail_dqed_entries;

	return 0;
}
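
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	struct dlb2_get_num_resources_args arg;
 *
 *	if (dlb2_hw_get_num_resources(hw, &arg, false, 0) == 0)
 *		DLB2_HW_DBG(hw, "%u domains, %u LDB queues free\n",
 *			    arg.num_sched_domains, arg.num_ldb_queues);
 *
 * The PF case is shown (vdev_req == false); a vdev request would instead
 * pass true and a vdev ID below DLB2_MAX_NUM_VDEVS.
 */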
void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
{
	union dlb2_chp_cfg_chp_csr_ctrl r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

	r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}
void dlb2_resource_free(struct dlb2_hw *hw)
{
	int i;

	if (hw->pf.avail_hist_list_entries)
		dlb2_bitmap_free(hw->pf.avail_hist_list_entries);

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		if (hw->vdev[i].avail_hist_list_entries)
			dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
	}
}
int dlb2_resource_init(struct dlb2_hw *hw)
{
	struct dlb2_list_entry *list;
	unsigned int i;
	int ret;

	/*
	 * For optimal load-balancing, ports that map to one or more QIDs in
	 * common should not be in numerical sequence. This is application
	 * dependent, but the driver interleaves port IDs as much as possible
	 * to reduce the likelihood of this. This initial allocation maximizes
	 * the average distance between an ID and its immediate neighbors (i.e.
	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
	 * 3, etc.).
	 */
	u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
		0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,
		16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
		32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
		48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
	};
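
	/*
	 * Note on the table above: within each row of 16, it is the
	 * permutation (k * 7) % 16 for k = 0..15 (offset by 16 per row), so
	 * IDs that are adjacent in allocation order are always 7 or 9 apart
	 * numerically. For example, the first three allocations yield ports
	 * 0, 7, and 14 rather than 0, 1, and 2.
	 */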
	/* Zero-out resource tracking data structures */
	memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
	memset(&hw->pf, 0, sizeof(hw->pf));

	dlb2_init_fn_rsrc_lists(&hw->pf);

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		memset(&hw->vdev[i], 0, sizeof(hw->vdev[i]));
		dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
	}

	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
		dlb2_init_domain_rsrc_lists(&hw->domains[i]);
		hw->domains[i].parent_func = &hw->pf;
	}

	/* Give all resources to the PF driver */
	hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
	for (i = 0; i < hw->pf.num_avail_domains; i++) {
		list = &hw->domains[i].func_list;

		dlb2_list_add(&hw->pf.avail_domains, list);
	}

	hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
		list = &hw->rsrcs.ldb_queues[i].func_list;

		dlb2_list_add(&hw->pf.avail_ldb_queues, list);
	}

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		hw->pf.num_avail_ldb_ports[i] =
			DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;

	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
		int cos_id = i >> DLB2_NUM_COS_DOMAINS;
		struct dlb2_ldb_port *port;

		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

		dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
			      &port->func_list);
	}

	hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS;
	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
		list = &hw->rsrcs.dir_pq_pairs[i].func_list;

		dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
	}

	hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
	hw->pf.num_avail_dqed_entries = DLB2_MAX_NUM_DIR_CREDITS;
	hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;

	ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
				DLB2_MAX_NUM_HIST_LIST_ENTRIES);
	if (ret)
		goto unwind;

	ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
	if (ret)
		goto unwind;

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
					DLB2_MAX_NUM_HIST_LIST_ENTRIES);
		if (ret)
			goto unwind;

		ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
		if (ret)
			goto unwind;
	}

	/* Initialize the hardware resource IDs */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		hw->domains[i].id.phys_id = i;
		hw->domains[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
		hw->rsrcs.ldb_queues[i].id.phys_id = i;
		hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
		hw->rsrcs.ldb_ports[i].id.phys_id = i;
		hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS; i++) {
		hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
		hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
		hw->rsrcs.sn_groups[i].id = i;
		/* Default mode (0) is 64 sequence numbers per queue */
		hw->rsrcs.sn_groups[i].mode = 0;
		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
	}

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;

	return 0;

unwind:
	dlb2_resource_free(hw);

	return ret;
}
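
/*
 * Illustrative pairing sketch (hypothetical probe/remove path, not part of
 * this file):
 *
 *	ret = dlb2_resource_init(hw);
 *	if (ret)
 *		return ret;
 *	...
 *	dlb2_resource_free(hw);
 *
 * On internal failure dlb2_resource_init() already unwinds its own bitmap
 * allocations via the "unwind" label above, so the caller frees only after a
 * successful init.
 */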
void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
{
	union dlb2_cfg_mstr_cfg_pm_pmcsr_disable r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE);

	r0.field.disable = 0;

	DLB2_CSR_WR(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE, r0.val);
}
static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	union dlb2_chp_cfg_ldb_vas_crd r0 = { {0} };
	union dlb2_chp_cfg_dir_vas_crd r1 = { {0} };

	r0.field.count = domain->num_ldb_credits;

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), r0.val);

	r1.field.count = domain->num_dir_credits;

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), r1.val);
}
static struct dlb2_ldb_port *
dlb2_get_next_ldb_port(struct dlb2_hw *hw,
		       struct dlb2_function_resources *rsrcs,
		       u32 domain_id,
		       u32 cos_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	RTE_SET_USED(iter);

	/*
	 * To reduce the odds of consecutive load-balanced ports mapping to the
	 * same queue(s), the driver attempts to allocate ports whose neighbors
	 * are owned by a different domain.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[next].owned ||
		    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
			continue;

		if (!hw->rsrcs.ldb_ports[prev].owned ||
		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
			continue;

		return port;
	}

	/*
	 * Failing that, the driver looks for a port with one neighbor owned by
	 * a different domain and the other unallocated.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
			return port;

		if (!hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
			return port;
	}

	/*
	 * Failing that, the driver looks for a port with both neighbors
	 * unallocated.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    !hw->rsrcs.ldb_ports[next].owned)
			return port;
	}

	/* If all else fails, the driver returns the next available port. */
	return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
				   typeof(*port));
}
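
/*
 * Worked example (assumed ownership, not driver code): the 64 port IDs form
 * a ring (port 63's "next" neighbor is port 0). If ports 10 and 12 belong to
 * other domains and port 11 is free, the first pass above returns port 11
 * immediately: both of its neighbors are owned by different domains, so a
 * later allocation for this domain cannot land adjacent to it.
 */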
static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
				   struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_ports,
				   u32 cos_id,
				   struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb2_ldb_port *port;

		port = dlb2_get_next_ldb_port(hw, rsrcs,
					      domain->id.phys_id, cos_id);
		if (port == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
			      &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb2_list_add(&domain->avail_ldb_ports[cos_id],
			      &port->domain_list);
	}

	rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;

	return 0;
}
static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
				 struct dlb2_function_resources *rsrcs,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_create_sched_domain_args *args,
				 struct dlb2_cmd_response *resp)
{
	unsigned int i, j;
	int ret;

	if (args->cos_strict) {
		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			u32 num = args->num_cos_ldb_ports[i];

			/* Allocate ports from specific classes-of-service */
			ret = __dlb2_attach_ldb_ports(hw,
						      rsrcs,
						      domain,
						      num,
						      i,
						      resp);
			if (ret)
				return ret;
		}
	} else {
		unsigned int k;
		u32 cos_id;

		/*
		 * Attempt to allocate from specific class-of-service, but
		 * fallback to the other classes if that fails.
		 */
		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
				for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
					cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;

					ret = __dlb2_attach_ldb_ports(hw,
								      rsrcs,
								      domain,
								      1,
								      cos_id,
								      resp);
					if (ret == 0)
						break;
				}

				if (ret < 0)
					return ret;
			}
		}
	}

	/* Allocate num_ldb_ports from any class-of-service */
	for (i = 0; i < args->num_ldb_ports; i++) {
		for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
			ret = __dlb2_attach_ldb_ports(hw,
						      rsrcs,
						      domain,
						      1,
						      j,
						      resp);
			if (ret == 0)
				break;
		}

		if (ret < 0)
			return ret;
	}

	return 0;
}
static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
				 struct dlb2_function_resources *rsrcs,
				 struct dlb2_hw_domain *domain,
				 u32 num_ports,
				 struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb2_dir_pq_pair *port;

		port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
					   typeof(*port));
		if (port == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
	}

	rsrcs->num_avail_dir_pq_pairs -= num_ports;

	return 0;
}
static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_credits,
				   struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_qed_entries < num_credits) {
		resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_qed_entries -= num_credits;
	domain->num_ldb_credits += num_credits;
	return 0;
}
static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_credits,
				   struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_dqed_entries < num_credits) {
		resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_dqed_entries -= num_credits;
	domain->num_dir_credits += num_credits;
	return 0;
}
static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
					struct dlb2_hw_domain *domain,
					u32 num_atomic_inflights,
					struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
	domain->num_avail_aqed_entries += num_atomic_inflights;
	return 0;
}
static int
dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
				     struct dlb2_hw_domain *domain,
				     u32 num_hist_list_entries,
				     struct dlb2_cmd_response *resp)
{
	struct dlb2_bitmap *bitmap;
	int base;

	if (num_hist_list_entries) {
		bitmap = rsrcs->avail_hist_list_entries;

		base = dlb2_bitmap_find_set_bit_range(bitmap,
						      num_hist_list_entries);
		if (base < 0)
			goto error;

		domain->total_hist_list_entries = num_hist_list_entries;
		domain->avail_hist_list_entries = num_hist_list_entries;
		domain->hist_list_entry_base = base;
		domain->hist_list_entry_offset = 0;

		dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
	}
	return 0;

error:
	resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
	return -EINVAL;
}
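
/*
 * Note: history-list entries are the one resource here that must be
 * physically contiguous; the bitmap search above needs num_hist_list_entries
 * consecutive set bits. This is why dlb2_verify_create_sched_dom_args()
 * checks the longest contiguous set range rather than the total free count.
 */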
static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
				  struct dlb2_function_resources *rsrcs,
				  struct dlb2_hw_domain *domain,
				  u32 num_queues,
				  struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_ldb_queues < num_queues) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_queues; i++) {
		struct dlb2_ldb_queue *queue;

		queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
					    typeof(*queue));
		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);

		queue->domain_id = domain->id;
		queue->owned = true;

		dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
	}

	rsrcs->num_avail_ldb_queues -= num_queues;

	return 0;
}
static int
dlb2_domain_attach_resources(struct dlb2_hw *hw,
			     struct dlb2_function_resources *rsrcs,
			     struct dlb2_hw_domain *domain,
			     struct dlb2_create_sched_domain_args *args,
			     struct dlb2_cmd_response *resp)
{
	int ret;

	ret = dlb2_attach_ldb_queues(hw,
				     rsrcs,
				     domain,
				     args->num_ldb_queues,
				     resp);
	if (ret < 0)
		return ret;

	ret = dlb2_attach_ldb_ports(hw,
				    rsrcs,
				    domain,
				    args,
				    resp);
	if (ret < 0)
		return ret;

	ret = dlb2_attach_dir_ports(hw,
				    rsrcs,
				    domain,
				    args->num_dir_ports,
				    resp);
	if (ret < 0)
		return ret;

	ret = dlb2_attach_ldb_credits(rsrcs,
				      domain,
				      args->num_ldb_credits,
				      resp);
	if (ret < 0)
		return ret;

	ret = dlb2_attach_dir_credits(rsrcs,
				      domain,
				      args->num_dir_credits,
				      resp);
	if (ret < 0)
		return ret;

	ret = dlb2_attach_domain_hist_list_entries(rsrcs,
						   domain,
						   args->num_hist_list_entries,
						   resp);
	if (ret < 0)
		return ret;

	ret = dlb2_attach_atomic_inflights(rsrcs,
					   domain,
					   args->num_atomic_inflights,
					   resp);
	if (ret < 0)
		return ret;

	dlb2_configure_domain_credits(hw, domain);

	domain->configured = true;

	domain->started = false;

	rsrcs->num_avail_domains--;

	return 0;
}
static int
dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
				  struct dlb2_create_sched_domain_args *args,
				  struct dlb2_cmd_response *resp)
{
	u32 num_avail_ldb_ports, req_ldb_ports;
	struct dlb2_bitmap *avail_hl_entries;
	unsigned int max_contig_hl_range;
	int i;

	avail_hl_entries = rsrcs->avail_hist_list_entries;

	max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);

	num_avail_ldb_ports = 0;
	req_ldb_ports = 0;
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];

		req_ldb_ports += args->num_cos_ldb_ports[i];
	}

	req_ldb_ports += args->num_ldb_ports;

	if (rsrcs->num_avail_domains < 1) {
		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
		return -EINVAL;
	}

	if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	if (req_ldb_ports > num_avail_ldb_ports) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
		if (args->num_cos_ldb_ports[i] >
		    rsrcs->num_avail_ldb_ports[i]) {
			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
		resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
		return -EINVAL;
	}

	if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
		resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
		resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	if (max_contig_hl_range < args->num_hist_list_entries) {
		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
		return -EINVAL;
	}

	return 0;
}
static void
dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
				  struct dlb2_create_sched_domain_args *args,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tNumber of LDB queues: %d\n",
		    args->num_ldb_queues);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
		    args->num_ldb_ports);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0): %d\n",
		    args->num_cos_ldb_ports[0]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1): %d\n",
		    args->num_cos_ldb_ports[1]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2): %d\n",
		    args->num_cos_ldb_ports[2]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3): %d\n",
		    args->num_cos_ldb_ports[3]);
	DLB2_HW_DBG(hw, "\tStrict CoS allocation: %d\n",
		    args->cos_strict);
	DLB2_HW_DBG(hw, "\tNumber of DIR ports: %d\n",
		    args->num_dir_ports);
	DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
		    args->num_atomic_inflights);
	DLB2_HW_DBG(hw, "\tNumber of hist list entries: %d\n",
		    args->num_hist_list_entries);
	DLB2_HW_DBG(hw, "\tNumber of LDB credits: %d\n",
		    args->num_ldb_credits);
	DLB2_HW_DBG(hw, "\tNumber of DIR credits: %d\n",
		    args->num_dir_credits);
}
/**
 * dlb2_hw_create_sched_domain() - Allocate and initialize a DLB scheduling
 *	domain and its resources.
 * @hw: Contains the current state of the DLB2 hardware.
 * @args: User-provided arguments.
 * @resp: Response to user.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */
int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
				struct dlb2_create_sched_domain_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	int ret;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp);
	if (ret)
		return ret;

	domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: no available domains\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	if (domain->configured) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: avail_domains contains configured domains.\n",
			    __func__);
		return -EFAULT;
	}

	dlb2_init_domain_rsrc_lists(domain);

	ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
	if (ret < 0) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to verify args.\n",
			    __func__);

		return ret;
	}

	dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);

	dlb2_list_add(&rsrcs->used_domains, &domain->func_list);

	resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
	resp->status = 0;

	return 0;
}
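
/*
 * Illustrative usage sketch (hypothetical caller with arbitrary example
 * values, not part of this file):
 *
 *	struct dlb2_create_sched_domain_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	args.num_ldb_queues = 2;
 *	args.num_ldb_ports = 4;		(any class-of-service)
 *	args.num_ldb_credits = 64;
 *	args.num_hist_list_entries = 64;
 *
 *	if (dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0) < 0)
 *		report_failure(resp.status);	(hypothetical helper)
 *	else
 *		domain_id = resp.id;
 */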
/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function need only be called for configuration that can occur after the
 * domain has started; prior to starting, applications can't send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
}
static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_dir_pq_pair *port)
{
	union dlb2_lsp_cq_dir_dsbl reg;

	reg.field.disabled = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}
static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
				   struct dlb2_dir_pq_pair *port)
{
	union dlb2_lsp_cq_dir_tkn_cnt r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id));

	/*
	 * Account for the initial token count, which is used in order to
	 * provide a CQ with depth less than 8.
	 */

	return r0.field.count - port->init_tkn_cnt;
}
static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
			     struct dlb2_dir_pq_pair *port)
{
	unsigned int port_id = port->id.phys_id;
	u32 cnt;

	/* Return any outstanding tokens */
	cnt = dlb2_dir_cq_token_count(hw, port);

	if (cnt != 0) {
		struct dlb2_hcw hcw_mem[8], *hcw;
		void *pp_addr;

		pp_addr = os_map_producer_port(hw, port_id, false);

		/* Point hcw to a 64B-aligned location */
		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

		/*
		 * Program the first HCW for a batch token return and
		 * the other HCWs as NOOPS
		 */

		memset(hcw, 0, 4 * sizeof(*hcw));
		hcw->cq_token = 1;
		hcw->lock_id = cnt - 1;

		dlb2_movdir64b(pp_addr, hcw);

		os_fence_hcw(hw, pp_addr);

		os_unmap_producer_port(hw, pp_addr);
	}

	return 0;
}
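
/*
 * Note (assuming 16B HCWs, i.e. four per 64B cache line): hcw_mem[8] spans
 * 128B, so rounding &hcw_mem[4] down to a 64B boundary always leaves at
 * least four in-bounds HCW slots starting at hcw. The batch token return
 * apparently encodes the token count as lock_id = cnt - 1, so a single
 * MOVDIR64B returns every outstanding token at once.
 */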
static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *port)
{
	union dlb2_lsp_cq_dir_dsbl reg;

	reg.field.disabled = 0;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}
static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
				     struct dlb2_hw_domain *domain,
				     bool toggle_port)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	int ret;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		/*
		 * Can't drain a port if it's not configured, and there's
		 * nothing to drain if its queue is unconfigured.
		 */
		if (!port->port_configured || !port->queue_configured)
			continue;

		if (toggle_port)
			dlb2_dir_port_cq_disable(hw, port);

		ret = dlb2_drain_dir_cq(hw, port);
		if (ret < 0)
			return ret;

		if (toggle_port)
			dlb2_dir_port_cq_enable(hw, port);
	}

	return 0;
}
static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
				struct dlb2_dir_pq_pair *queue)
{
	union dlb2_lsp_qid_dir_enqueue_cnt r0;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));

	return r0.field.count;
}

static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *queue)
{
	return dlb2_dir_queue_depth(hw, queue) == 0;
}
static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
		if (!dlb2_dir_queue_is_empty(hw, queue))
			return false;
	}

	return true;
}
static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	int i, ret;

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
		ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
		if (ret < 0)
			return ret;

		if (dlb2_domain_dir_queues_empty(hw, domain))
			break;
	}

	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to empty queues\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * Drain the CQs one more time. For the queues to go empty, they would
	 * have scheduled one or more QEs.
	 */
	ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
	if (ret < 0)
		return ret;

	return 0;
}
static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_dsbl reg;

	/*
	 * Don't re-enable the port if a removal is pending. The caller should
	 * mark this port as enabled (if it isn't already), and when the
	 * removal completes the port will be enabled.
	 */
	if (port->num_pending_removals)
		return;

	reg.field.disabled = 0;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}
static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_dsbl reg;

	reg.field.disabled = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}
static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
				      struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_infl_cnt r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));

	return r0.field.count;
}
static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_tkn_cnt r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id));

	/*
	 * Account for the initial token count, which is used in order to
	 * provide a CQ with depth less than 8.
	 */

	return r0.field.token_count - port->init_tkn_cnt;
}
static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
	u32 infl_cnt, tkn_cnt;
	unsigned int i;

	infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
	tkn_cnt = dlb2_ldb_cq_token_count(hw, port);

	if (infl_cnt || tkn_cnt) {
		struct dlb2_hcw hcw_mem[8], *hcw;
		void *pp_addr;

		pp_addr = os_map_producer_port(hw, port->id.phys_id, true);

		/* Point hcw to a 64B-aligned location */
		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

		/*
		 * Program the first HCW for a completion and token return and
		 * the other HCWs as NOOPS
		 */

		memset(hcw, 0, 4 * sizeof(*hcw));
		hcw->qe_comp = (infl_cnt > 0);
		hcw->cq_token = (tkn_cnt > 0);
		hcw->lock_id = tkn_cnt - 1;

		/* Return tokens in the first HCW */
		dlb2_movdir64b(pp_addr, hcw);

		hcw->cq_token = 0;

		/* Issue remaining completions (if any) */
		for (i = 1; i < infl_cnt; i++)
			dlb2_movdir64b(pp_addr, hcw);

		os_fence_hcw(hw, pp_addr);

		os_unmap_producer_port(hw, pp_addr);
	}

	return 0;
}
static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
				     struct dlb2_hw_domain *domain,
				     bool toggle_port)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int ret, i;
	RTE_SET_USED(iter);

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			if (toggle_port)
				dlb2_ldb_port_cq_disable(hw, port);

			ret = dlb2_drain_ldb_cq(hw, port);
			if (ret < 0)
				return ret;

			if (toggle_port)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}

	return 0;
}
static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
				struct dlb2_ldb_queue *queue)
{
	union dlb2_lsp_qid_aqed_active_cnt r0;
	union dlb2_lsp_qid_atm_active r1;
	union dlb2_lsp_qid_ldb_enqueue_cnt r2;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
	r1.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));

	return r0.field.count + r1.field.count + r2.field.count;
}

static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
				    struct dlb2_ldb_queue *queue)
{
	return dlb2_ldb_queue_depth(hw, queue) == 0;
}
static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
					    struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		if (queue->num_mappings == 0)
			continue;

		if (!dlb2_ldb_queue_is_empty(hw, queue))
			return false;
	}

	return true;
}
static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
					   struct dlb2_hw_domain *domain)
{
	int i, ret;

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	if (domain->num_pending_removals > 0) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to unmap domain queues\n",
			    __func__);
		return -EFAULT;
	}

	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
		ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
		if (ret < 0)
			return ret;

		if (dlb2_domain_mapped_queues_empty(hw, domain))
			break;
	}

	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to empty queues\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * Drain the CQs one more time. For the queues to go empty, they would
	 * have scheduled one or more QEs.
	 */
	ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
	if (ret < 0)
		return ret;

	return 0;
}
static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			port->enabled = true;

			dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}
static struct dlb2_ldb_queue *
dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
			   u32 id,
			   bool vdev_req,
			   unsigned int vdev_id)
{
	struct dlb2_list_entry *iter1;
	struct dlb2_list_entry *iter2;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter1);
	RTE_SET_USED(iter2);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	if (!vdev_req)
		return &hw->rsrcs.ldb_queues[id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
			if (queue->id.virt_id == id)
				return queue;
	}

	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
		if (queue->id.virt_id == id)
			return queue;

	return NULL;
}
static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
						      u32 id,
						      bool vdev_req,
						      unsigned int vdev_id)
{
	struct dlb2_list_entry *iteration;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	RTE_SET_USED(iteration);

	if (id >= DLB2_MAX_NUM_DOMAINS)
		return NULL;

	if (!vdev_req)
		return &hw->domains[id];

	rsrcs = &hw->vdev[vdev_id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
		if (domain->id.virt_id == id)
			return domain;

	return NULL;
}
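
/*
 * Overview (derived from the transitions coded below): a {port, slot}
 * mapping moves between five states, and only these transitions are
 * accepted:
 *
 *	UNMAPPED                  -> MAPPED, MAP_IN_PROG
 *	MAPPED                    -> UNMAPPED, UNMAP_IN_PROG,
 *	                             MAPPED (priority change)
 *	MAP_IN_PROG               -> UNMAPPED, MAPPED
 *	UNMAP_IN_PROG             -> UNMAPPED, MAPPED,
 *	                             UNMAP_IN_PROG_PENDING_MAP
 *	UNMAP_IN_PROG_PENDING_MAP -> UNMAP_IN_PROG, UNMAPPED
 *
 * Anything else falls through to the error label and returns -EFAULT.
 */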
static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot,
					   enum dlb2_qid_map_state new_state)
{
	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
	struct dlb2_hw_domain *domain;
	int domain_id;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, domain_id);
		return -EINVAL;
	}

	switch (curr_state) {
	case DLB2_QUEUE_UNMAPPED:
		switch (new_state) {
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			break;
		case DLB2_QUEUE_MAP_IN_PROG:
			queue->num_pending_additions++;
			domain->num_pending_additions++;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAPPED:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG:
			port->num_pending_removals++;
			domain->num_pending_removals++;
			break;
		case DLB2_QUEUE_MAPPED:
			/* Priority change, nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_MAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
			/* Nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
		switch (new_state) {
		case DLB2_QUEUE_UNMAP_IN_PROG:
			/* Nothing to update */
			break;
		case DLB2_QUEUE_UNMAPPED:
			/*
			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
			 * becomes UNMAPPED before it transitions to
			 * MAP_IN_PROG.
			 */
			queue->num_mappings--;
			port->num_mappings--;
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		default:
			goto error;
		}
		break;
	default:
		goto error;
	}

	port->qid_map[slot].state = new_state;

	DLB2_HW_DBG(hw,
		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return 0;

error:
	DLB2_HW_ERR(hw,
		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return -EFAULT;
}
static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
				enum dlb2_qid_map_state state,
				int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
				      enum dlb2_qid_map_state state,
				      struct dlb2_ldb_queue *queue,
				      int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state &&
		    port->qid_map[i].qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}
/*
 * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
 * their function names imply, and should only be called by the dynamic CQ
 * mapping code.
 */
static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
					      struct dlb2_hw_domain *domain,
					      struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}
static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain,
					     struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}
static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
						struct dlb2_ldb_port *port,
						int slot)
{
	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };

	r0.field.cq = port->id.phys_id;
	r0.field.qidix = slot;
	r0.field.value = 0;
	r0.field.inflight_ok_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot)
{
	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };

	r0.field.cq = port->id.phys_id;
	r0.field.qidix = slot;
	r0.field.value = 1;
	r0.field.inflight_ok_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);

	dlb2_flush_csr(hw);
}
static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
					struct dlb2_ldb_port *p,
					struct dlb2_ldb_queue *q,
					u8 priority)
{
	union dlb2_lsp_cq2priov r0;
	union dlb2_lsp_cq2qid0 r1;
	union dlb2_atm_qid2cqidix_00 r2;
	union dlb2_lsp_qid2cqidix_00 r3;
	union dlb2_lsp_qid2cqidix2_00 r4;
	enum dlb2_qid_map_state state;
	int i;

	/* Look for a pending or already mapped slot, else an unused slot */
	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));

	r0.field.v |= 1 << i;
	r0.field.prio |= (priority & 0x7) << i * 3;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);

	/* Read-modify-write the QID map register */
	if (i < 4)
		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
	else
		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));

	if (i == 0 || i == 4)
		r1.field.qid_p0 = q->id.phys_id;
	if (i == 1 || i == 5)
		r1.field.qid_p1 = q->id.phys_id;
	if (i == 2 || i == 6)
		r1.field.qid_p2 = q->id.phys_id;
	if (i == 3 || i == 7)
		r1.field.qid_p3 = q->id.phys_id;

	if (i < 4)
		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
	else
		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_ATM_QID2CQIDIX(q->id.phys_id,
						 p->id.phys_id / 4));

	r3.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX(q->id.phys_id,
						 p->id.phys_id / 4));

	r4.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
						  p->id.phys_id / 4));

	switch (p->id.phys_id % 4) {
	case 0:
		r2.field.cq_p0 |= 1 << i;
		r3.field.cq_p0 |= 1 << i;
		r4.field.cq_p0 |= 1 << i;
		break;

	case 1:
		r2.field.cq_p1 |= 1 << i;
		r3.field.cq_p1 |= 1 << i;
		r4.field.cq_p1 |= 1 << i;
		break;

	case 2:
		r2.field.cq_p2 |= 1 << i;
		r3.field.cq_p2 |= 1 << i;
		r4.field.cq_p2 |= 1 << i;
		break;

	case 3:
		r2.field.cq_p3 |= 1 << i;
		r3.field.cq_p3 |= 1 << i;
		r4.field.cq_p3 |= 1 << i;
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    r2.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    r3.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
		    r4.val);

	dlb2_flush_csr(hw);

	p->qid_map[i].qid = q->id.phys_id;
	p->qid_map[i].priority = priority;

	state = DLB2_QUEUE_MAPPED;

	return dlb2_port_slot_state_transition(hw, p, q, i, state);
}
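
/*
 * Worked example (assumed IDs, not driver code): mapping queue 9 into slot
 * i = 2 of port 5 sets bit 2 of CQ2PRIOV[5].v, ORs the 3-bit priority in at
 * bit offset 2 * 3 = 6 of CQ2PRIOV[5].prio, and writes QID 9 to qid_p2 of
 * CQ2QID0[5] (slots 0-3 live in CQ2QID0, slots 4-7 in CQ2QID1). Each
 * QID2CQIDIX register covers four ports, so port 5 sets the cq_p1 bits
 * (5 % 4 == 1) of register index 5 / 4 == 1 in all three register files.
 */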
static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot)
{
	union dlb2_lsp_qid_aqed_active_cnt r0;
	union dlb2_lsp_qid_ldb_enqueue_cnt r1;
	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };

	/* Set the atomic scheduling haswork bit */
	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 1;
	r2.field.rlist_haswork_v = r0.field.count > 0;

	/* Set the non-atomic scheduling haswork bit */
	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	r1.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));

	memset(&r2, 0, sizeof(r2));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 1;
	r2.field.nalb_haswork_v = (r1.field.count > 0);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	dlb2_flush_csr(hw);

	return 0;
}
static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      u8 slot)
{
	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 0;
	r2.field.rlist_haswork_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	memset(&r2, 0, sizeof(r2));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 0;
	r2.field.nalb_haswork_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	dlb2_flush_csr(hw);
}
static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
					      struct dlb2_ldb_queue *queue)
{
	union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };

	r0.field.limit = queue->num_qid_inflights;

	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
}

static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
						struct dlb2_ldb_queue *queue)
{
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
}
static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
						struct dlb2_hw_domain *domain,
						struct dlb2_ldb_port *port,
						struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	union dlb2_lsp_qid_ldb_infl_cnt r0;
	enum dlb2_qid_map_state state;
	int slot, ret, i;
	u8 prio;
	RTE_SET_USED(iter);

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: non-zero QID inflight count\n",
			    __func__);
		return -EINVAL;
	}

	/*
	 * Static map the port and set its corresponding has_work bits.
	 */
	state = DLB2_QUEUE_MAP_IN_PROG;
	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
		return -EINVAL;

	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	prio = port->qid_map[slot].priority;

	/*
	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
	 * the port's qid_map state.
	 */
	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
	if (ret)
		return ret;

	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
	if (ret)
		return ret;

	/*
	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
	 * prevent spurious schedules to cause the queue's inflight
	 * count to increase.
	 */
	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);

	/* Reset the queue's inflight status */
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			state = DLB2_QUEUE_MAPPED;
			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
		}
	}

	dlb2_ldb_queue_set_inflight_limit(hw, queue);

	/* Re-enable CQs mapped to this queue */
	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

	/* If this queue has other mappings pending, clear its inflight limit */
	if (queue->num_pending_additions > 0)
		dlb2_ldb_queue_clear_inflight_limit(hw, queue);

	return 0;
}
/**
 * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
 * @hw: dlb2_hw handle for a particular device.
 * @port: load-balanced port
 * @queue: load-balanced queue
 * @priority: queue servicing priority
 *
 * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
 * at a later point, and <0 if an error occurred.
 */
static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
					 struct dlb2_ldb_port *port,
					 struct dlb2_ldb_queue *queue,
					 u8 priority)
{
	union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	int domain_id, slot, ret;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, port->domain_id.phys_id);
		return -EINVAL;
	}

	/*
	 * Set the QID inflight limit to 0 to prevent further scheduling of the
	 * queue.
	 */
	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);

	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
		DLB2_HW_ERR(hw,
			    "Internal error: No available unmapped slots\n");
		return -EFAULT;
	}

	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	port->qid_map[slot].qid = queue->id.phys_id;
	port->qid_map[slot].priority = priority;

	state = DLB2_QUEUE_MAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
	if (ret)
		return ret;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	/*
	 * Disable the affected CQ, and the CQs already mapped to the QID,
	 * before reading the QID's inflight count a second time. There is an
	 * unlikely race in which the QID may schedule one more QE after we
	 * read an inflight count of 0, and disabling the CQs guarantees that
	 * the race will not occur after a re-read of the inflight count
	 * register.
	 */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		if (port->enabled)
			dlb2_ldb_port_cq_enable(hw, port);

		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}
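
/*
 * Summary (derived from the code above) of the dynamic map protocol:
 *
 *	1. Zero the QID's inflight limit so nothing new is scheduled.
 *	2. Reserve a slot and transition it to MAP_IN_PROG.
 *	3. If the QID still has inflights, defer to the OS worker (return 1).
 *	4. Otherwise disable the affected CQs, re-read the inflight count to
 *	   close the race window, and either defer (return 1) or complete the
 *	   map via dlb2_ldb_port_finish_map_qid_dynamic() (return 0).
 */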
static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain,
					struct dlb2_ldb_port *port)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		union dlb2_lsp_qid_ldb_infl_cnt r0;
		struct dlb2_ldb_queue *queue;
		int qid;

		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
			continue;

		qid = port->qid_map[i].qid;

		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);

		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: unable to find queue %d\n",
				    __func__, qid);
			continue;
		}

		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));

		if (r0.field.count)
			continue;

		/*
		 * Disable the affected CQ, and the CQs already mapped to the
		 * QID, before reading the QID's inflight count a second time.
		 * There is an unlikely race in which the QID may schedule one
		 * more QE after we read an inflight count of 0, and disabling
		 * the CQs guarantees that the race will not occur after a
		 * re-read of the inflight count register.
		 */
		if (port->enabled)
			dlb2_ldb_port_cq_disable(hw, port);

		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));

		if (r0.field.count) {
			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);

			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

			continue;
		}

		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
	}
}
static unsigned int
dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_additions == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_map_port(hw, domain, port);
	}

	return domain->num_pending_additions;
}
static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port,
				   struct dlb2_ldb_queue *queue)
{
	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
	union dlb2_lsp_cq2priov r0;
	union dlb2_atm_qid2cqidix_00 r1;
	union dlb2_lsp_qid2cqidix_00 r2;
	union dlb2_lsp_qid2cqidix2_00 r3;
	u32 queue_id;
	u32 port_id;
	int i;

	/* Find the queue's slot */
	mapped = DLB2_QUEUE_MAPPED;
	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: QID %d isn't mapped\n",
			    __func__, __LINE__, queue->id.phys_id);
		return -EFAULT;
	}

	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	port_id = port->id.phys_id;
	queue_id = queue->id.phys_id;

	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));

	r0.field.v &= ~(1 << i);

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);

	r1.val = DLB2_CSR_RD(hw,
			     DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));

	r3.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));

	switch (port_id % 4) {
	case 0:
		r1.field.cq_p0 &= ~(1 << i);
		r2.field.cq_p0 &= ~(1 << i);
		r3.field.cq_p0 &= ~(1 << i);
		break;

	case 1:
		r1.field.cq_p1 &= ~(1 << i);
		r2.field.cq_p1 &= ~(1 << i);
		r3.field.cq_p1 &= ~(1 << i);
		break;

	case 2:
		r1.field.cq_p2 &= ~(1 << i);
		r2.field.cq_p2 &= ~(1 << i);
		r3.field.cq_p2 &= ~(1 << i);
		break;

	case 3:
		r1.field.cq_p3 &= ~(1 << i);
		r2.field.cq_p3 &= ~(1 << i);
		r3.field.cq_p3 &= ~(1 << i);
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
		    r1.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
		    r2.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
		    r3.val);

	dlb2_flush_csr(hw);

	unmapped = DLB2_QUEUE_UNMAPPED;

	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
}
static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_ldb_port *port,
				 struct dlb2_ldb_queue *queue,
				 u8 prio)
{
	if (domain->started)
		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
	else
		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
}
static void
dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
				   struct dlb2_hw_domain *domain,
				   struct dlb2_ldb_port *port,
				   int slot)
{
	enum dlb2_qid_map_state state;
	struct dlb2_ldb_queue *queue;

	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];

	state = port->qid_map[slot].state;

	/* Update the QID2CQIDX and CQ2QID vectors */
	dlb2_ldb_port_unmap_qid(hw, port, queue);

	/*
	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
	 * the has_work bits
	 */
	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);

	/* Reset the {CQ, slot} to its default state */
	dlb2_ldb_port_set_queue_if_status(hw, port, slot);

	/* Re-enable the CQ if it wasn't manually disabled by the user */
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	/*
	 * If there is a mapping that is pending this slot's removal, perform
	 * the mapping now.
	 */
	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
		struct dlb2_ldb_port_qid_map *map;
		struct dlb2_ldb_queue *map_queue;
		u8 prio;

		map = &port->qid_map[slot];

		map->qid = map->pending_qid;
		map->priority = map->pending_priority;

		map_queue = &hw->rsrcs.ldb_queues[map->qid];
		prio = map->priority;

		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
	}
}
static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain,
					  struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_infl_cnt r0;
	int i;

	if (port->num_pending_removals == 0)
		return false;

	/*
	 * The unmap requires all the CQ's outstanding inflights to be
	 * completed.
	 */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
	if (r0.field.count > 0)
		return false;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map;

		map = &port->qid_map[i];

		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
			continue;

		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
	}

	return true;
}
static unsigned int
dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_removals == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_unmap_port(hw, domain, port);
	}

	return domain->num_pending_removals;
}
static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			port->enabled = false;

			dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}
static void dlb2_log_reset_domain(struct dlb2_hw *hw,
				  u32 domain_id,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}
static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain,
					 unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	union dlb2_sys_vf_dir_vpp_v r1;
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	r1.field.vpp_v = 0;

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		unsigned int offs;
		u32 virt_id;

		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;

		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);
	}
}
static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain,
					 unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	union dlb2_sys_vf_ldb_vpp_v r1;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	r1.field.vpp_v = 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			unsigned int offs;
			u32 virt_id;

			if (hw->virt_mode == DLB2_VIRT_SRIOV)
				virt_id = port->id.virt_id;
			else
				virt_id = port->id.phys_id;

			offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;

			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r1.val);
		}
	}
}
static void
dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	union dlb2_chp_ldb_cq_int_enb r0 = { {0} };
	union dlb2_chp_ldb_cq_wd_enb r1 = { {0} };
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	r0.field.en_tim = 0;
	r0.field.en_depth = 0;

	r1.field.wd_enable = 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			DLB2_CSR_WR(hw,
				    DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
				    r0.val);

			DLB2_CSR_WR(hw,
				    DLB2_CHP_LDB_CQ_WD_ENB(port->id.phys_id),
				    r1.val);
		}
	}
}
static void
dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	union dlb2_chp_dir_cq_int_enb r0 = { {0} };
	union dlb2_chp_dir_cq_wd_enb r1 = { {0} };
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	r0.field.en_tim = 0;
	r0.field.en_depth = 0;

	r1.field.wd_enable = 0;

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		DLB2_CSR_WR(hw,
			    DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
			    r0.val);

		DLB2_CSR_WR(hw,
			    DLB2_CHP_DIR_CQ_WD_ENB(port->id.phys_id),
			    r1.val);
	}
}
static void
dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		union dlb2_sys_ldb_vasqid_v r0 = { {0} };
		union dlb2_sys_ldb_qid2vqid r1 = { {0} };
		union dlb2_sys_vf_ldb_vqid_v r2 = { {0} };
		union dlb2_sys_vf_ldb_vqid2qid r3 = { {0} };
		int idx;

		idx = domain_offset + queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), r0.val);

		if (queue->id.vdev_owned) {
			DLB2_CSR_WR(hw,
				    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
				    r1.val);

			idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
				queue->id.virt_id;

			DLB2_CSR_WR(hw,
				    DLB2_SYS_VF_LDB_VQID_V(idx),
				    r2.val);

			DLB2_CSR_WR(hw,
				    DLB2_SYS_VF_LDB_VQID2QID(idx),
				    r3.val);
		}
	}
}
static void
dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS;
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
		union dlb2_sys_dir_vasqid_v r0 = { {0} };
		union dlb2_sys_vf_dir_vqid_v r1 = { {0} };
		union dlb2_sys_vf_dir_vqid2qid r2 = { {0} };
		int idx;

		idx = domain_offset + queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);

		if (queue->id.vdev_owned) {
			idx = queue->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS +
				queue->id.virt_id;

			DLB2_CSR_WR(hw,
				    DLB2_SYS_VF_DIR_VQID_V(idx),
				    r1.val);

			DLB2_CSR_WR(hw,
				    DLB2_SYS_VF_DIR_VQID2QID(idx),
				    r2.val);
		}
	}
}
static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
					       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	union dlb2_chp_sn_chk_enbl r1;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	r1.field.en = 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			DLB2_CSR_WR(hw,
				    DLB2_CHP_SN_CHK_ENBL(port->id.phys_id),
				    r1.val);
	}
}
static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
						 struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			int j;

			for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
				if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
					break;
			}

			if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
				DLB2_HW_ERR(hw,
					    "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
					    __func__, port->id.phys_id);
				return -EFAULT;
			}
		}
	}

	return 0;
}
static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		port->enabled = false;

		dlb2_dir_port_cq_disable(hw, port);
	}
}
2602 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2603 struct dlb2_hw_domain *domain)
2605 struct dlb2_list_entry *iter;
2606 struct dlb2_dir_pq_pair *port;
2607 union dlb2_sys_dir_pp_v r1;
2612 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2614 DLB2_SYS_DIR_PP_V(port->id.phys_id),
2619 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2620 struct dlb2_hw_domain *domain)
2622 struct dlb2_list_entry *iter;
2623 union dlb2_sys_ldb_pp_v r1;
2624 struct dlb2_ldb_port *port;
2630 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2631 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2633 DLB2_SYS_LDB_PP_V(port->id.phys_id),
2638 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2639 struct dlb2_hw_domain *domain)
2641 struct dlb2_list_entry *iter;
2642 struct dlb2_dir_pq_pair *dir_port;
2643 struct dlb2_ldb_port *ldb_port;
2644 struct dlb2_ldb_queue *queue;
2649 * Confirm that all the domain's queues' inflight counts and AQED
2650 * active counts are 0.
2652 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2653 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2655 "[%s()] Internal error: failed to empty ldb queue %d\n",
2656 __func__, queue->id.phys_id);
2661 /* Confirm that all the domain's CQs' inflight and token counts are 0. */
2662 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2663 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2664 if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2665 dlb2_ldb_cq_token_count(hw, ldb_port)) {
2667 "[%s()] Internal error: failed to empty ldb port %d\n",
2668 __func__, ldb_port->id.phys_id);
2674 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2675 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2677 "[%s()] Internal error: failed to empty dir queue %d\n",
2678 __func__, dir_port->id.phys_id);
2682 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2684 "[%s()] Internal error: failed to empty dir port %d\n",
2685 __func__, dir_port->id.phys_id);
2693 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2694 struct dlb2_ldb_port *port)
2697 DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2698 DLB2_SYS_LDB_PP2VAS_RST);
2701 DLB2_CHP_LDB_CQ2VAS(port->id.phys_id),
2702 DLB2_CHP_LDB_CQ2VAS_RST);
2705 DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2706 DLB2_SYS_LDB_PP2VDEV_RST);
2708 if (port->id.vdev_owned) {
2713 * DLB uses producer port address bits 17:12 to determine the
2714 * producer port ID. In Scalable IOV mode, PP accesses come
2715 * through the PF MMIO window for the physical producer port,
2716 * so for translation purposes the virtual and physical port
2717 * IDs are equal.
2718 */
2719 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2720 virt_id = port->id.virt_id;
2722 virt_id = port->id.phys_id;
2724 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
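/*
 * Illustrative arithmetic (DLB2_MAX_NUM_LDB_PORTS == 64 is assumed here,
 * not taken from this file): vdev 2 with virtual port 5 yields
 * offs = 2 * 64 + 5 = 133, i.e. each vdev owns a contiguous block of
 * VPP register indices selected by its virtual port ID.
 */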
2727 DLB2_SYS_VF_LDB_VPP2PP(offs),
2728 DLB2_SYS_VF_LDB_VPP2PP_RST);
2731 DLB2_SYS_VF_LDB_VPP_V(offs),
2732 DLB2_SYS_VF_LDB_VPP_V_RST);
2736 DLB2_SYS_LDB_PP_V(port->id.phys_id),
2737 DLB2_SYS_LDB_PP_V_RST);
2740 DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id),
2741 DLB2_LSP_CQ_LDB_DSBL_RST);
2744 DLB2_CHP_LDB_CQ_DEPTH(port->id.phys_id),
2745 DLB2_CHP_LDB_CQ_DEPTH_RST);
2748 DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id),
2749 DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2752 DLB2_CHP_HIST_LIST_LIM(port->id.phys_id),
2753 DLB2_CHP_HIST_LIST_LIM_RST);
2756 DLB2_CHP_HIST_LIST_BASE(port->id.phys_id),
2757 DLB2_CHP_HIST_LIST_BASE_RST);
2760 DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id),
2761 DLB2_CHP_HIST_LIST_POP_PTR_RST);
2764 DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
2765 DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2768 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id),
2769 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2772 DLB2_CHP_LDB_CQ_TMR_THRSH(port->id.phys_id),
2773 DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2776 DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
2777 DLB2_CHP_LDB_CQ_INT_ENB_RST);
2780 DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2781 DLB2_SYS_LDB_CQ_ISR_RST);
2784 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
2785 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2788 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2789 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2792 DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
2793 DLB2_CHP_LDB_CQ_WPTR_RST);
2796 DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
2797 DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2800 DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2801 DLB2_SYS_LDB_CQ_ADDR_L_RST);
2804 DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2805 DLB2_SYS_LDB_CQ_ADDR_U_RST);
2808 DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2809 DLB2_SYS_LDB_CQ_AT_RST);
2812 DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
2813 DLB2_SYS_LDB_CQ_PASID_RST);
2816 DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2817 DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2820 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(port->id.phys_id),
2821 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2824 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(port->id.phys_id),
2825 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2828 DLB2_LSP_CQ2QID0(port->id.phys_id),
2829 DLB2_LSP_CQ2QID0_RST);
2832 DLB2_LSP_CQ2QID1(port->id.phys_id),
2833 DLB2_LSP_CQ2QID1_RST);
2836 DLB2_LSP_CQ2PRIOV(port->id.phys_id),
2837 DLB2_LSP_CQ2PRIOV_RST);
2840 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2841 struct dlb2_hw_domain *domain)
2843 struct dlb2_list_entry *iter;
2844 struct dlb2_ldb_port *port;
2848 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2849 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2850 __dlb2_domain_reset_ldb_port_registers(hw, port);
2855 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2856 struct dlb2_dir_pq_pair *port)
2859 DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
2860 DLB2_CHP_DIR_CQ2VAS_RST);
2863 DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id),
2864 DLB2_LSP_CQ_DIR_DSBL_RST);
2866 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2869 DLB2_CHP_DIR_CQ_DEPTH(port->id.phys_id),
2870 DLB2_CHP_DIR_CQ_DEPTH_RST);
2873 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id),
2874 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2877 DLB2_CHP_DIR_CQ_TMR_THRSH(port->id.phys_id),
2878 DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2881 DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
2882 DLB2_CHP_DIR_CQ_INT_ENB_RST);
2885 DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2886 DLB2_SYS_DIR_CQ_ISR_RST);
2889 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
2890 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2893 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
2894 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2897 DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
2898 DLB2_CHP_DIR_CQ_WPTR_RST);
2901 DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
2902 DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2905 DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2906 DLB2_SYS_DIR_CQ_ADDR_L_RST);
2909 DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2910 DLB2_SYS_DIR_CQ_ADDR_U_RST);
2913 DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2914 DLB2_SYS_DIR_CQ_AT_RST);
2917 DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
2918 DLB2_SYS_DIR_CQ_PASID_RST);
2921 DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2922 DLB2_SYS_DIR_CQ_FMT_RST);
2925 DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2926 DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
2929 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(port->id.phys_id),
2930 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
2933 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(port->id.phys_id),
2934 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
2937 DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
2938 DLB2_SYS_DIR_PP2VAS_RST);
2941 DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
2942 DLB2_CHP_DIR_CQ2VAS_RST);
2945 DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
2946 DLB2_SYS_DIR_PP2VDEV_RST);
2948 if (port->id.vdev_owned) {
2953 * DLB uses producer port address bits 17:12 to determine the
2954 * producer port ID. In Scalable IOV mode, PP accesses come
2955 * through the PF MMIO window for the physical producer port,
2956 * so for translation purposes the virtual and physical port
2957 * IDs are equal.
2958 */
2959 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2960 virt_id = port->id.virt_id;
2962 virt_id = port->id.phys_id;
2964 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
2967 DLB2_SYS_VF_DIR_VPP2PP(offs),
2968 DLB2_SYS_VF_DIR_VPP2PP_RST);
2971 DLB2_SYS_VF_DIR_VPP_V(offs),
2972 DLB2_SYS_VF_DIR_VPP_V_RST);
2976 DLB2_SYS_DIR_PP_V(port->id.phys_id),
2977 DLB2_SYS_DIR_PP_V_RST);
2980 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2981 struct dlb2_hw_domain *domain)
2983 struct dlb2_list_entry *iter;
2984 struct dlb2_dir_pq_pair *port;
2987 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
2988 __dlb2_domain_reset_dir_port_registers(hw, port);
2991 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
2992 struct dlb2_hw_domain *domain)
2994 struct dlb2_list_entry *iter;
2995 struct dlb2_ldb_queue *queue;
2998 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2999 unsigned int queue_id = queue->id.phys_id;
3003 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(queue_id),
3004 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3007 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(queue_id),
3008 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3011 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(queue_id),
3012 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3015 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(queue_id),
3016 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3019 DLB2_LSP_QID_NALDB_MAX_DEPTH(queue_id),
3020 DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3023 DLB2_LSP_QID_LDB_INFL_LIM(queue_id),
3024 DLB2_LSP_QID_LDB_INFL_LIM_RST);
3027 DLB2_LSP_QID_AQED_ACTIVE_LIM(queue_id),
3028 DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3031 DLB2_LSP_QID_ATM_DEPTH_THRSH(queue_id),
3032 DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3035 DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue_id),
3036 DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3039 DLB2_SYS_LDB_QID_ITS(queue_id),
3040 DLB2_SYS_LDB_QID_ITS_RST);
3043 DLB2_CHP_ORD_QID_SN(queue_id),
3044 DLB2_CHP_ORD_QID_SN_RST);
3047 DLB2_CHP_ORD_QID_SN_MAP(queue_id),
3048 DLB2_CHP_ORD_QID_SN_MAP_RST);
3051 DLB2_SYS_LDB_QID_V(queue_id),
3052 DLB2_SYS_LDB_QID_V_RST);
3055 DLB2_SYS_LDB_QID_CFG_V(queue_id),
3056 DLB2_SYS_LDB_QID_CFG_V_RST);
3058 if (queue->sn_cfg_valid) {
3061 offs[0] = DLB2_RO_PIPE_GRP_0_SLT_SHFT(queue->sn_slot);
3062 offs[1] = DLB2_RO_PIPE_GRP_1_SLT_SHFT(queue->sn_slot);
3065 offs[queue->sn_group],
3066 DLB2_RO_PIPE_GRP_0_SLT_SHFT_RST);
3069 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3071 DLB2_LSP_QID2CQIDIX(queue_id, i),
3072 DLB2_LSP_QID2CQIDIX_00_RST);
3075 DLB2_LSP_QID2CQIDIX2(queue_id, i),
3076 DLB2_LSP_QID2CQIDIX2_00_RST);
3079 DLB2_ATM_QID2CQIDIX(queue_id, i),
3080 DLB2_ATM_QID2CQIDIX_00_RST);
3085 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3086 struct dlb2_hw_domain *domain)
3088 struct dlb2_list_entry *iter;
3089 struct dlb2_dir_pq_pair *queue;
3092 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3094 DLB2_LSP_QID_DIR_MAX_DEPTH(queue->id.phys_id),
3095 DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3098 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(queue->id.phys_id),
3099 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3102 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(queue->id.phys_id),
3103 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3106 DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
3107 DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3110 DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3111 DLB2_SYS_DIR_QID_ITS_RST);
3114 DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3115 DLB2_SYS_DIR_QID_V_RST);
3119 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3120 struct dlb2_hw_domain *domain)
3122 dlb2_domain_reset_ldb_port_registers(hw, domain);
3124 dlb2_domain_reset_dir_port_registers(hw, domain);
3126 dlb2_domain_reset_ldb_queue_registers(hw, domain);
3128 dlb2_domain_reset_dir_queue_registers(hw, domain);
3131 DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3132 DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3135 DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3136 DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3139 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3140 struct dlb2_hw_domain *domain)
3142 struct dlb2_dir_pq_pair *tmp_dir_port;
3143 struct dlb2_ldb_queue *tmp_ldb_queue;
3144 struct dlb2_ldb_port *tmp_ldb_port;
3145 struct dlb2_list_entry *iter1;
3146 struct dlb2_list_entry *iter2;
3147 struct dlb2_function_resources *rsrcs;
3148 struct dlb2_dir_pq_pair *dir_port;
3149 struct dlb2_ldb_queue *ldb_queue;
3150 struct dlb2_ldb_port *ldb_port;
3151 struct dlb2_list_head *list;
3153 RTE_SET_USED(tmp_dir_port);
3154 RTE_SET_USED(tmp_ldb_queue);
3155 RTE_SET_USED(tmp_ldb_port);
3156 RTE_SET_USED(iter1);
3157 RTE_SET_USED(iter2);
3159 rsrcs = domain->parent_func;
3161 /* Move the domain's ldb queues to the function's avail list */
3162 list = &domain->used_ldb_queues;
3163 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3164 if (ldb_queue->sn_cfg_valid) {
3165 struct dlb2_sn_group *grp;
3167 grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3169 dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3170 ldb_queue->sn_cfg_valid = false;
3173 ldb_queue->owned = false;
3174 ldb_queue->num_mappings = 0;
3175 ldb_queue->num_pending_additions = 0;
3177 dlb2_list_del(&domain->used_ldb_queues,
3178 &ldb_queue->domain_list);
3179 dlb2_list_add(&rsrcs->avail_ldb_queues,
3180 &ldb_queue->func_list);
3181 rsrcs->num_avail_ldb_queues++;
3184 list = &domain->avail_ldb_queues;
3185 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3186 ldb_queue->owned = false;
3188 dlb2_list_del(&domain->avail_ldb_queues,
3189 &ldb_queue->domain_list);
3190 dlb2_list_add(&rsrcs->avail_ldb_queues,
3191 &ldb_queue->func_list);
3192 rsrcs->num_avail_ldb_queues++;
3195 /* Move the domain's ldb ports to the function's avail list */
3196 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3197 list = &domain->used_ldb_ports[i];
3198 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3202 ldb_port->owned = false;
3203 ldb_port->configured = false;
3204 ldb_port->num_pending_removals = 0;
3205 ldb_port->num_mappings = 0;
3206 ldb_port->init_tkn_cnt = 0;
3207 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3208 ldb_port->qid_map[j].state =
3209 DLB2_QUEUE_UNMAPPED;
3211 dlb2_list_del(&domain->used_ldb_ports[i],
3212 &ldb_port->domain_list);
3213 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3214 &ldb_port->func_list);
3215 rsrcs->num_avail_ldb_ports[i]++;
3218 list = &domain->avail_ldb_ports[i];
3219 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3221 ldb_port->owned = false;
3223 dlb2_list_del(&domain->avail_ldb_ports[i],
3224 &ldb_port->domain_list);
3225 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3226 &ldb_port->func_list);
3227 rsrcs->num_avail_ldb_ports[i]++;
3231 /* Move the domain's dir ports to the function's avail list */
3232 list = &domain->used_dir_pq_pairs;
3233 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3234 dir_port->owned = false;
3235 dir_port->port_configured = false;
3236 dir_port->init_tkn_cnt = 0;
3238 dlb2_list_del(&domain->used_dir_pq_pairs,
3239 &dir_port->domain_list);
3241 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3242 &dir_port->func_list);
3243 rsrcs->num_avail_dir_pq_pairs++;
3246 list = &domain->avail_dir_pq_pairs;
3247 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3248 dir_port->owned = false;
3250 dlb2_list_del(&domain->avail_dir_pq_pairs,
3251 &dir_port->domain_list);
3253 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3254 &dir_port->func_list);
3255 rsrcs->num_avail_dir_pq_pairs++;
3258 /* Return hist list entries to the function */
3259 ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3260 domain->hist_list_entry_base,
3261 domain->total_hist_list_entries);
3264 "[%s()] Internal error: domain hist list base doesn't match the function's bitmap.\n",
3269 domain->total_hist_list_entries = 0;
3270 domain->avail_hist_list_entries = 0;
3271 domain->hist_list_entry_base = 0;
3272 domain->hist_list_entry_offset = 0;
3274 rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3275 domain->num_ldb_credits = 0;
3277 rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3278 domain->num_dir_credits = 0;
3280 rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3281 rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3282 domain->num_avail_aqed_entries = 0;
3283 domain->num_used_aqed_entries = 0;
3285 domain->num_pending_removals = 0;
3286 domain->num_pending_additions = 0;
3287 domain->configured = false;
3288 domain->started = false;
3291 * Move the domain out of the used_domains list and back to the
3292 * function's avail_domains list.
3294 dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3295 dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3296 rsrcs->num_avail_domains++;
3301 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3302 struct dlb2_hw_domain *domain,
3303 struct dlb2_ldb_queue *queue)
3305 struct dlb2_ldb_port *port;
3308 /* If a domain has LDB queues, it must have LDB ports */
3309 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3310 if (!dlb2_list_empty(&domain->used_ldb_ports[i]))
3314 if (i == DLB2_NUM_COS_DOMAINS) {
3316 "[%s()] Internal error: No configured LDB ports\n",
3321 port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i], typeof(*port));
3323 /* If necessary, free up a QID slot in this CQ */
3324 if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3325 struct dlb2_ldb_queue *mapped_queue;
3327 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3329 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3334 ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3338 return dlb2_domain_drain_mapped_queues(hw, domain);
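/*
 * Note on the approach above: a queue that is not mapped to any CQ
 * cannot be drained directly, so the code temporarily maps it to one of
 * the domain's in-use LDB CQs (first unmapping an existing QID if that
 * CQ's map slots are full) and then reuses the mapped-queue drain path.
 */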
3341 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3342 struct dlb2_hw_domain *domain)
3344 struct dlb2_list_entry *iter;
3345 struct dlb2_ldb_queue *queue;
3349 /* If the domain hasn't been started, there's no traffic to drain */
3350 if (!domain->started)
3354 * Pre-condition: the unattached queue must not have any outstanding
3355 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3356 * prior to this in dlb2_domain_drain_mapped_queues().
3358 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3359 if (queue->num_mappings != 0 ||
3360 dlb2_ldb_queue_is_empty(hw, queue))
3363 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3372 * dlb2_reset_domain() - Reset a DLB scheduling domain and its associated
3373 * hardware resources.
3374 * @hw: Contains the current state of the DLB2 hardware.
3375 * @domain_id: Domain ID
3376 * @vdev_req: Request came from a virtual device.
3377 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
3379 * Note: User software *must* stop sending to this domain's producer ports
3380 * before invoking this function, otherwise undefined behavior will result.
3382 * Return: returns < 0 on error, 0 otherwise.
3384 int dlb2_reset_domain(struct dlb2_hw *hw,
3387 unsigned int vdev_id)
3389 struct dlb2_hw_domain *domain;
3392 dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3394 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3396 if (domain == NULL || !domain->configured)
3401 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3403 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3406 /* Disable CQ interrupts */
3407 dlb2_domain_disable_dir_port_interrupts(hw, domain);
3409 dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3412 * For each queue owned by this domain, disable its write permissions to
3413 * cause any traffic sent to it to be dropped. Well-behaved software
3414 * should not be sending QEs at this point.
3416 dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3418 dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3420 /* Turn off completion tracking on all the domain's PPs. */
3421 dlb2_domain_disable_ldb_seq_checks(hw, domain);
3424 * Disable the LDB CQs and drain them in order to complete the map and
3425 * unmap procedures, which require zero CQ inflights and zero QID
3426 * inflights respectively.
3428 dlb2_domain_disable_ldb_cqs(hw, domain);
3430 ret = dlb2_domain_drain_ldb_cqs(hw, domain, false);
3434 ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3438 ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3442 ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3446 /* Re-enable the CQs in order to drain the mapped queues. */
3447 dlb2_domain_enable_ldb_cqs(hw, domain);
3449 ret = dlb2_domain_drain_mapped_queues(hw, domain);
3453 ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3457 /* Done draining LDB QEs, so disable the CQs. */
3458 dlb2_domain_disable_ldb_cqs(hw, domain);
3460 dlb2_domain_drain_dir_queues(hw, domain);
3462 /* Done draining DIR QEs, so disable the CQs. */
3463 dlb2_domain_disable_dir_cqs(hw, domain);
3466 dlb2_domain_disable_dir_producer_ports(hw, domain);
3468 dlb2_domain_disable_ldb_producer_ports(hw, domain);
3470 ret = dlb2_domain_verify_reset_success(hw, domain);
3474 /* Reset the QID and port state. */
3475 dlb2_domain_reset_registers(hw, domain);
3477 /* Hardware reset complete. Reset the domain's software state */
3478 ret = dlb2_domain_reset_software_state(hw, domain);
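/*
 * Usage sketch for dlb2_reset_domain() (illustrative only, not driver
 * code; the "domain_id" bookkeeping is assumed to live in the caller). A
 * PF-level caller that has already stopped all enqueues to the domain's
 * producer ports might do:
 *
 *	int ret = dlb2_reset_domain(hw, domain_id, false, 0);
 *
 *	if (ret < 0)
 *		DLB2_HW_ERR(hw, "domain %u reset failed: %d\n",
 *			    domain_id, ret);
 */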
3485 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
3489 /* Finish queue unmap jobs for any domain that needs it */
3490 for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
3491 struct dlb2_hw_domain *domain = &hw->domains[i];
3493 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3499 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
3503 /* Finish queue map jobs for any domain that needs it */
3504 for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
3505 struct dlb2_hw_domain *domain = &hw->domains[i];
3507 num += dlb2_domain_finish_map_qid_procedures(hw, domain);
3514 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
3515 struct dlb2_hw_domain *domain,
3516 struct dlb2_ldb_queue *queue,
3517 struct dlb2_create_ldb_queue_args *args,
3519 unsigned int vdev_id)
3521 union dlb2_sys_vf_ldb_vqid_v r0 = { {0} };
3522 union dlb2_sys_vf_ldb_vqid2qid r1 = { {0} };
3523 union dlb2_sys_ldb_qid2vqid r2 = { {0} };
3524 union dlb2_sys_ldb_vasqid_v r3 = { {0} };
3525 union dlb2_lsp_qid_ldb_infl_lim r4 = { {0} };
3526 union dlb2_lsp_qid_aqed_active_lim r5 = { {0} };
3527 union dlb2_aqed_pipe_qid_hid_width r6 = { {0} };
3528 union dlb2_sys_ldb_qid_its r7 = { {0} };
3529 union dlb2_lsp_qid_atm_depth_thrsh r8 = { {0} };
3530 union dlb2_lsp_qid_naldb_depth_thrsh r9 = { {0} };
3531 union dlb2_aqed_pipe_qid_fid_lim r10 = { {0} };
3532 union dlb2_chp_ord_qid_sn_map r11 = { {0} };
3533 union dlb2_sys_ldb_qid_cfg_v r12 = { {0} };
3534 union dlb2_sys_ldb_qid_v r13 = { {0} };
3536 struct dlb2_sn_group *sn_group;
3539 /* QID write permissions are turned on when the domain is started */
3540 r3.field.vasqid_v = 0;
3542 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
3545 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r3.val);
3548 * Unordered QIDs get 4K inflights, ordered get as many as the number
3549 * of sequence numbers.
3551 r4.field.limit = args->num_qid_inflights;
3553 DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r4.val);
3555 r5.field.limit = queue->aqed_limit;
3557 if (r5.field.limit > DLB2_MAX_NUM_AQED_ENTRIES)
3558 r5.field.limit = DLB2_MAX_NUM_AQED_ENTRIES;
3561 DLB2_LSP_QID_AQED_ACTIVE_LIM(queue->id.phys_id),
3564 switch (args->lock_id_comp_level) {
3565 case 64:
3566 r6.field.compress_code = 1;
3567 break;
3568 case 128:
3569 r6.field.compress_code = 2;
3570 break;
3571 case 256:
3572 r6.field.compress_code = 3;
3573 break;
3574 case 512:
3575 r6.field.compress_code = 4;
3576 break;
3577 case 1024:
3578 r6.field.compress_code = 5;
3579 break;
3580 case 2048:
3581 r6.field.compress_code = 6;
3582 break;
3583 case 4096:
3584 r6.field.compress_code = 7;
3585 break;
3586 default:
3587 /* 0 and 65536 (no compression) land here */
3588 r6.field.compress_code = 0;
3589 }
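/*
 * The encoding above is compress_code = log2(lock_id_comp_level) - 5
 * for levels 64 through 4096 (64 -> 1, ..., 4096 -> 7); levels 0 and
 * 65536 select code 0, i.e. the lock ID is not compressed.
 */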
3592 DLB2_AQED_PIPE_QID_HID_WIDTH(queue->id.phys_id),
3595 /* Don't timestamp QEs that pass through this queue */
3596 r7.field.qid_its = 0;
3599 DLB2_SYS_LDB_QID_ITS(queue->id.phys_id),
3602 r8.field.thresh = args->depth_threshold;
3605 DLB2_LSP_QID_ATM_DEPTH_THRSH(queue->id.phys_id),
3608 r9.field.thresh = args->depth_threshold;
3611 DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue->id.phys_id),
3615 * This register limits the number of inflight flows a queue can have
3616 * at one time. It has an upper bound of 2048, but can be
3617 * over-subscribed. 512 is chosen so that a single queue doesn't use
3618 * the entire atomic storage, but can use a substantial portion if
3619 * needed.
3620 */
3621 r10.field.qid_fid_limit = 512;
3624 DLB2_AQED_PIPE_QID_FID_LIM(queue->id.phys_id),
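/*
 * Illustrative arithmetic based on the comment above: with a 2048-entry
 * bound, four queues at the default per-queue limit of 512 could fully
 * subscribe the atomic storage in the worst case; additional queues are
 * still permitted because the bound may be over-subscribed.
 */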
3628 sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
3629 r11.field.mode = sn_group->mode;
3630 r11.field.slot = queue->sn_slot;
3631 r11.field.grp = sn_group->id;
3633 DLB2_CSR_WR(hw, DLB2_CHP_ORD_QID_SN_MAP(queue->id.phys_id), r11.val);
3635 r12.field.sn_cfg_v = (args->num_sequence_numbers != 0);
3636 r12.field.fid_cfg_v = (args->num_atomic_inflights != 0);
3638 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), r12.val);
3641 offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
3643 r0.field.vqid_v = 1;
3645 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), r0.val);
3647 r1.field.qid = queue->id.phys_id;
3649 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), r1.val);
3651 r2.field.vqid = queue->id.virt_id;
3654 DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
3658 r13.field.qid_v = 1;
3660 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), r13.val);
3664 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3665 struct dlb2_ldb_queue *queue,
3666 struct dlb2_create_ldb_queue_args *args)
3671 queue->sn_cfg_valid = false;
3673 if (args->num_sequence_numbers == 0)
3676 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3677 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3679 if (group->sequence_numbers_per_queue ==
3680 args->num_sequence_numbers &&
3681 !dlb2_sn_group_full(group)) {
3682 slot = dlb2_sn_group_alloc_slot(group);
3690 "[%s():%d] Internal error: no sequence number slots available\n",
3691 __func__, __LINE__);
3695 queue->sn_cfg_valid = true;
3696 queue->sn_group = i;
3697 queue->sn_slot = slot;
3702 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
3703 struct dlb2_hw_domain *domain,
3704 struct dlb2_ldb_queue *queue,
3705 struct dlb2_create_ldb_queue_args *args)
3709 ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
3713 /* Attach QID inflights */
3714 queue->num_qid_inflights = args->num_qid_inflights;
3716 /* Attach atomic inflights */
3717 queue->aqed_limit = args->num_atomic_inflights;
3719 domain->num_avail_aqed_entries -= args->num_atomic_inflights;
3720 domain->num_used_aqed_entries += args->num_atomic_inflights;
3726 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3728 struct dlb2_create_ldb_queue_args *args,
3729 struct dlb2_cmd_response *resp,
3731 unsigned int vdev_id)
3733 struct dlb2_hw_domain *domain;
3736 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3738 if (domain == NULL) {
3739 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3743 if (!domain->configured) {
3744 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3748 if (domain->started) {
3749 resp->status = DLB2_ST_DOMAIN_STARTED;
3753 if (dlb2_list_empty(&domain->avail_ldb_queues)) {
3754 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3758 if (args->num_sequence_numbers) {
3759 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3760 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3762 if (group->sequence_numbers_per_queue ==
3763 args->num_sequence_numbers &&
3764 !dlb2_sn_group_full(group))
3768 if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3769 resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3774 if (args->num_qid_inflights > 4096) {
3775 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3779 /* Inflights must be <= number of sequence numbers if ordered */
3780 if (args->num_sequence_numbers != 0 &&
3781 args->num_qid_inflights > args->num_sequence_numbers) {
3782 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3786 if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3787 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3791 if (args->num_atomic_inflights &&
3792 args->lock_id_comp_level != 0 &&
3793 args->lock_id_comp_level != 64 &&
3794 args->lock_id_comp_level != 128 &&
3795 args->lock_id_comp_level != 256 &&
3796 args->lock_id_comp_level != 512 &&
3797 args->lock_id_comp_level != 1024 &&
3798 args->lock_id_comp_level != 2048 &&
3799 args->lock_id_comp_level != 4096 &&
3800 args->lock_id_comp_level != 65536) {
3801 resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3809 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3811 struct dlb2_create_ldb_queue_args *args,
3813 unsigned int vdev_id)
3815 DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3817 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3818 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
3820 DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3821 args->num_sequence_numbers);
3822 DLB2_HW_DBG(hw, "\tNumber of QID inflights: %d\n",
3823 args->num_qid_inflights);
3824 DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
3825 args->num_atomic_inflights);
3829 * dlb2_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.
3830 * @hw: Contains the current state of the DLB2 hardware.
3831 * @domain_id: Domain ID
3832 * @args: User-provided arguments.
3833 * @resp: Response to user.
3834 * @vdev_req: Request came from a virtual device.
3835 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
3837 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
3838 * satisfy a request, resp->status will be set accordingly.
3840 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
3842 struct dlb2_create_ldb_queue_args *args,
3843 struct dlb2_cmd_response *resp,
3845 unsigned int vdev_id)
3847 struct dlb2_hw_domain *domain;
3848 struct dlb2_ldb_queue *queue;
3851 dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
3854 * Verify that hardware resources are available before attempting to
3855 * satisfy the request. This simplifies the error unwinding code.
3857 ret = dlb2_verify_create_ldb_queue_args(hw,
3866 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3867 if (domain == NULL) {
3869 "[%s():%d] Internal error: domain not found\n",
3870 __func__, __LINE__);
3874 queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3875 if (queue == NULL) {
3877 "[%s():%d] Internal error: no available ldb queues\n",
3878 __func__, __LINE__);
3882 ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
3885 "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
3886 __func__, __LINE__);
3890 dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
3892 queue->num_mappings = 0;
3894 queue->configured = true;
3897 * Configuration succeeded, so move the resource from the 'avail' to
3898 * the 'used' list.
3899 */
3900 dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
3902 dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
3905 resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
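/*
 * Usage sketch for dlb2_hw_create_ldb_queue() (illustrative only; the
 * field values are example choices, and "hw"/"domain_id" are assumed to
 * come from the caller). Creating an ordered queue that uses 64 sequence
 * numbers from the PF:
 *
 *	struct dlb2_create_ldb_queue_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	args.num_sequence_numbers = 64;
 *	args.num_qid_inflights = 64; // ordered: must be <= sequence numbers
 *	args.num_atomic_inflights = 0;
 *	args.lock_id_comp_level = 0;
 *
 *	ret = dlb2_hw_create_ldb_queue(hw, domain_id, &args, &resp,
 *				       false, 0);
 *	if (ret == 0)
 *		DLB2_HW_DBG(hw, "created LDB queue %d\n", resp.id);
 */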
3910 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)
3912 if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
3915 return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
3918 int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw,
3919 unsigned int group_id)
3921 if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
3924 return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
3927 static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
3928 unsigned int group_id,
3931 DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
3932 DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
3933 DLB2_HW_DBG(hw, "\tValue: %lu\n", val);
3936 int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
3937 unsigned int group_id,
3940 u32 valid_allocations[] = {64, 128, 256, 512, 1024};
3941 union dlb2_ro_pipe_grp_sn_mode r0 = { {0} };
3942 struct dlb2_sn_group *group;
3945 if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
3948 group = &hw->rsrcs.sn_groups[group_id];
3951 * Once the first load-balanced queue using an SN group is configured,
3952 * the group cannot be changed.
3954 if (group->slot_use_bitmap != 0)
3957 for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
3958 if (val == valid_allocations[mode])
3961 if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
3965 group->sequence_numbers_per_queue = val;
3967 r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
3968 r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
3970 DLB2_CSR_WR(hw, DLB2_RO_PIPE_GRP_SN_MODE, r0.val);
3972 dlb2_log_set_group_sequence_numbers(hw, group_id, val);
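/*
 * Note: a group's sequence-number storage is fixed, so "val" also fixes
 * the number of queue slots the group offers (assuming the usual 1024
 * sequence numbers per group: val == 64 yields 16 slots, val == 1024 a
 * single slot).
 */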
3977 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
3978 struct dlb2_hw_domain *domain,
3979 struct dlb2_ldb_port *port,
3981 unsigned int vdev_id)
3983 union dlb2_sys_ldb_pp2vas r0 = { {0} };
3984 union dlb2_sys_ldb_pp_v r4 = { {0} };
3986 r0.field.vas = domain->id.phys_id;
3988 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), r0.val);
3991 union dlb2_sys_vf_ldb_vpp2pp r1 = { {0} };
3992 union dlb2_sys_ldb_pp2vdev r2 = { {0} };
3993 union dlb2_sys_vf_ldb_vpp_v r3 = { {0} };
3998 * DLB uses producer port address bits 17:12 to determine the
3999 * producer port ID. In Scalable IOV mode, PP accesses come
4000 * through the PF MMIO window for the physical producer port,
4001 * so for translation purposes the virtual and physical port
4002 * IDs are equal.
4003 */
4004 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4005 virt_id = port->id.virt_id;
4007 virt_id = port->id.phys_id;
4009 r1.field.pp = port->id.phys_id;
4011 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4013 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), r1.val);
4015 r2.field.vdev = vdev_id;
4018 DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
4023 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r3.val);
4029 DLB2_SYS_LDB_PP_V(port->id.phys_id),
4033 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4034 struct dlb2_hw_domain *domain,
4035 struct dlb2_ldb_port *port,
4036 uintptr_t cq_dma_base,
4037 struct dlb2_create_ldb_port_args *args,
4039 unsigned int vdev_id)
4041 union dlb2_sys_ldb_cq_addr_l r0 = { {0} };
4042 union dlb2_sys_ldb_cq_addr_u r1 = { {0} };
4043 union dlb2_sys_ldb_cq2vf_pf_ro r2 = { {0} };
4044 union dlb2_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
4045 union dlb2_lsp_cq_ldb_tkn_depth_sel r4 = { {0} };
4046 union dlb2_chp_hist_list_lim r5 = { {0} };
4047 union dlb2_chp_hist_list_base r6 = { {0} };
4048 union dlb2_lsp_cq_ldb_infl_lim r7 = { {0} };
4049 union dlb2_chp_hist_list_push_ptr r8 = { {0} };
4050 union dlb2_chp_hist_list_pop_ptr r9 = { {0} };
4051 union dlb2_sys_ldb_cq_at r10 = { {0} };
4052 union dlb2_sys_ldb_cq_pasid r11 = { {0} };
4053 union dlb2_chp_ldb_cq2vas r12 = { {0} };
4054 union dlb2_lsp_cq2priov r13 = { {0} };
4056 /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4057 r0.field.addr_l = cq_dma_base >> 6;
4059 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), r0.val);
4061 r1.field.addr_u = cq_dma_base >> 32;
4063 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), r1.val);
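/*
 * Illustrative split: for cq_dma_base == 0x100001000ULL (64B aligned),
 * addr_u = cq_dma_base >> 32 = 0x1 and addr_l = cq_dma_base >> 6
 * truncated to the field's width; the low 6 bits are guaranteed zero by
 * the alignment check and carry no information.
 */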
4066 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4067 * cache lines out-of-order (but QEs within a cache line are always
4068 * updated in-order).
4070 r2.field.vf = vdev_id;
4071 r2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);
4074 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), r2.val);
4076 if (args->cq_depth <= 8) {
4077 r3.field.token_depth_select = 1;
4078 } else if (args->cq_depth == 16) {
4079 r3.field.token_depth_select = 2;
4080 } else if (args->cq_depth == 32) {
4081 r3.field.token_depth_select = 3;
4082 } else if (args->cq_depth == 64) {
4083 r3.field.token_depth_select = 4;
4084 } else if (args->cq_depth == 128) {
4085 r3.field.token_depth_select = 5;
4086 } else if (args->cq_depth == 256) {
4087 r3.field.token_depth_select = 6;
4088 } else if (args->cq_depth == 512) {
4089 r3.field.token_depth_select = 7;
4090 } else if (args->cq_depth == 1024) {
4091 r3.field.token_depth_select = 8;
4094 "[%s():%d] Internal error: invalid CQ depth\n",
4095 __func__, __LINE__);
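/*
 * The chain above encodes token_depth_select = log2(cq_depth) - 2 for
 * depths 8 through 1024; depths below 8 reuse the depth-8 encoding and
 * are compensated with a non-zero initial token count below.
 */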
4100 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
4104 * To support CQs with depth less than 8, program the token count
4105 * register with a non-zero initial value. Operations such as domain
4106 * reset must take this initial value into account when quiescing the
4107 * CQ.
4108 */
4109 port->init_tkn_cnt = 0;
4111 if (args->cq_depth < 8) {
4112 union dlb2_lsp_cq_ldb_tkn_cnt r14 = { {0} };
4114 port->init_tkn_cnt = 8 - args->cq_depth;
4116 r14.field.token_count = port->init_tkn_cnt;
4119 DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
4123 DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
4124 DLB2_LSP_CQ_LDB_TKN_CNT_RST);
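/*
 * Worked example: cq_depth == 4 gives init_tkn_cnt = 8 - 4 = 4, so the
 * hardware sees an 8-deep CQ with 4 tokens permanently consumed and the
 * effective depth visible to software remains 4.
 */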
4127 r4.field.token_depth_select = r3.field.token_depth_select;
4128 r4.field.ignore_depth = 0;
4131 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
4134 /* Reset the CQ write pointer */
4136 DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
4137 DLB2_CHP_LDB_CQ_WPTR_RST);
4139 r5.field.limit = port->hist_list_entry_limit - 1;
4141 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(port->id.phys_id), r5.val);
4143 r6.field.base = port->hist_list_entry_base;
4145 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_BASE(port->id.phys_id), r6.val);
4148 * The inflight limit sets a cap on the number of QEs for which this CQ
4149 * can owe completions at one time.
4151 r7.field.limit = args->cq_history_list_size;
4153 DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id), r7.val);
4155 r8.field.push_ptr = r6.field.base;
4156 r8.field.generation = 0;
4159 DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
4162 r9.field.pop_ptr = r6.field.base;
4163 r9.field.generation = 0;
4165 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id), r9.val);
4168 * Address translation (AT) settings: 0: untranslated, 2: translated
4169 * (see ATS spec regarding Address Type field for more details)
4171 r10.field.cq_at = 0;
4173 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), r10.val);
4175 if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4176 r11.field.pasid = hw->pasid[vdev_id];
4181 DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
4184 r12.field.cq2vas = domain->id.phys_id;
4186 DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(port->id.phys_id), r12.val);
4188 /* Disable the port's QID mappings */
4191 DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r13.val);
4196 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4197 struct dlb2_hw_domain *domain,
4198 struct dlb2_ldb_port *port,
4199 uintptr_t cq_dma_base,
4200 struct dlb2_create_ldb_port_args *args,
4202 unsigned int vdev_id)
4206 port->hist_list_entry_base = domain->hist_list_entry_base +
4207 domain->hist_list_entry_offset;
4208 port->hist_list_entry_limit = port->hist_list_entry_base +
4209 args->cq_history_list_size;
4211 domain->hist_list_entry_offset += args->cq_history_list_size;
4212 domain->avail_hist_list_entries -= args->cq_history_list_size;
4214 ret = dlb2_ldb_port_configure_cq(hw,
4224 dlb2_ldb_port_configure_pp(hw,
4230 dlb2_ldb_port_cq_enable(hw, port);
4232 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4233 port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4234 port->num_mappings = 0;
4236 port->enabled = true;
4238 port->configured = true;
4244 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4246 uintptr_t cq_dma_base,
4247 struct dlb2_create_ldb_port_args *args,
4249 unsigned int vdev_id)
4251 DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4253 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4254 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
4256 DLB2_HW_DBG(hw, "\tCQ depth: %d\n",
4258 DLB2_HW_DBG(hw, "\tCQ hist list size: %d\n",
4259 args->cq_history_list_size);
4260 DLB2_HW_DBG(hw, "\tCQ base address: 0x%lx\n",
4262 DLB2_HW_DBG(hw, "\tCoS ID: %u\n", args->cos_id);
4263 DLB2_HW_DBG(hw, "\tStrict CoS allocation: %u\n",
4268 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4270 uintptr_t cq_dma_base,
4271 struct dlb2_create_ldb_port_args *args,
4272 struct dlb2_cmd_response *resp,
4274 unsigned int vdev_id)
4276 struct dlb2_hw_domain *domain;
4279 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4281 if (domain == NULL) {
4282 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4286 if (!domain->configured) {
4287 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4291 if (domain->started) {
4292 resp->status = DLB2_ST_DOMAIN_STARTED;
4296 if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
4297 resp->status = DLB2_ST_INVALID_COS_ID;
4301 if (args->cos_strict) {
4302 if (dlb2_list_empty(&domain->avail_ldb_ports[args->cos_id])) {
4303 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4307 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4308 if (!dlb2_list_empty(&domain->avail_ldb_ports[i]))
4312 if (i == DLB2_NUM_COS_DOMAINS) {
4313 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4318 /* Check cache-line alignment */
4319 if ((cq_dma_base & 0x3F) != 0) {
4320 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4324 if (args->cq_depth != 1 &&
4325 args->cq_depth != 2 &&
4326 args->cq_depth != 4 &&
4327 args->cq_depth != 8 &&
4328 args->cq_depth != 16 &&
4329 args->cq_depth != 32 &&
4330 args->cq_depth != 64 &&
4331 args->cq_depth != 128 &&
4332 args->cq_depth != 256 &&
4333 args->cq_depth != 512 &&
4334 args->cq_depth != 1024) {
4335 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4339 /* The history list size must be >= 1 */
4340 if (!args->cq_history_list_size) {
4341 resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4345 if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4346 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4355 * dlb2_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
4356 * its resources.
4357 * @hw: Contains the current state of the DLB2 hardware.
4358 * @domain_id: Domain ID
4359 * @args: User-provided arguments.
4360 * @cq_dma_base: Base DMA address for consumer queue memory
4361 * @resp: Response to user.
4362 * @vdev_req: Request came from a virtual device.
4363 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
4365 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
4366 * satisfy a request, resp->status will be set accordingly.
4368 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4370 struct dlb2_create_ldb_port_args *args,
4371 uintptr_t cq_dma_base,
4372 struct dlb2_cmd_response *resp,
4374 unsigned int vdev_id)
4376 struct dlb2_hw_domain *domain;
4377 struct dlb2_ldb_port *port;
4380 dlb2_log_create_ldb_port_args(hw,
4388 * Verify that hardware resources are available before attempting to
4389 * satisfy the request. This simplifies the error unwinding code.
4391 ret = dlb2_verify_create_ldb_port_args(hw,
4401 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4402 if (domain == NULL) {
4404 "[%s():%d] Internal error: domain not found\n",
4405 __func__, __LINE__);
4409 if (args->cos_strict) {
4410 cos_id = args->cos_id;
4412 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[cos_id],
4417 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4418 idx = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
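/*
 * Non-strict allocation probes the CoS domains round-robin starting
 * from the requested one, e.g. cos_id == 2 tries CoS 2, 3, 0, 1.
 */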
4420 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[idx],
4431 "[%s():%d] Internal error: no available ldb ports\n",
4432 __func__, __LINE__);
4436 if (port->configured) {
4438 "[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
4443 ret = dlb2_configure_ldb_port(hw,
4454 * Configuration succeeded, so move the resource from the 'avail' to
4455 * the 'used' list.
4456 */
4457 dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4459 dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4462 resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4468 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
4470 uintptr_t cq_dma_base,
4471 struct dlb2_create_dir_port_args *args,
4473 unsigned int vdev_id)
4475 DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
4477 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4478 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
4480 DLB2_HW_DBG(hw, "\tCQ depth: %d\n",
4482 DLB2_HW_DBG(hw, "\tCQ base address: 0x%lx\n",
4486 static struct dlb2_dir_pq_pair *
4487 dlb2_get_domain_used_dir_pq(u32 id,
4489 struct dlb2_hw_domain *domain)
4491 struct dlb2_list_entry *iter;
4492 struct dlb2_dir_pq_pair *port;
4495 if (id >= DLB2_MAX_NUM_DIR_PORTS)
4498 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
4499 if ((!vdev_req && port->id.phys_id == id) ||
4500 (vdev_req && port->id.virt_id == id))
4507 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
4509 uintptr_t cq_dma_base,
4510 struct dlb2_create_dir_port_args *args,
4511 struct dlb2_cmd_response *resp,
4513 unsigned int vdev_id)
4515 struct dlb2_hw_domain *domain;
4517 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4519 if (domain == NULL) {
4520 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4524 if (!domain->configured) {
4525 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4529 if (domain->started) {
4530 resp->status = DLB2_ST_DOMAIN_STARTED;
4535 * If the user claims the queue is already configured, validate
4536 * the queue ID, its domain, and whether the queue is configured.
4538 if (args->queue_id != -1) {
4539 struct dlb2_dir_pq_pair *queue;
4541 queue = dlb2_get_domain_used_dir_pq(args->queue_id,
4545 if (queue == NULL || queue->domain_id.phys_id !=
4546 domain->id.phys_id ||
4547 !queue->queue_configured) {
4548 resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
4554 * If the port's queue is not configured, validate that a free
4555 * port-queue pair is available.
4557 if (args->queue_id == -1 &&
4558 dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
4559 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
4563 /* Check cache-line alignment */
4564 if ((cq_dma_base & 0x3F) != 0) {
4565 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4569 if (args->cq_depth != 1 &&
4570 args->cq_depth != 2 &&
4571 args->cq_depth != 4 &&
4572 args->cq_depth != 8 &&
4573 args->cq_depth != 16 &&
4574 args->cq_depth != 32 &&
4575 args->cq_depth != 64 &&
4576 args->cq_depth != 128 &&
4577 args->cq_depth != 256 &&
4578 args->cq_depth != 512 &&
4579 args->cq_depth != 1024) {
4580 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4587 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
4588 struct dlb2_hw_domain *domain,
4589 struct dlb2_dir_pq_pair *port,
4591 unsigned int vdev_id)
4593 union dlb2_sys_dir_pp2vas r0 = { {0} };
4594 union dlb2_sys_dir_pp_v r4 = { {0} };
4596 r0.field.vas = domain->id.phys_id;
4598 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), r0.val);
4601 union dlb2_sys_vf_dir_vpp2pp r1 = { {0} };
4602 union dlb2_sys_dir_pp2vdev r2 = { {0} };
4603 union dlb2_sys_vf_dir_vpp_v r3 = { {0} };
4608 * DLB uses producer port address bits 17:12 to determine the
4609 * producer port ID. In Scalable IOV mode, PP accesses come
4610 * through the PF MMIO window for the physical producer port,
4611 * so for translation purposes the virtual and physical port
4612 * IDs are equal.
4613 */
4614 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4615 virt_id = port->id.virt_id;
4617 virt_id = port->id.phys_id;
4619 r1.field.pp = port->id.phys_id;
4621 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
4623 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), r1.val);
4625 r2.field.vdev = vdev_id;
4628 DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
4633 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r3.val);
4639 DLB2_SYS_DIR_PP_V(port->id.phys_id),
4643 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
4644 struct dlb2_hw_domain *domain,
4645 struct dlb2_dir_pq_pair *port,
4646 uintptr_t cq_dma_base,
4647 struct dlb2_create_dir_port_args *args,
4649 unsigned int vdev_id)
4651 union dlb2_sys_dir_cq_addr_l r0 = { {0} };
4652 union dlb2_sys_dir_cq_addr_u r1 = { {0} };
4653 union dlb2_sys_dir_cq2vf_pf_ro r2 = { {0} };
4654 union dlb2_chp_dir_cq_tkn_depth_sel r3 = { {0} };
4655 union dlb2_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
4656 union dlb2_sys_dir_cq_fmt r9 = { {0} };
4657 union dlb2_sys_dir_cq_at r10 = { {0} };
4658 union dlb2_sys_dir_cq_pasid r11 = { {0} };
4659 union dlb2_chp_dir_cq2vas r12 = { {0} };
4661 /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4662 r0.field.addr_l = cq_dma_base >> 6;
4664 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), r0.val);
4666 r1.field.addr_u = cq_dma_base >> 32;
4668 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), r1.val);
4671 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4672 * cache lines out-of-order (but QEs within a cache line are always
4673 * updated in-order).
4675 r2.field.vf = vdev_id;
4676 r2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);
4679 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), r2.val);
4681 if (args->cq_depth <= 8) {
4682 r3.field.token_depth_select = 1;
4683 } else if (args->cq_depth == 16) {
4684 r3.field.token_depth_select = 2;
4685 } else if (args->cq_depth == 32) {
4686 r3.field.token_depth_select = 3;
4687 } else if (args->cq_depth == 64) {
4688 r3.field.token_depth_select = 4;
4689 } else if (args->cq_depth == 128) {
4690 r3.field.token_depth_select = 5;
4691 } else if (args->cq_depth == 256) {
4692 r3.field.token_depth_select = 6;
4693 } else if (args->cq_depth == 512) {
4694 r3.field.token_depth_select = 7;
4695 } else if (args->cq_depth == 1024) {
4696 r3.field.token_depth_select = 8;
4699 "[%s():%d] Internal error: invalid CQ depth\n",
4700 __func__, __LINE__);
4705 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
4709 * To support CQs with depth less than 8, program the token count
4710 * register with a non-zero initial value. Operations such as domain
4711 * reset must take this initial value into account when quiescing the
4712 * CQ.
4713 */
4714 port->init_tkn_cnt = 0;
4716 if (args->cq_depth < 8) {
4717 union dlb2_lsp_cq_dir_tkn_cnt r13 = { {0} };
4719 port->init_tkn_cnt = 8 - args->cq_depth;
4721 r13.field.count = port->init_tkn_cnt;
4724 DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
4728 DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
4729 DLB2_LSP_CQ_DIR_TKN_CNT_RST);
4732 r4.field.token_depth_select = r3.field.token_depth_select;
4733 r4.field.disable_wb_opt = 0;
4734 r4.field.ignore_depth = 0;
4737 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
4740 /* Reset the CQ write pointer */
4742 DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
4743 DLB2_CHP_DIR_CQ_WPTR_RST);
4745 /* Virtualize the PPID */
4746 r9.field.keep_pf_ppid = 0;
4748 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), r9.val);
4751 * Address translation (AT) settings: 0: untranslated, 2: translated
4752 * (see ATS spec regarding Address Type field for more details)
4754 r10.field.cq_at = 0;
4756 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), r10.val);
4758 if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4759 r11.field.pasid = hw->pasid[vdev_id];
4764 DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
4767 r12.field.cq2vas = domain->id.phys_id;
4769 DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(port->id.phys_id), r12.val);
4774 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
4775 struct dlb2_hw_domain *domain,
4776 struct dlb2_dir_pq_pair *port,
4777 uintptr_t cq_dma_base,
4778 struct dlb2_create_dir_port_args *args,
4780 unsigned int vdev_id)
4784 ret = dlb2_dir_port_configure_cq(hw,
4795 dlb2_dir_port_configure_pp(hw,
4801 dlb2_dir_port_cq_enable(hw, port);
4803 port->enabled = true;
4805 port->port_configured = true;
4811 * dlb2_hw_create_dir_port() - Allocate and initialize a DLB directed port
4812 * and queue. The port/queue pair have the same ID and name.
4813 * @hw: Contains the current state of the DLB2 hardware.
4814 * @domain_id: Domain ID
4815 * @args: User-provided arguments.
4816 * @cq_dma_base: Base DMA address for consumer queue memory
4817 * @resp: Response to user.
4818 * @vdev_req: Request came from a virtual device.
4819 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
4821 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
4822 * satisfy a request, resp->status will be set accordingly.
4824 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
4826 struct dlb2_create_dir_port_args *args,
4827 uintptr_t cq_dma_base,
4828 struct dlb2_cmd_response *resp,
4830 unsigned int vdev_id)
4832 struct dlb2_dir_pq_pair *port;
4833 struct dlb2_hw_domain *domain;
4836 dlb2_log_create_dir_port_args(hw,
4844 * Verify that hardware resources are available before attempting to
4845 * satisfy the request. This simplifies the error unwinding code.
4847 ret = dlb2_verify_create_dir_port_args(hw,
4857 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4859 if (args->queue_id != -1)
4860 port = dlb2_get_domain_used_dir_pq(args->queue_id,
4864 port = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4868 "[%s():%d] Internal error: no available dir ports\n",
4869 __func__, __LINE__);
4873 ret = dlb2_configure_dir_port(hw,
4884 * Configuration succeeded, so move the resource from the 'avail' to
4885 * the 'used' list (if it's not already there).
4887 if (args->queue_id == -1) {
4888 dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
4890 dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
4894 resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4899 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
4900 struct dlb2_hw_domain *domain,
4901 struct dlb2_dir_pq_pair *queue,
4902 struct dlb2_create_dir_queue_args *args,
4904 unsigned int vdev_id)
4906 union dlb2_sys_dir_vasqid_v r0 = { {0} };
4907 union dlb2_sys_dir_qid_its r1 = { {0} };
4908 union dlb2_lsp_qid_dir_depth_thrsh r2 = { {0} };
4909 union dlb2_sys_dir_qid_v r5 = { {0} };
4913 /* QID write permissions are turned on when the domain is started */
4914 r0.field.vasqid_v = 0;
4916 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES +
4919 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
4921 /* Don't timestamp QEs that pass through this queue */
4922 r1.field.qid_its = 0;
4925 DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
4928 r2.field.thresh = args->depth_threshold;
4931 DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
4935 union dlb2_sys_vf_dir_vqid_v r3 = { {0} };
4936 union dlb2_sys_vf_dir_vqid2qid r4 = { {0} };
4938 offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES + queue->id.virt_id;
4940 r3.field.vqid_v = 1;
4942 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), r3.val);
4944 r4.field.qid = queue->id.phys_id;
4946 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), r4.val);
4951 DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), r5.val);
4953 queue->queue_configured = true;
4957 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
4959 struct dlb2_create_dir_queue_args *args,
4961 unsigned int vdev_id)
4963 DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
4965 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4966 DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
4967 DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
4971 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
4973 struct dlb2_create_dir_queue_args *args,
4974 struct dlb2_cmd_response *resp,
4976 unsigned int vdev_id)
4978 struct dlb2_hw_domain *domain;
4980 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4982 if (domain == NULL) {
4983 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4987 if (!domain->configured) {
4988 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4992 if (domain->started) {
4993 resp->status = DLB2_ST_DOMAIN_STARTED;
4998 * If the user claims the port is already configured, validate the port
4999 * ID, its domain, and whether the port is configured.
5001 if (args->port_id != -1) {
5002 struct dlb2_dir_pq_pair *port;
5004 port = dlb2_get_domain_used_dir_pq(args->port_id,
5008 if (port == NULL || port->domain_id.phys_id !=
5009 domain->id.phys_id || !port->port_configured) {
5010 resp->status = DLB2_ST_INVALID_PORT_ID;
5016 * If the queue's port is not configured, validate that a free
5017 * port-queue pair is available.
5019 if (args->port_id == -1 &&
5020 dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
5021 resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
5029 * dlb2_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
5030 * @hw: Contains the current state of the DLB2 hardware.
5031 * @domain_id: Domain ID
5032 * @args: User-provided arguments.
5033 * @resp: Response to user.
5034 * @vdev_req: Request came from a virtual device.
5035 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
5037 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
5038 * satisfy a request, resp->status will be set accordingly.
int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_create_dir_queue_args *args,
			     struct dlb2_cmd_response *resp,
			     bool vdev_req,
			     unsigned int vdev_id)
{
	struct dlb2_dir_pq_pair *queue;
	struct dlb2_hw_domain *domain;
	int ret;

	dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_dir_queue_args(hw,
						domain_id,
						args,
						resp,
						vdev_req,
						vdev_id);
	if (ret)
		return ret;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: domain not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	if (args->port_id != -1)
		queue = dlb2_get_domain_used_dir_pq(args->port_id,
						    vdev_req,
						    domain);
	else
		queue = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
					   typeof(*queue));
	if (queue == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: no available dir queues\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);

	/*
	 * Configuration succeeded, so move the resource from the 'avail' to
	 * the 'used' list (if it's not already there).
	 */
	if (args->port_id == -1) {
		dlb2_list_del(&domain->avail_dir_pq_pairs,
			      &queue->domain_list);

		dlb2_list_add(&domain->used_dir_pq_pairs,
			      &queue->domain_list);
	}

	resp->status = 0;

	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;

	return 0;
}
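/*
 * Scan the port's QID-map slots for one whose unmap is in progress and whose
 * pending map targets the given queue. Returns true and writes the slot
 * index to *slot if such a slot exists.
 */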
static bool
dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];

		if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
		    map->pending_qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}
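/*
 * Update the hardware priority of an already-mapped queue by rewriting the
 * port's CQ2PRIOV register, then record the new priority in the software
 * slot state.
 */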
static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot,
					      struct dlb2_map_qid_args *args)
{
	union dlb2_lsp_cq2priov r0;

	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id));

	r0.field.v |= 1 << slot;
	r0.field.prio |= (args->priority & 0x7) << slot * 3;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r0.val);

	dlb2_flush_csr(hw);

	port->qid_map[slot].priority = args->priority;
}
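/*
 * Check whether the port can accept another QID mapping: either a slot is
 * free, the queue is already mapped (making this a priority update), or a
 * slot will become available once an in-progress unmap completes.
 */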
static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
					      struct dlb2_ldb_queue *queue,
					      struct dlb2_cmd_response *resp)
{
	enum dlb2_qid_map_state state;
	int i;

	/* Unused slot available? */
	if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
		return 0;

	/*
	 * If the queue is already mapped (from the application's perspective),
	 * this is simply a priority update.
	 */
	state = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, state, queue, &i))
		return 0;

	state = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, state, queue, &i))
		return 0;

	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
		return 0;

	/*
	 * If the slot contains an unmap in progress, it's considered
	 * available.
	 */
	state = DLB2_QUEUE_UNMAP_IN_PROG;
	if (dlb2_port_find_slot(port, state, &i))
		return 0;

	state = DLB2_QUEUE_UNMAPPED;
	if (dlb2_port_find_slot(port, state, &i))
		return 0;

	resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
	return -EINVAL;
}
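/*
 * Look up a load-balanced queue by ID (physical or, for vdev requests,
 * virtual) among the queues owned by the given domain.
 */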
static struct dlb2_ldb_queue *
dlb2_get_domain_ldb_queue(u32 id,
			  bool vdev_req,
			  struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
		if ((!vdev_req && queue->id.phys_id == id) ||
		    (vdev_req && queue->id.virt_id == id))
			return queue;

	return NULL;
}
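/*
 * Look up a load-balanced port by ID (physical or, for vdev requests,
 * virtual) across every class-of-service list the domain owns, checking
 * both the used and available port lists.
 */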
static struct dlb2_ldb_port *
dlb2_get_domain_used_ldb_port(u32 id,
			      bool vdev_req,
			      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (id >= DLB2_MAX_NUM_LDB_PORTS)
		return NULL;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			if ((!vdev_req && port->id.phys_id == id) ||
			    (vdev_req && port->id.virt_id == id))
				return port;

		DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter)
			if ((!vdev_req && port->id.phys_id == id) ||
			    (vdev_req && port->id.virt_id == id))
				return port;
	}

	return NULL;
}
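/*
 * Validate a map-QID request: the domain must exist and be configured, the
 * port and queue must belong to it, and the priority must be in range. On
 * failure, resp->status identifies the offending argument.
 */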
static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
				    u32 domain_id,
				    struct dlb2_map_qid_args *args,
				    struct dlb2_cmd_response *resp,
				    bool vdev_req,
				    unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_port *port;
	struct dlb2_ldb_queue *queue;
	int id;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (domain == NULL) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);

	if (port == NULL || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	if (args->priority >= DLB2_QID_PRIORITIES) {
		resp->status = DLB2_ST_INVALID_PRIORITY;
		return -EINVAL;
	}

	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);

	if (queue == NULL || !queue->configured) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	if (queue->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	if (port->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	return 0;
}
static void dlb2_log_map_qid(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_map_qid_args *args,
			     bool vdev_req,
			     unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
		    domain_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n",
		    args->port_id);
	DLB2_HW_DBG(hw, "\tQueue ID: %d\n",
		    args->qid);
	DLB2_HW_DBG(hw, "\tPriority: %d\n",
		    args->priority);
}
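/**
 * dlb2_hw_map_qid() - Map a load-balanced queue to a load-balanced port.
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @args: User-provided arguments.
 * @resp: Response to user.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */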
int dlb2_hw_map_qid(struct dlb2_hw *hw,
		    u32 domain_id,
		    struct dlb2_map_qid_args *args,
		    struct dlb2_cmd_response *resp,
		    bool vdev_req,
		    unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	enum dlb2_qid_map_state st;
	struct dlb2_ldb_port *port;
	int ret, i, id;
	u8 prio;

	dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_map_qid_args(hw,
				       domain_id,
				       args,
				       resp,
				       vdev_req,
				       vdev_id);
	if (ret)
		return ret;

	prio = args->priority;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: domain not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
	if (port == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
	if (queue == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: queue not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/*
	 * If there are any outstanding detach operations for this port,
	 * attempt to complete them. This may be necessary to free up a QID
	 * slot for this requested mapping.
	 */
	if (port->num_pending_removals)
		dlb2_domain_finish_unmap_port(hw, domain, port);

	ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
	if (ret)
		return ret;

	/* Hardware requires disabling the CQ before mapping QIDs. */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	/*
	 * If this is only a priority change, don't perform the full QID->CQ
	 * mapping procedure.
	 */
	st = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
			DLB2_HW_ERR(hw,
				    "[%s():%d] Internal error: port slot tracking failed\n",
				    __func__, __LINE__);
			return -EFAULT;
		}

		if (prio != port->qid_map[i].priority) {
			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
		}

		st = DLB2_QUEUE_MAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto map_qid_done;
	}

	st = DLB2_QUEUE_UNMAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
			DLB2_HW_ERR(hw,
				    "[%s():%d] Internal error: port slot tracking failed\n",
				    __func__, __LINE__);
			return -EFAULT;
		}

		if (prio != port->qid_map[i].priority) {
			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
		}

		st = DLB2_QUEUE_MAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto map_qid_done;
	}

	/*
	 * If this is a priority change on an in-progress mapping, don't
	 * perform the full QID->CQ mapping procedure.
	 */
	st = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
			DLB2_HW_ERR(hw,
				    "[%s():%d] Internal error: port slot tracking failed\n",
				    __func__, __LINE__);
			return -EFAULT;
		}

		port->qid_map[i].priority = prio;

		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");

		goto map_qid_done;
	}

	/*
	 * If this is a priority change on a pending mapping, update the
	 * pending priority.
	 */
	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
			DLB2_HW_ERR(hw,
				    "[%s():%d] Internal error: port slot tracking failed\n",
				    __func__, __LINE__);
			return -EFAULT;
		}

		port->qid_map[i].pending_priority = prio;

		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");

		goto map_qid_done;
	}

	/*
	 * If all the CQ's slots are in use, then there's an unmap in progress
	 * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
	 * mapping to pending_map and return. When the removal is completed for
	 * the slot's current occupant, this mapping will be performed.
	 */
	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
		if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
			enum dlb2_qid_map_state st;

			if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
				DLB2_HW_ERR(hw,
					    "[%s():%d] Internal error: port slot tracking failed\n",
					    __func__, __LINE__);
				return -EFAULT;
			}

			port->qid_map[i].pending_qid = queue->id.phys_id;
			port->qid_map[i].pending_priority = prio;

			st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

			ret = dlb2_port_slot_state_transition(hw, port, queue,
							      i, st);
			if (ret)
				return ret;

			DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");

			goto map_qid_done;
		}
	}

	/*
	 * If the domain has started, a special "dynamic" CQ->queue mapping
	 * procedure is required in order to safely update the CQ<->QID tables.
	 * The "static" procedure cannot be used when traffic is flowing,
	 * because the CQ<->QID tables cannot be updated atomically and the
	 * scheduler won't see the new mapping unless the queue's if_status
	 * changes, which isn't guaranteed.
	 */
	ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);

	/* If ret is less than zero, it's due to an internal error */
	if (ret)
		return ret;

map_qid_done:
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	resp->status = 0;

	return 0;
}
static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
			       u32 domain_id,
			       struct dlb2_unmap_qid_args *args,
			       bool vdev_req,
			       unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
		    domain_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n",
		    args->port_id);
	DLB2_HW_DBG(hw, "\tQueue ID: %d\n",
		    args->qid);
	if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
		DLB2_HW_DBG(hw, "\tQueue's num mappings: %d\n",
			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
}
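/*
 * Validate an unmap-QID request: the domain, port, and queue must all exist
 * and be configured, and the queue must be mapped to the port (or have a map
 * in progress or pending) from the application's perspective.
 */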
static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
				      u32 domain_id,
				      struct dlb2_unmap_qid_args *args,
				      struct dlb2_cmd_response *resp,
				      bool vdev_req,
				      unsigned int vdev_id)
{
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	struct dlb2_ldb_port *port;
	int slot;
	int id;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (domain == NULL) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);

	if (port == NULL || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	if (port->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);

	if (queue == NULL || !queue->configured) {
		DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
			    __func__, args->qid);
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	/*
	 * Verify that the port has the queue mapped. From the application's
	 * perspective a queue is mapped if it is actually mapped, the map is
	 * in progress, or the map is blocked pending an unmap.
	 */
	state = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
		return 0;

	state = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
		return 0;

	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
		return 0;

	resp->status = DLB2_ST_INVALID_QID;
	return -EINVAL;
}
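/**
 * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port.
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @args: User-provided arguments.
 * @resp: Response to user.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 *
 * Note: the unmap is not guaranteed to be complete when this function
 * returns; inflight QEs must drain first, and a background worker finishes
 * the operation if necessary.
 */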
int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
		      u32 domain_id,
		      struct dlb2_unmap_qid_args *args,
		      struct dlb2_cmd_response *resp,
		      bool vdev_req,
		      unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	enum dlb2_qid_map_state st;
	struct dlb2_ldb_port *port;
	bool unmap_complete;
	int i, ret, id;

	dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_unmap_qid_args(hw,
					 domain_id,
					 args,
					 resp,
					 vdev_req,
					 vdev_id);
	if (ret)
		return ret;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: domain not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
	if (port == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
	if (queue == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: queue not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/*
	 * If the queue hasn't been mapped yet, we need to update the slot's
	 * state and re-enable the queue's inflights.
	 */
	st = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
			DLB2_HW_ERR(hw,
				    "[%s():%d] Internal error: port slot tracking failed\n",
				    __func__, __LINE__);
			return -EFAULT;
		}

		/*
		 * Since the in-progress map was aborted, re-enable the QID's
		 * inflights.
		 */
		if (queue->num_pending_additions == 0)
			dlb2_ldb_queue_set_inflight_limit(hw, queue);

		st = DLB2_QUEUE_UNMAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto unmap_qid_done;
	}

	/*
	 * If the queue mapping is on hold pending an unmap, we simply need to
	 * update the slot's state.
	 */
	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
			DLB2_HW_ERR(hw,
				    "[%s():%d] Internal error: port slot tracking failed\n",
				    __func__, __LINE__);
			return -EFAULT;
		}

		st = DLB2_QUEUE_UNMAP_IN_PROG;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto unmap_qid_done;
	}

	st = DLB2_QUEUE_MAPPED;
	if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: no available CQ slots\n",
			    __func__);
		return -EFAULT;
	}

	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/*
	 * QID->CQ mapping removal is an asynchronous procedure. It requires
	 * stopping the DLB2 from scheduling this CQ, draining all inflights
	 * from the CQ, then unmapping the queue from the CQ. This function
	 * simply marks the port as needing the queue unmapped, and (if
	 * necessary) starts the unmapping worker thread.
	 */
	dlb2_ldb_port_cq_disable(hw, port);

	st = DLB2_QUEUE_UNMAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
	if (ret)
		return ret;

	/*
	 * Attempt to finish the unmapping now, in case the port has no
	 * outstanding inflights. If that's not the case, this will fail and
	 * the unmapping will be completed at a later time.
	 */
	unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);

	/*
	 * If the unmapping couldn't complete immediately, launch the worker
	 * thread (if it isn't already launched) to finish it later.
	 */
	if (!unmap_complete && !os_worker_active(hw))
		os_schedule_work(hw);

unmap_qid_done:
	resp->status = 0;

	return 0;
}
static void
dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
				  struct dlb2_pending_port_unmaps_args *args,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
}
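/**
 * dlb2_hw_pending_port_unmaps() - Query the number of in-progress unmaps.
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @args: User-provided arguments.
 * @resp: Response to user. On success, resp->id holds the number of unmap
 * operations still in progress on the port.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */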
int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
				u32 domain_id,
				struct dlb2_pending_port_unmaps_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_port *port;

	dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (domain == NULL) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
	if (port == NULL || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	resp->id = port->num_pending_removals;

	return 0;
}
static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
					 u32 domain_id,
					 struct dlb2_cmd_response *resp,
					 bool vdev_req,
					 unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (domain == NULL) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	return 0;
}
static void dlb2_log_start_domain(struct dlb2_hw *hw,
				  u32 domain_id,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}
/**
 * dlb2_hw_start_domain() - Lock the domain configuration
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @arg: User-provided arguments (unused, here for ioctl callback template).
 * @resp: Response to user.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */
int
dlb2_hw_start_domain(struct dlb2_hw *hw,
		     u32 domain_id,
		     __attribute((unused)) struct dlb2_start_domain_args *arg,
		     struct dlb2_cmd_response *resp,
		     bool vdev_req,
		     unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *dir_queue;
	struct dlb2_ldb_queue *ldb_queue;
	struct dlb2_hw_domain *domain;
	int ret;
	RTE_SET_USED(arg);
	RTE_SET_USED(iter);

	dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);

	ret = dlb2_verify_start_domain_args(hw,
					    domain_id,
					    resp,
					    vdev_req,
					    vdev_id);
	if (ret)
		return ret;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: domain not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/*
	 * Enable load-balanced and directed queue write permissions for the
	 * queues this domain owns. Without this, the DLB2 will drop all
	 * incoming traffic to those queues.
	 */
	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
		union dlb2_sys_ldb_vasqid_v r0 = { {0} };
		unsigned int offs;

		r0.field.vasqid_v = 1;

		offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
			ldb_queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
	}

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
		union dlb2_sys_dir_vasqid_v r0 = { {0} };
		unsigned int offs;

		r0.field.vasqid_v = 1;

		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS +
			dir_queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
	}

	dlb2_flush_csr(hw);

	domain->started = true;