/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb2_hw_types.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"
#include "dlb2_resource.h"

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"
#define DLB2_DOM_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function only needs to be called for configuration that can occur after the
 * domain has started; prior to starting, applications can't send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
}
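
/*
 * Domain resource lists are kept per class-of-service (CoS) for load-balanced
 * ports; all other resource types use a single list per domain or function.
 */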
static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
	int i;

	dlb2_list_init_head(&domain->used_ldb_queues);
	dlb2_list_init_head(&domain->used_dir_pq_pairs);
	dlb2_list_init_head(&domain->avail_ldb_queues);
	dlb2_list_init_head(&domain->avail_dir_pq_pairs);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&domain->used_ldb_ports[i]);
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}
static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
{
	int i;

	dlb2_list_init_head(&rsrc->avail_domains);
	dlb2_list_init_head(&rsrc->used_domains);
	dlb2_list_init_head(&rsrc->avail_ldb_queues);
	dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
}
/**
 * dlb2_resource_free() - free device state memory
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function frees software state pointed to by dlb2_hw. This function
 * should be called when resetting the device or unloading the driver.
 */
void dlb2_resource_free(struct dlb2_hw *hw)
{
	int i;

	if (hw->pf.avail_hist_list_entries)
		dlb2_bitmap_free(hw->pf.avail_hist_list_entries);

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		if (hw->vdev[i].avail_hist_list_entries)
			dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
	}
}
/**
 * dlb2_resource_init() - initialize the device
 * @hw: pointer to struct dlb2_hw.
 * @ver: device version.
 *
 * This function initializes the device's software state (pointed to by the hw
 * argument) and programs global scheduling QoS registers. This function should
 * be called during driver initialization, and the dlb2_hw structure should
 * be zero-initialized before calling the function.
 *
 * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
 * driver unloads.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 */
int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
	struct dlb2_list_entry *list;
	unsigned int i;
	int ret;

	/*
	 * For optimal load-balancing, ports that map to one or more QIDs in
	 * common should not be in numerical sequence. The port->QID mapping is
	 * application dependent, but the driver interleaves port IDs as much
	 * as possible to reduce the likelihood of sequential ports mapping to
	 * the same QID(s). This initial allocation of port IDs maximizes the
	 * average distance between an ID and its immediate neighbors (i.e.
	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
	 * 3, etc.).
	 */
	const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
		0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,
		16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
		32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
		48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
	};

	hw->ver = ver;

	dlb2_init_fn_rsrc_lists(&hw->pf);

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
		dlb2_init_fn_rsrc_lists(&hw->vdev[i]);

	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		dlb2_init_domain_rsrc_lists(&hw->domains[i]);
		hw->domains[i].parent_func = &hw->pf;
	}

	/* Give all resources to the PF driver */
	hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
	for (i = 0; i < hw->pf.num_avail_domains; i++) {
		list = &hw->domains[i].func_list;

		dlb2_list_add(&hw->pf.avail_domains, list);
	}

	hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
		list = &hw->rsrcs.ldb_queues[i].func_list;

		dlb2_list_add(&hw->pf.avail_ldb_queues, list);
	}

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		hw->pf.num_avail_ldb_ports[i] =
			DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;

	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
		int cos_id = i >> DLB2_NUM_COS_DOMAINS;
		struct dlb2_ldb_port *port;

		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

		dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
			      &port->func_list);
	}

	hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
		list = &hw->rsrcs.dir_pq_pairs[i].func_list;

		dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
	}

	if (hw->ver == DLB2_HW_V2) {
		hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
		hw->pf.num_avail_dqed_entries =
			DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
	} else {
		hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
	}

	hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;

	ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
				DLB2_MAX_NUM_HIST_LIST_ENTRIES);
	if (ret)
		goto unwind;

	ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
	if (ret)
		goto unwind;
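
	/*
	 * Unlike the PF's bitmap (filled above), each vdev's history list
	 * bitmap starts out empty; entries are presumably assigned to a
	 * vdev later, when resources are carved out of the PF's pool.
	 */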
	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
					DLB2_MAX_NUM_HIST_LIST_ENTRIES);
		if (ret)
			goto unwind;

		ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
		if (ret)
			goto unwind;
	}

	/* Initialize the hardware resource IDs */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		hw->domains[i].id.phys_id = i;
		hw->domains[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
		hw->rsrcs.ldb_queues[i].id.phys_id = i;
		hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
		hw->rsrcs.ldb_ports[i].id.phys_id = i;
		hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
		hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
		hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
		hw->rsrcs.sn_groups[i].id = i;
		/* Default mode (0) is 64 sequence numbers per queue */
		hw->rsrcs.sn_groups[i].mode = 0;
		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
	}

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;

	return 0;

unwind:
	dlb2_resource_free(hw);

	return ret;
}
/**
 * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
 * @hw: dlb2_hw handle for a particular device.
 * @ver: device version.
 *
 * Clearing the PMCSR must be done at initialization to make the device fully
 * operational.
 */
void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
	u32 pmcsr_dis;

	pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));

	DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);

	DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
}
/**
 * dlb2_hw_get_num_resources() - query the PCI function's available resources
 * @hw: dlb2_hw handle for a particular device.
 * @arg: pointer to resource counts.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function returns the number of available resources for the PF or for a
 * VF.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, -EINVAL if vdev_req is true and vdev_id is
 * invalid.
 */
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
			      struct dlb2_get_num_resources_args *arg,
			      bool vdev_req,
			      unsigned int vdev_id)
{
	struct dlb2_function_resources *rsrcs;
	struct dlb2_bitmap *map;
	int i;

	if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
		return -EINVAL;

	if (vdev_req)
		rsrcs = &hw->vdev[vdev_id];
	else
		rsrcs = &hw->pf;

	arg->num_sched_domains = rsrcs->num_avail_domains;

	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;

	arg->num_ldb_ports = 0;
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];

	arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
	arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
	arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
	arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];

	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;

	arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;

	map = rsrcs->avail_hist_list_entries;

	arg->num_hist_list_entries = dlb2_bitmap_count(map);

	arg->max_contiguous_hist_list_entries =
		dlb2_bitmap_longest_set_range(map);

	if (hw->ver == DLB2_HW_V2) {
		arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
		arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
	} else {
		arg->num_credits = rsrcs->num_avail_entries;
	}
	return 0;
}
static void dlb2_configure_domain_credits_v2_5(struct dlb2_hw *hw,
					       struct dlb2_hw_domain *domain)
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, domain->num_credits, DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
	DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits_v2(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain)
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, domain->num_ldb_credits,
		      DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
	DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), reg);

	reg = 0;
	DLB2_BITS_SET(reg, domain->num_dir_credits,
		      DLB2_CHP_CFG_DIR_VAS_CRD_COUNT);
	DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	if (hw->ver == DLB2_HW_V2)
		dlb2_configure_domain_credits_v2(hw, domain);
	else
		dlb2_configure_domain_credits_v2_5(hw, domain);
}
static int dlb2_attach_credits(struct dlb2_function_resources *rsrcs,
			       struct dlb2_hw_domain *domain,
			       u32 num_credits,
			       struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_entries < num_credits) {
		resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_entries -= num_credits;
	domain->num_credits += num_credits;
	return 0;
}
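
/*
 * Port allocation works in three passes of decreasing preference: first a
 * port whose two neighbors belong to a different domain, then a port with
 * one such neighbor and the other neighbor unallocated, then a port with
 * both neighbors unallocated. If no candidate is found, the list head is
 * returned.
 */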
static struct dlb2_ldb_port *
dlb2_get_next_ldb_port(struct dlb2_hw *hw,
		       struct dlb2_function_resources *rsrcs,
		       u32 domain_id,
		       u32 cos_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	RTE_SET_USED(iter);

	/*
	 * To reduce the odds of consecutive load-balanced ports mapping to the
	 * same queue(s), the driver attempts to allocate ports whose neighbors
	 * are owned by a different domain.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[next].owned ||
		    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
			continue;

		if (!hw->rsrcs.ldb_ports[prev].owned ||
		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
			continue;

		return port;
	}

	/*
	 * Failing that, the driver looks for a port with one neighbor owned by
	 * a different domain and the other unallocated.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
			return port;

		if (!hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
			return port;
	}

	/*
	 * Failing that, the driver looks for a port with both neighbors
	 * unallocated.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    !hw->rsrcs.ldb_ports[next].owned)
			return port;
	}

	/* If all else fails, the driver returns the next available port. */
	return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
				   typeof(*port));
}
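
/* Attach num_ports load-balanced ports from a single class-of-service. */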
static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
				   struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_ports,
				   u32 cos_id,
				   struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb2_ldb_port *port;

		port = dlb2_get_next_ldb_port(hw, rsrcs,
					      domain->id.phys_id, cos_id);
		if (port == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
			      &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb2_list_add(&domain->avail_ldb_ports[cos_id],
			      &port->domain_list);
	}

	rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;

	return 0;
}
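
/*
 * Attach the domain's load-balanced ports. With cos_strict, each request
 * must be satisfied from its own class-of-service; otherwise allocation
 * starts at the requested CoS and round-robins across the other classes.
 */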
static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
				 struct dlb2_function_resources *rsrcs,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_create_sched_domain_args *args,
				 struct dlb2_cmd_response *resp)
{
	unsigned int i, j;
	int ret;

	if (args->cos_strict) {
		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			u32 num = args->num_cos_ldb_ports[i];

			/* Allocate ports from specific classes-of-service */
			ret = __dlb2_attach_ldb_ports(hw, rsrcs, domain,
						      num, i, resp);
			if (ret)
				return ret;
		}
	} else {
		unsigned int k;
		u32 cos_id;

		/*
		 * Attempt to allocate from specific class-of-service, but
		 * fallback to the other classes if that fails.
		 */
		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
				for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
					cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;

					ret = __dlb2_attach_ldb_ports(hw,
								      rsrcs,
								      domain,
								      1,
								      cos_id,
								      resp);
					if (ret == 0)
						break;
				}

				if (ret)
					return ret;
			}
		}
	}

	/* Allocate num_ldb_ports from any class-of-service */
	for (i = 0; i < args->num_ldb_ports; i++) {
		for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
			ret = __dlb2_attach_ldb_ports(hw, rsrcs, domain,
						      1, j, resp);
			if (ret == 0)
				break;
		}

		if (ret)
			return ret;
	}

	return 0;
}
static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
				 struct dlb2_function_resources *rsrcs,
				 struct dlb2_hw_domain *domain,
				 u32 num_ports,
				 struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb2_dir_pq_pair *port;

		port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
					   typeof(*port));
		if (port == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
	}

	rsrcs->num_avail_dir_pq_pairs -= num_ports;

	return 0;
}
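
/* DLB 2.0 tracks load-balanced and directed credits separately. */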
static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_credits,
				   struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_qed_entries < num_credits) {
		resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_qed_entries -= num_credits;
	domain->num_ldb_credits += num_credits;
	return 0;
}

static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_credits,
				   struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_dqed_entries < num_credits) {
		resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_dqed_entries -= num_credits;
	domain->num_dir_credits += num_credits;
	return 0;
}
static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
					struct dlb2_hw_domain *domain,
					u32 num_atomic_inflights,
					struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
	domain->num_avail_aqed_entries += num_atomic_inflights;
	return 0;
}
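
/*
 * History list entries must be contiguous, so the allocation below succeeds
 * only if the bitmap contains a long-enough run of set bits.
 */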
static int
dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
				     struct dlb2_hw_domain *domain,
				     u32 num_hist_list_entries,
				     struct dlb2_cmd_response *resp)
{
	struct dlb2_bitmap *bitmap;
	int base;

	if (num_hist_list_entries) {
		bitmap = rsrcs->avail_hist_list_entries;

		base = dlb2_bitmap_find_set_bit_range(bitmap,
						      num_hist_list_entries);
		if (base < 0)
			goto error;

		domain->total_hist_list_entries = num_hist_list_entries;
		domain->avail_hist_list_entries = num_hist_list_entries;
		domain->hist_list_entry_base = base;
		domain->hist_list_entry_offset = 0;

		dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
	}
	return 0;

error:
	resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
	return -EINVAL;
}
static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
				  struct dlb2_function_resources *rsrcs,
				  struct dlb2_hw_domain *domain,
				  u32 num_queues,
				  struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_ldb_queues < num_queues) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_queues; i++) {
		struct dlb2_ldb_queue *queue;

		queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
					    typeof(*queue));
		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);

		queue->domain_id = domain->id;
		queue->owned = true;

		dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
	}

	rsrcs->num_avail_ldb_queues -= num_queues;

	return 0;
}
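
/*
 * Attach the domain's share of every resource type. The caller is expected
 * to have validated availability first, so failures here are internal errors.
 */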
static int
dlb2_domain_attach_resources(struct dlb2_hw *hw,
			     struct dlb2_function_resources *rsrcs,
			     struct dlb2_hw_domain *domain,
			     struct dlb2_create_sched_domain_args *args,
			     struct dlb2_cmd_response *resp)
{
	int ret;

	ret = dlb2_attach_ldb_queues(hw, rsrcs, domain,
				     args->num_ldb_queues, resp);
	if (ret)
		return ret;

	ret = dlb2_attach_ldb_ports(hw, rsrcs, domain, args, resp);
	if (ret)
		return ret;

	ret = dlb2_attach_dir_ports(hw, rsrcs, domain,
				    args->num_dir_ports, resp);
	if (ret)
		return ret;

	if (hw->ver == DLB2_HW_V2) {
		ret = dlb2_attach_ldb_credits(rsrcs, domain,
					      args->num_ldb_credits, resp);
		if (ret)
			return ret;

		ret = dlb2_attach_dir_credits(rsrcs, domain,
					      args->num_dir_credits, resp);
		if (ret)
			return ret;
	} else {  /* DLB 2.5 */
		ret = dlb2_attach_credits(rsrcs, domain,
					  args->num_credits, resp);
		if (ret)
			return ret;
	}

	ret = dlb2_attach_domain_hist_list_entries(rsrcs, domain,
						   args->num_hist_list_entries,
						   resp);
	if (ret)
		return ret;

	ret = dlb2_attach_atomic_inflights(rsrcs, domain,
					   args->num_atomic_inflights, resp);
	if (ret)
		return ret;

	dlb2_configure_domain_credits(hw, domain);

	domain->configured = true;

	domain->started = false;

	rsrcs->num_avail_domains--;

	return 0;
}
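
/*
 * Check every resource count in args against what remains in rsrcs, setting
 * a specific resp->status for the first shortfall found.
 */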
static int
dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
				  struct dlb2_create_sched_domain_args *args,
				  struct dlb2_cmd_response *resp,
				  struct dlb2_hw *hw,
				  struct dlb2_hw_domain **out_domain)
{
	u32 num_avail_ldb_ports, req_ldb_ports;
	struct dlb2_bitmap *avail_hl_entries;
	unsigned int max_contig_hl_range;
	struct dlb2_hw_domain *domain;
	int i;

	avail_hl_entries = rsrcs->avail_hist_list_entries;

	max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);

	num_avail_ldb_ports = 0;
	req_ldb_ports = 0;
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];

		req_ldb_ports += args->num_cos_ldb_ports[i];
	}

	req_ldb_ports += args->num_ldb_ports;

	if (rsrcs->num_avail_domains < 1) {
		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
		return -EINVAL;
	}

	domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
	if (domain == NULL) {
		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
		return -EFAULT;
	}

	if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	if (req_ldb_ports > num_avail_ldb_ports) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
		if (args->num_cos_ldb_ports[i] >
		    rsrcs->num_avail_ldb_ports[i]) {
			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
		resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
		return -EINVAL;
	}

	if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}
	if (hw->ver == DLB2_HW_V2_5) {
		if (rsrcs->num_avail_entries < args->num_credits) {
			resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
	} else {
		if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
			resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
		if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
			resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	if (max_contig_hl_range < args->num_hist_list_entries) {
		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
		return -EINVAL;
	}

	*out_domain = domain;

	return 0;
}
static void
dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
				  struct dlb2_create_sched_domain_args *args,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tNumber of LDB queues: %d\n",
		    args->num_ldb_queues);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
		    args->num_ldb_ports);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0): %d\n",
		    args->num_cos_ldb_ports[0]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1): %d\n",
		    args->num_cos_ldb_ports[1]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2): %d\n",
		    args->num_cos_ldb_ports[2]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3): %d\n",
		    args->num_cos_ldb_ports[3]);
	DLB2_HW_DBG(hw, "\tStrict CoS allocation: %d\n",
		    args->cos_strict);
	DLB2_HW_DBG(hw, "\tNumber of DIR ports: %d\n",
		    args->num_dir_ports);
	DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
		    args->num_atomic_inflights);
	DLB2_HW_DBG(hw, "\tNumber of hist list entries: %d\n",
		    args->num_hist_list_entries);
	if (hw->ver == DLB2_HW_V2) {
		DLB2_HW_DBG(hw, "\tNumber of LDB credits: %d\n",
			    args->num_ldb_credits);
		DLB2_HW_DBG(hw, "\tNumber of DIR credits: %d\n",
			    args->num_dir_credits);
	} else {
		DLB2_HW_DBG(hw, "\tNumber of credits: %d\n",
			    args->num_credits);
	}
}
/**
 * dlb2_hw_create_sched_domain() - create a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @args: scheduling domain creation arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function creates a scheduling domain containing the resources specified
 * in args. The individual resources (queues, ports, credits) can be configured
 * after creating a scheduling domain.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the domain ID.
 *
 * resp->id contains a virtual ID if vdev_req is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, or the requested domain name
 *	    is already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
				struct dlb2_create_sched_domain_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	int ret;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp, hw, &domain);
	if (ret)
		return ret;

	dlb2_init_domain_rsrc_lists(domain);

	ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
	if (ret) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to verify args.\n",
			    __func__);

		return ret;
	}

	dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);

	dlb2_list_add(&rsrcs->used_domains, &domain->func_list);

	resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;

	return 0;
}
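
/*
 * Illustrative usage sketch (hypothetical PF-side caller, not part of this
 * file):
 *
 *	struct dlb2_create_sched_domain_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	args.num_ldb_queues = 2;
 *	args.num_ldb_ports = 4;
 *	ret = dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0);
 *	// On failure, resp.status holds an enum dlb2_error code.
 *
 * The CQ enable/disable helpers below follow each register write with
 * dlb2_flush_csr() so the change is observed before subsequent HCWs.
 */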
static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_dir_pq_pair *port)
{
	u32 reg = 0;

	DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}
static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
				   struct dlb2_dir_pq_pair *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));

	/*
	 * Account for the initial token count, which is used in order to
	 * provide a CQ with depth less than 8.
	 */
	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
	       port->init_tkn_cnt;
}
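
/*
 * Returning tokens is done with a single batch-token-return HCW: lock_id
 * carries (token count - 1), so one enqueue returns all outstanding tokens.
 */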
static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
			      struct dlb2_dir_pq_pair *port)
{
	unsigned int port_id = port->id.phys_id;
	u32 cnt;

	/* Return any outstanding tokens */
	cnt = dlb2_dir_cq_token_count(hw, port);

	if (cnt != 0) {
		struct dlb2_hcw hcw_mem[8], *hcw;
		void __iomem *pp_addr;

		pp_addr = os_map_producer_port(hw, port_id, false);

		/* Point hcw to a 64B-aligned location */
		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

		/*
		 * Program the first HCW for a batch token return and
		 * the rest as NOOPS
		 */
		memset(hcw, 0, 4 * sizeof(*hcw));
		hcw->cq_token = 1;
		hcw->lock_id = cnt - 1;

		dlb2_movdir64b(pp_addr, hcw);

		os_fence_hcw(hw, pp_addr);

		os_unmap_producer_port(hw, pp_addr);
	}
}
static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *port)
{
	u32 reg = 0;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}
static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
				     struct dlb2_hw_domain *domain,
				     bool toggle_port)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		/*
		 * Can't drain a port if it's not configured, and there's
		 * nothing to drain if its queue is unconfigured.
		 */
		if (!port->port_configured || !port->queue_configured)
			continue;

		if (toggle_port)
			dlb2_dir_port_cq_disable(hw, port);

		dlb2_drain_dir_cq(hw, port);

		if (toggle_port)
			dlb2_dir_port_cq_enable(hw, port);
	}

	return 0;
}
static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
				struct dlb2_dir_pq_pair *queue)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
							   queue->id.phys_id));

	return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
}

static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *queue)
{
	return dlb2_dir_queue_depth(hw, queue) == 0;
}
static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
		if (!dlb2_dir_queue_is_empty(hw, queue))
			return false;
	}

	return true;
}
static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	int i;

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
		dlb2_domain_drain_dir_cqs(hw, domain, true);

		if (dlb2_domain_dir_queues_empty(hw, domain))
			break;
	}

	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to empty queues\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * Drain the CQs one more time. For the queues to go empty, they would
	 * have scheduled one or more QEs.
	 */
	dlb2_domain_drain_dir_cqs(hw, domain, true);

	return 0;
}
static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_ldb_port *port)
{
	u32 reg = 0;

	/*
	 * Don't re-enable the port if a removal is pending. The caller should
	 * mark this port as enabled (if it isn't already), and when the
	 * removal completes the port will be enabled.
	 */
	if (port->num_pending_removals)
		return;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_ldb_port *port)
{
	u32 reg = 0;

	DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}
static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
				      struct dlb2_ldb_port *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));

	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
}

static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));

	/*
	 * Account for the initial token count, which is used in order to
	 * provide a CQ with depth less than 8.
	 */
	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
	       port->init_tkn_cnt;
}
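
/*
 * Draining an LDB CQ means issuing a completion for every inflight QE as
 * well as returning all CQ tokens; the first HCW does both, and the
 * remaining enqueues complete the rest of the inflights.
 */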
static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
	u32 infl_cnt, tkn_cnt;
	unsigned int i;

	infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
	tkn_cnt = dlb2_ldb_cq_token_count(hw, port);

	if (infl_cnt || tkn_cnt) {
		struct dlb2_hcw hcw_mem[8], *hcw;
		void __iomem *pp_addr;

		pp_addr = os_map_producer_port(hw, port->id.phys_id, true);

		/* Point hcw to a 64B-aligned location */
		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

		/*
		 * Program the first HCW for a completion and token return and
		 * the other HCWs as NOOPS
		 */
		memset(hcw, 0, 4 * sizeof(*hcw));
		hcw->qe_comp = (infl_cnt > 0);
		hcw->cq_token = (tkn_cnt > 0);
		hcw->lock_id = tkn_cnt - 1;

		/* Return tokens in the first HCW */
		dlb2_movdir64b(pp_addr, hcw);

		hcw->cq_token = 0;

		/* Issue remaining completions (if any) */
		for (i = 1; i < infl_cnt; i++)
			dlb2_movdir64b(pp_addr, hcw);

		os_fence_hcw(hw, pp_addr);

		os_unmap_producer_port(hw, pp_addr);
	}
}
static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain,
				      bool toggle_port)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			if (toggle_port)
				dlb2_ldb_port_cq_disable(hw, port);

			dlb2_drain_ldb_cq(hw, port);

			if (toggle_port)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}
static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
				struct dlb2_ldb_queue *queue)
{
	u32 aqed, ldb, atm;

	aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
							    queue->id.phys_id));
	ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
							   queue->id.phys_id));
	atm = DLB2_CSR_RD(hw,
			  DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));

	return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
	       + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
	       + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
}

static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
				    struct dlb2_ldb_queue *queue)
{
	return dlb2_ldb_queue_depth(hw, queue) == 0;
}
static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
					    struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		if (queue->num_mappings == 0)
			continue;

		if (!dlb2_ldb_queue_is_empty(hw, queue))
			return false;
	}

	return true;
}
static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
					   struct dlb2_hw_domain *domain)
{
	int i;

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	if (domain->num_pending_removals > 0) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to unmap domain queues\n",
			    __func__);
		return -EFAULT;
	}

	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
		dlb2_domain_drain_ldb_cqs(hw, domain, true);

		if (dlb2_domain_mapped_queues_empty(hw, domain))
			break;
	}

	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to empty queues\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * Drain the CQs one more time. For the queues to go empty, they would
	 * have scheduled one or more QEs.
	 */
	dlb2_domain_drain_ldb_cqs(hw, domain, true);

	return 0;
}
static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			port->enabled = true;

			dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}
static struct dlb2_ldb_queue *
dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
			   u32 id,
			   bool vdev_req,
			   unsigned int vdev_id)
{
	struct dlb2_list_entry *iter1;
	struct dlb2_list_entry *iter2;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter1);
	RTE_SET_USED(iter2);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	if (!vdev_req)
		return &hw->rsrcs.ldb_queues[id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
			if (queue->id.virt_id == id)
				return queue;
		}
	}

	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
		if (queue->id.virt_id == id)
			return queue;
	}

	return NULL;
}
static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
						      u32 id,
						      bool vdev_req,
						      unsigned int vdev_id)
{
	struct dlb2_list_entry *iteration;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	RTE_SET_USED(iteration);

	if (id >= DLB2_MAX_NUM_DOMAINS)
		return NULL;

	if (!vdev_req)
		return &hw->domains[id];

	rsrcs = &hw->vdev[vdev_id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
		if (domain->id.virt_id == id)
			return domain;
	}

	return NULL;
}
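
/*
 * Bookkeeping for the per-slot QID-map state machine: each legal transition
 * adjusts the queue/port mapping counts and the domain's pending-addition or
 * pending-removal counts; anything else is rejected as an internal error.
 */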
static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot,
					   enum dlb2_qid_map_state new_state)
{
	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
	struct dlb2_hw_domain *domain;
	int domain_id;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, domain_id);
		return -EFAULT;
	}

	switch (curr_state) {
	case DLB2_QUEUE_UNMAPPED:
		switch (new_state) {
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			break;
		case DLB2_QUEUE_MAP_IN_PROG:
			queue->num_pending_additions++;
			domain->num_pending_additions++;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAPPED:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG:
			port->num_pending_removals++;
			domain->num_pending_removals++;
			break;
		case DLB2_QUEUE_MAPPED:
			/* Priority change, nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_MAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
			/* Nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
		switch (new_state) {
		case DLB2_QUEUE_UNMAP_IN_PROG:
			/* Nothing to update */
			break;
		case DLB2_QUEUE_UNMAPPED:
			/*
			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
			 * becomes UNMAPPED before it transitions to
			 * MAP_IN_PROG.
			 */
			queue->num_mappings--;
			port->num_mappings--;
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		default:
			goto error;
		}
		break;
	default:
		goto error;
	}

	port->qid_map[slot].state = new_state;

	DLB2_HW_DBG(hw,
		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return 0;

error:
	DLB2_HW_ERR(hw,
		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return -EFAULT;
}
static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
				enum dlb2_qid_map_state state,
				int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
				      enum dlb2_qid_map_state state,
				      struct dlb2_ldb_queue *queue,
				      int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state &&
		    port->qid_map[i].qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}
/*
 * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
 * their function names imply, and should only be called by the dynamic CQ
 * mapping code.
 */
static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
					      struct dlb2_hw_domain *domain,
					      struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}

static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain,
					     struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}
static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
						struct dlb2_ldb_port *port,
						int slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}
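
/*
 * A static mapping programs CQ2PRIOV/CQ2QID and the QID2CQIDX vectors
 * directly; it is used while the domain is not yet started, or by the
 * dynamic-map code once it has quiesced the affected CQs.
 */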
static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
					struct dlb2_ldb_port *p,
					struct dlb2_ldb_queue *q,
					u8 priority)
{
	enum dlb2_qid_map_state state;
	u32 lsp_qid2cq2;
	u32 lsp_qid2cq;
	u32 atm_qid2cq;
	u32 cq2priov;
	u32 cq2qid;
	int i;

	/* Look for a pending or already mapped slot, else an unused slot */
	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/* Read-modify-write the priority and valid bit register */
	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));

	cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
	cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
		    & DLB2_LSP_CQ2PRIOV_PRIO;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);

	/* Read-modify-write the QID map register */
	if (i < 4)
		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
							  p->id.phys_id));
	else
		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
							  p->id.phys_id));

	if (i == 0 || i == 4)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
	if (i == 1 || i == 5)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
	if (i == 2 || i == 6)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
	if (i == 3 || i == 7)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);

	if (i < 4)
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
	else
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);

	atm_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_ATM_QID2CQIDIX(q->id.phys_id,
						     p->id.phys_id / 4));

	lsp_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
						     p->id.phys_id / 4));

	lsp_qid2cq2 = DLB2_CSR_RD(hw,
				  DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
						       p->id.phys_id / 4));

	switch (p->id.phys_id % 4) {
	case 0:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
		break;

	case 1:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
		break;

	case 2:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
		break;

	case 3:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    atm_qid2cq);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(hw->ver,
					q->id.phys_id, p->id.phys_id / 4),
		    lsp_qid2cq);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(hw->ver,
					 q->id.phys_id, p->id.phys_id / 4),
		    lsp_qid2cq2);

	dlb2_flush_csr(hw);

	p->qid_map[i].qid = q->id.phys_id;
	p->qid_map[i].priority = priority;

	state = DLB2_QUEUE_MAPPED;

	return dlb2_port_slot_state_transition(hw, p, q, i, state);
}
static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot)
{
	u32 ctrl = 0;
	u32 active;
	u32 enq;

	/* Set the atomic scheduling haswork bit */
	active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
							 queue->id.phys_id));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BITS_SET(ctrl,
		      DLB2_BITS_GET(active,
				    DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
		      DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);

	/* Set the non-atomic scheduling haswork bit */
	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	enq = DLB2_CSR_RD(hw,
			  DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
						       queue->id.phys_id));

	memset(&ctrl, 0, sizeof(ctrl));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BITS_SET(ctrl,
		      DLB2_BITS_GET(enq,
				    DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
		      DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);

	return 0;
}
static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      u8 slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	memset(&ctrl, 0, sizeof(ctrl));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}
static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
					      struct dlb2_ldb_queue *queue)
{
	u32 infl_lim = 0;

	DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
		      DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);

	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
		    infl_lim);
}

static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
						struct dlb2_ldb_queue *queue)
{
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
}
static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
						struct dlb2_hw_domain *domain,
						struct dlb2_ldb_port *port,
						struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	enum dlb2_qid_map_state state;
	int slot, ret, i;
	u32 infl_cnt;
	u8 prio;
	RTE_SET_USED(iter);

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: non-zero QID inflight count\n",
			    __func__);
		return -EINVAL;
	}

	/*
	 * Static map the port and set its corresponding has_work bits.
	 */
	state = DLB2_QUEUE_MAP_IN_PROG;
	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
		return -EINVAL;

	prio = port->qid_map[slot].priority;

	/*
	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
	 * the port's qid_map state.
	 */
	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
	if (ret)
		return ret;

	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
	if (ret)
		return ret;

	/*
	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
	 * prevent spurious schedules to cause the queue's inflight
	 * count to increase.
	 */
	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);

	/* Reset the queue's inflight status */
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			state = DLB2_QUEUE_MAPPED;
			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
		}
	}

	dlb2_ldb_queue_set_inflight_limit(hw, queue);

	/* Re-enable CQs mapped to this queue */
	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

	/* If this queue has other mappings pending, clear its inflight limit */
	if (queue->num_pending_additions > 0)
		dlb2_ldb_queue_clear_inflight_limit(hw, queue);

	return 0;
}
/**
 * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
 * @hw: dlb2_hw handle for a particular device.
 * @port: load-balanced port
 * @queue: load-balanced queue
 * @priority: queue servicing priority
 *
 * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
 * at a later point, and <0 if an error occurred.
 */
static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
					 struct dlb2_ldb_port *port,
					 struct dlb2_ldb_queue *queue,
					 u8 priority)
{
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	int domain_id, slot, ret;
	u32 infl_cnt;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, port->domain_id.phys_id);
		return -EINVAL;
	}

	/*
	 * Set the QID inflight limit to 0 to prevent further scheduling of the
	 * queue.
	 */
	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
		    queue->id.phys_id), 0);

	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
		DLB2_HW_ERR(hw,
			    "Internal error: No available unmapped slots\n");
		return -EFAULT;
	}

	port->qid_map[slot].qid = queue->id.phys_id;
	port->qid_map[slot].priority = priority;

	state = DLB2_QUEUE_MAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
	if (ret)
		return ret;

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	/*
	 * Disable the affected CQ, and the CQs already mapped to the QID,
	 * before reading the QID's inflight count a second time. There is an
	 * unlikely race in which the QID may schedule one more QE after we
	 * read an inflight count of 0, and disabling the CQs guarantees that
	 * the race will not occur after a re-read of the inflight count
	 * register.
	 */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		if (port->enabled)
			dlb2_ldb_port_cq_enable(hw, port);

		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}
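
/*
 * Attempt to complete any in-progress mappings on this port; each slot is
 * retried with the same disable/re-check/enable sequence used by the dynamic
 * mapping path above.
 */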
static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain,
					struct dlb2_ldb_port *port)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		u32 infl_cnt;
		struct dlb2_ldb_queue *queue;
		int qid;

		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
			continue;

		qid = port->qid_map[i].qid;

		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);

		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: unable to find queue %d\n",
				    __func__, qid);
			continue;
		}

		infl_cnt = DLB2_CSR_RD(hw,
				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));

		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
			continue;

		/*
		 * Disable the affected CQ, and the CQs already mapped to the
		 * QID, before reading the QID's inflight count a second time.
		 * There is an unlikely race in which the QID may schedule one
		 * more QE after we read an inflight count of 0, and disabling
		 * the CQs guarantees that the race will not occur after a
		 * re-read of the inflight count register.
		 */
		if (port->enabled)
			dlb2_ldb_port_cq_disable(hw, port);

		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

		infl_cnt = DLB2_CSR_RD(hw,
				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));

		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);

			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

			continue;
		}

		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
	}
}
static unsigned int
dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_additions == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_map_port(hw, domain, port);
	}

	return domain->num_pending_additions;
}
static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port,
				   struct dlb2_ldb_queue *queue)
{
	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
	u32 lsp_qid2cq2;
	u32 lsp_qid2cq;
	u32 atm_qid2cq;
	u32 cq2priov;
	u32 queue_id;
	u32 port_id;
	int i;

	/* Find the queue's slot */
	mapped = DLB2_QUEUE_MAPPED;
	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: QID %d isn't mapped\n",
			    __func__, __LINE__, queue->id.phys_id);
		return -EFAULT;
	}

	port_id = port->id.phys_id;
	queue_id = queue->id.phys_id;

	/* Read-modify-write the priority and valid bit register */
	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));

	cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);

	atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
							 port_id / 4));

	lsp_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_LSP_QID2CQIDIX(hw->ver,
						     queue_id, port_id / 4));

	lsp_qid2cq2 = DLB2_CSR_RD(hw,
				  DLB2_LSP_QID2CQIDIX2(hw->ver,
						       queue_id, port_id / 4));

	switch (port_id % 4) {
	case 0:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
		break;

	case 1:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
		break;

	case 2:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
		break;

	case 3:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
		break;
	}

	DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);

	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
		    lsp_qid2cq);

	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
		    lsp_qid2cq2);

	dlb2_flush_csr(hw);

	unmapped = DLB2_QUEUE_UNMAPPED;

	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
}
static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_ldb_port *port,
				 struct dlb2_ldb_queue *queue,
				 u8 prio)
{
	if (domain->started)
		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
	else
		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
}
static void
dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
				   struct dlb2_hw_domain *domain,
				   struct dlb2_ldb_port *port,
				   int slot)
{
	enum dlb2_qid_map_state state;
	struct dlb2_ldb_queue *queue;

	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];

	state = port->qid_map[slot].state;

	/* Update the QID2CQIDX and CQ2QID vectors */
	dlb2_ldb_port_unmap_qid(hw, port, queue);

	/*
	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
	 * the has_work bits
	 */
	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);

	/* Reset the {CQ, slot} to its default state */
	dlb2_ldb_port_set_queue_if_status(hw, port, slot);

	/* Re-enable the CQ if it was not manually disabled by the user */
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	/*
	 * If there is a mapping that is pending this slot's removal, perform
	 * the mapping now.
	 */
	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
		struct dlb2_ldb_port_qid_map *map;
		struct dlb2_ldb_queue *map_queue;
		u8 prio;

		map = &port->qid_map[slot];

		map->qid = map->pending_qid;
		map->priority = map->pending_priority;

		map_queue = &hw->rsrcs.ldb_queues[map->qid];
		prio = map->priority;

		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
	}
}
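
/*
 * An unmap can complete only once the CQ has zero inflight QEs; when it
 * does, every slot in the UNMAP_IN_PROG* states is finished here.
 */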
static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain,
					  struct dlb2_ldb_port *port)
{
	u32 infl_cnt;
	int i;

	if (port->num_pending_removals == 0)
		return false;

	/*
	 * The unmap requires all the CQ's outstanding inflights to be
	 * completed.
	 */
	infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
						       port->id.phys_id));
	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
		return false;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map;

		map = &port->qid_map[i];

		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
			continue;

		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
	}

	return true;
}
static unsigned int
dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_removals == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_unmap_port(hw, domain, port);
	}

	return domain->num_pending_removals;
}
static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			port->enabled = false;

			dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}
static void dlb2_log_reset_domain(struct dlb2_hw *hw,
				  u32 domain_id,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}
static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain,
					 unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	u32 vpp_v = 0;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		unsigned int offs;
		u32 virt_id;

		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;

		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
	}
}

static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain,
					 unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	u32 vpp_v = 0;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			unsigned int offs;
			u32 virt_id;

			if (hw->virt_mode == DLB2_VIRT_SRIOV)
				virt_id = port->id.virt_id;
			else
				virt_id = port->id.phys_id;

			offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;

			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
		}
	}
}
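
/*
 * Both VPP-disable helpers above compute the same kind of flat index into
 * the per-vdev virtual producer port tables: entries are grouped by vdev,
 * so a virtual port's row is vdev_id * ports_per_vdev + virt_id. A minimal
 * sketch of that rule (hypothetical helper, for illustration only):
 */
static inline unsigned int
dlb2_example_vpp_index(unsigned int vdev_id, unsigned int ports_per_vdev,
		       u32 virt_id)
{
	/* Skip vdev_id complete groups, then select this port's row. */
	return vdev_id * ports_per_vdev + virt_id;
}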

static void
dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	u32 reg = 0;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			DLB2_CSR_WR(hw,
				    DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
				    reg);

			DLB2_CSR_WR(hw,
				    DLB2_CHP_LDB_CQ_WD_ENB(hw->ver, port->id.phys_id),
				    reg);
		}
	}
}

static void
dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	u32 reg = 0;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		DLB2_CSR_WR(hw,
			    DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
			    reg);

		DLB2_CSR_WR(hw,
			    DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
			    reg);
	}
}

static void
dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		int idx = domain_offset + queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);

		if (queue->id.vdev_owned) {
			DLB2_CSR_WR(hw,
				    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
				    0);

			idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
				queue->id.virt_id;

			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);

			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
		}
	}
}

static void
dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *queue;
	unsigned long max_ports;
	int domain_offset;
	RTE_SET_USED(iter);

	max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);

	domain_offset = domain->id.phys_id * max_ports;

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
		int idx = domain_offset + queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);

		if (queue->id.vdev_owned) {
			idx = queue->id.vdev_id * max_ports + queue->id.virt_id;

			DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);

			DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
		}
	}
}

static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
					       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			DLB2_CSR_WR(hw,
				    DLB2_CHP_SN_CHK_ENBL(hw->ver, port->id.phys_id),
				    0);
		}
	}
}

static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
						 struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			int j;

			for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
				if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
					break;
			}

			if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
				DLB2_HW_ERR(hw,
					    "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
					    __func__, port->id.phys_id);
				return -EFAULT;
			}
		}
	}

	return 0;
}
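
/*
 * The helper above is a bounded busy-wait: each CQ gets up to
 * DLB2_MAX_CQ_COMP_CHECK_LOOPS polls of its inflight counter, and
 * exhaustion is treated as an internal error. The same pattern in
 * isolation (hypothetical helper, for illustration only):
 */
static inline int
dlb2_example_poll_until_zero(u32 (*read_cnt)(void *arg), void *arg,
			     unsigned int max_loops)
{
	unsigned int i;

	for (i = 0; i < max_loops; i++) {
		if (read_cnt(arg) == 0)
			return 0;
	}

	return -1; /* counter still non-zero after max_loops polls */
}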
2640 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2641 struct dlb2_hw_domain *domain)
2643 struct dlb2_list_entry *iter;
2644 struct dlb2_dir_pq_pair *port;
2647 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2648 port->enabled = false;
2650 dlb2_dir_port_cq_disable(hw, port);

static void
dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		DLB2_CSR_WR(hw,
			    DLB2_SYS_DIR_PP_V(port->id.phys_id),
			    0);
	}
}

static void
dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			DLB2_CSR_WR(hw,
				    DLB2_SYS_LDB_PP_V(port->id.phys_id),
				    0);
		}
	}
}

static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
					    struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *dir_port;
	struct dlb2_ldb_port *ldb_port;
	struct dlb2_ldb_queue *queue;
	int i;
	RTE_SET_USED(iter);

	/*
	 * Confirm that all the domain's queues' inflight counts and AQED
	 * active counts are 0.
	 */
	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		if (!dlb2_ldb_queue_is_empty(hw, queue)) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: failed to empty ldb queue %d\n",
				    __func__, queue->id.phys_id);
			return -EFAULT;
		}
	}

	/* Confirm that all the domain's CQs' inflight and token counts are 0. */
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
			if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
			    dlb2_ldb_cq_token_count(hw, ldb_port)) {
				DLB2_HW_ERR(hw,
					    "[%s()] Internal error: failed to empty ldb port %d\n",
					    __func__, ldb_port->id.phys_id);
				return -EFAULT;
			}
		}
	}

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
		if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: failed to empty dir queue %d\n",
				    __func__, dir_port->id.phys_id);
			return -EFAULT;
		}

		if (dlb2_dir_cq_token_count(hw, dir_port)) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: failed to empty dir port %d\n",
				    __func__, dir_port->id.phys_id);
			return -EFAULT;
		}
	}

	return 0;
}
2744 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2745 struct dlb2_ldb_port *port)
2748 DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2749 DLB2_SYS_LDB_PP2VAS_RST);
2752 DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
2753 DLB2_CHP_LDB_CQ2VAS_RST);
2756 DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2757 DLB2_SYS_LDB_PP2VDEV_RST);
	if (port->id.vdev_owned) {
		unsigned int offs;
		u32 virt_id;

		/*
		 * DLB uses producer port address bits 17:12 to determine the
		 * producer port ID. In Scalable IOV mode, PP accesses come
		 * through the PF MMIO window for the physical producer port,
		 * so for translation purposes the virtual and physical port
		 * IDs are equal.
		 */
		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;

		DLB2_CSR_WR(hw,
			    DLB2_SYS_VF_LDB_VPP2PP(offs),
			    DLB2_SYS_VF_LDB_VPP2PP_RST);

		DLB2_CSR_WR(hw,
			    DLB2_SYS_VF_LDB_VPP_V(offs),
			    DLB2_SYS_VF_LDB_VPP_V_RST);
	}
2787 DLB2_SYS_LDB_PP_V(port->id.phys_id),
2788 DLB2_SYS_LDB_PP_V_RST);
2791 DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
2792 DLB2_LSP_CQ_LDB_DSBL_RST);
2795 DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
2796 DLB2_CHP_LDB_CQ_DEPTH_RST);
	if (hw->ver != DLB2_HW_V2)
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
			    DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
2804 DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
2805 DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2808 DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
2809 DLB2_CHP_HIST_LIST_LIM_RST);
2812 DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
2813 DLB2_CHP_HIST_LIST_BASE_RST);
2816 DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
2817 DLB2_CHP_HIST_LIST_POP_PTR_RST);
2820 DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
2821 DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2824 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2825 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2828 DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2829 DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2832 DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
2833 DLB2_CHP_LDB_CQ_INT_ENB_RST);
2836 DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2837 DLB2_SYS_LDB_CQ_ISR_RST);
2840 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2841 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2844 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2845 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2848 DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
2849 DLB2_CHP_LDB_CQ_WPTR_RST);
2852 DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
2853 DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2856 DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2857 DLB2_SYS_LDB_CQ_ADDR_L_RST);
2860 DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2861 DLB2_SYS_LDB_CQ_ADDR_U_RST);
	if (hw->ver == DLB2_HW_V2)
		DLB2_CSR_WR(hw,
			    DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
			    DLB2_SYS_LDB_CQ_AT_RST);
2869 DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
2870 DLB2_SYS_LDB_CQ_PASID_RST);
2873 DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2874 DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2877 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
2878 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2881 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
2882 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2885 DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
2886 DLB2_LSP_CQ2QID0_RST);
2889 DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
2890 DLB2_LSP_CQ2QID1_RST);
2893 DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
2894 DLB2_LSP_CQ2PRIOV_RST);
2897 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2898 struct dlb2_hw_domain *domain)
2900 struct dlb2_list_entry *iter;
2901 struct dlb2_ldb_port *port;
2905 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2906 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2907 __dlb2_domain_reset_ldb_port_registers(hw, port);
static void
__dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
				       struct dlb2_dir_pq_pair *port)
{
	u32 reg = 0;

	DLB2_CSR_WR(hw,
		    DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
		    DLB2_CHP_DIR_CQ2VAS_RST);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
		    DLB2_LSP_CQ_DIR_DSBL_RST);

	DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);

	if (hw->ver == DLB2_HW_V2)
		DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
	else
		DLB2_CSR_WR(hw,
			    DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
2934 DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
2935 DLB2_CHP_DIR_CQ_DEPTH_RST);
2938 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2939 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2942 DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2943 DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2946 DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2947 DLB2_CHP_DIR_CQ_INT_ENB_RST);
2950 DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2951 DLB2_SYS_DIR_CQ_ISR_RST);
2954 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
2956 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2959 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2960 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2963 DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
2964 DLB2_CHP_DIR_CQ_WPTR_RST);
2967 DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
2968 DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2971 DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2972 DLB2_SYS_DIR_CQ_ADDR_L_RST);
2975 DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2976 DLB2_SYS_DIR_CQ_ADDR_U_RST);
	DLB2_CSR_WR(hw,
		    DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
		    DLB2_SYS_DIR_CQ_AT_RST);
2988 DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
2989 DLB2_SYS_DIR_CQ_PASID_RST);
2992 DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2993 DLB2_SYS_DIR_CQ_FMT_RST);
2996 DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2997 DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
3000 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3001 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
3004 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3005 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
3008 DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
3009 DLB2_SYS_DIR_PP2VAS_RST);
3012 DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
3013 DLB2_CHP_DIR_CQ2VAS_RST);
3016 DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
3017 DLB2_SYS_DIR_PP2VDEV_RST);
	if (port->id.vdev_owned) {
		unsigned int offs;
		u32 virt_id;

		/*
		 * DLB uses producer port address bits 17:12 to determine the
		 * producer port ID. In Scalable IOV mode, PP accesses come
		 * through the PF MMIO window for the physical producer port,
		 * so for translation purposes the virtual and physical port
		 * IDs are equal.
		 */
		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
			virt_id;

		DLB2_CSR_WR(hw,
			    DLB2_SYS_VF_DIR_VPP2PP(offs),
			    DLB2_SYS_VF_DIR_VPP2PP_RST);

		DLB2_CSR_WR(hw,
			    DLB2_SYS_VF_DIR_VPP_V(offs),
			    DLB2_SYS_VF_DIR_VPP_V_RST);
	}
3048 DLB2_SYS_DIR_PP_V(port->id.phys_id),
3049 DLB2_SYS_DIR_PP_V_RST);
3052 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3053 struct dlb2_hw_domain *domain)
3055 struct dlb2_list_entry *iter;
3056 struct dlb2_dir_pq_pair *port;
3059 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3060 __dlb2_domain_reset_dir_port_registers(hw, port);
3063 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
3064 struct dlb2_hw_domain *domain)
3066 struct dlb2_list_entry *iter;
3067 struct dlb2_ldb_queue *queue;
3070 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3071 unsigned int queue_id = queue->id.phys_id;
3075 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
3076 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3079 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
3080 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3083 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
3084 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3087 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
3088 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3091 DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
3092 DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3095 DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
3096 DLB2_LSP_QID_LDB_INFL_LIM_RST);
3099 DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
3100 DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3103 DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
3104 DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3107 DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
3108 DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3111 DLB2_SYS_LDB_QID_ITS(queue_id),
3112 DLB2_SYS_LDB_QID_ITS_RST);
3115 DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
3116 DLB2_CHP_ORD_QID_SN_RST);
3119 DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
3120 DLB2_CHP_ORD_QID_SN_MAP_RST);
3123 DLB2_SYS_LDB_QID_V(queue_id),
3124 DLB2_SYS_LDB_QID_V_RST);
3127 DLB2_SYS_LDB_QID_CFG_V(queue_id),
3128 DLB2_SYS_LDB_QID_CFG_V_RST);
		if (queue->sn_cfg_valid) {
			u32 offs[2];

			offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
							 queue->sn_slot);
			offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
							 queue->sn_slot);

			DLB2_CSR_WR(hw,
				    offs[queue->sn_group],
				    DLB2_RO_GRP_0_SLT_SHFT_RST);
		}

		for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
			DLB2_CSR_WR(hw,
				    DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
				    DLB2_LSP_QID2CQIDIX_00_RST);

			DLB2_CSR_WR(hw,
				    DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
				    DLB2_LSP_QID2CQIDIX2_00_RST);

			DLB2_CSR_WR(hw,
				    DLB2_ATM_QID2CQIDIX(queue_id, i),
				    DLB2_ATM_QID2CQIDIX_00_RST);
		}
	}
}
3159 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3160 struct dlb2_hw_domain *domain)
3162 struct dlb2_list_entry *iter;
3163 struct dlb2_dir_pq_pair *queue;
3166 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3168 DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
3170 DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3173 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
3175 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3178 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
3180 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3183 DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
3185 DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3188 DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3189 DLB2_SYS_DIR_QID_ITS_RST);
3192 DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3193 DLB2_SYS_DIR_QID_V_RST);
3201 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3202 struct dlb2_hw_domain *domain)
3204 dlb2_domain_reset_ldb_port_registers(hw, domain);
3206 dlb2_domain_reset_dir_port_registers(hw, domain);
3208 dlb2_domain_reset_ldb_queue_registers(hw, domain);
3210 dlb2_domain_reset_dir_queue_registers(hw, domain);
	if (hw->ver == DLB2_HW_V2) {
		DLB2_CSR_WR(hw,
			    DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
			    DLB2_CHP_CFG_LDB_VAS_CRD_RST);

		DLB2_CSR_WR(hw,
			    DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
			    DLB2_CHP_CFG_DIR_VAS_CRD_RST);
	} else
		DLB2_CSR_WR(hw,
			    DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
			    DLB2_CHP_CFG_VAS_CRD_RST);
}
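
/*
 * Note the version split above: DLB 2.0 (V2) tracks load-balanced and
 * directed credits in separate per-VAS registers (CFG_LDB_VAS_CRD and
 * CFG_DIR_VAS_CRD), while DLB 2.5 uses a single combined credit pool
 * (CFG_VAS_CRD). The same split reappears in
 * dlb2_domain_reset_software_state() below when the domain's credits are
 * returned to the function.
 */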
3226 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3227 struct dlb2_hw_domain *domain)
3229 struct dlb2_dir_pq_pair *tmp_dir_port;
3230 struct dlb2_ldb_queue *tmp_ldb_queue;
3231 struct dlb2_ldb_port *tmp_ldb_port;
3232 struct dlb2_list_entry *iter1;
3233 struct dlb2_list_entry *iter2;
3234 struct dlb2_function_resources *rsrcs;
3235 struct dlb2_dir_pq_pair *dir_port;
3236 struct dlb2_ldb_queue *ldb_queue;
3237 struct dlb2_ldb_port *ldb_port;
	struct dlb2_list_head *list;
	int ret, i, j;
3240 RTE_SET_USED(tmp_dir_port);
3241 RTE_SET_USED(tmp_ldb_queue);
3242 RTE_SET_USED(tmp_ldb_port);
3243 RTE_SET_USED(iter1);
3244 RTE_SET_USED(iter2);
3246 rsrcs = domain->parent_func;
3248 /* Move the domain's ldb queues to the function's avail list */
3249 list = &domain->used_ldb_queues;
3250 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3251 if (ldb_queue->sn_cfg_valid) {
3252 struct dlb2_sn_group *grp;
3254 grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3256 dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3257 ldb_queue->sn_cfg_valid = false;
3260 ldb_queue->owned = false;
3261 ldb_queue->num_mappings = 0;
3262 ldb_queue->num_pending_additions = 0;
3264 dlb2_list_del(&domain->used_ldb_queues,
3265 &ldb_queue->domain_list);
3266 dlb2_list_add(&rsrcs->avail_ldb_queues,
3267 &ldb_queue->func_list);
3268 rsrcs->num_avail_ldb_queues++;
3271 list = &domain->avail_ldb_queues;
3272 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3273 ldb_queue->owned = false;
3275 dlb2_list_del(&domain->avail_ldb_queues,
3276 &ldb_queue->domain_list);
3277 dlb2_list_add(&rsrcs->avail_ldb_queues,
3278 &ldb_queue->func_list);
3279 rsrcs->num_avail_ldb_queues++;
3282 /* Move the domain's ldb ports to the function's avail list */
3283 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3284 list = &domain->used_ldb_ports[i];
		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
				       iter1, iter2) {
3289 ldb_port->owned = false;
3290 ldb_port->configured = false;
3291 ldb_port->num_pending_removals = 0;
3292 ldb_port->num_mappings = 0;
3293 ldb_port->init_tkn_cnt = 0;
3294 ldb_port->cq_depth = 0;
3295 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3296 ldb_port->qid_map[j].state =
3297 DLB2_QUEUE_UNMAPPED;
3299 dlb2_list_del(&domain->used_ldb_ports[i],
3300 &ldb_port->domain_list);
3301 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3302 &ldb_port->func_list);
3303 rsrcs->num_avail_ldb_ports[i]++;
3306 list = &domain->avail_ldb_ports[i];
		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
				       iter1, iter2) {
3309 ldb_port->owned = false;
3311 dlb2_list_del(&domain->avail_ldb_ports[i],
3312 &ldb_port->domain_list);
3313 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3314 &ldb_port->func_list);
3315 rsrcs->num_avail_ldb_ports[i]++;
3319 /* Move the domain's dir ports to the function's avail list */
3320 list = &domain->used_dir_pq_pairs;
3321 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3322 dir_port->owned = false;
3323 dir_port->port_configured = false;
3324 dir_port->init_tkn_cnt = 0;
3326 dlb2_list_del(&domain->used_dir_pq_pairs,
3327 &dir_port->domain_list);
3329 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3330 &dir_port->func_list);
3331 rsrcs->num_avail_dir_pq_pairs++;
3334 list = &domain->avail_dir_pq_pairs;
3335 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3336 dir_port->owned = false;
3338 dlb2_list_del(&domain->avail_dir_pq_pairs,
3339 &dir_port->domain_list);
3341 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3342 &dir_port->func_list);
3343 rsrcs->num_avail_dir_pq_pairs++;
3346 /* Return hist list entries to the function */
	ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
				    domain->hist_list_entry_base,
				    domain->total_hist_list_entries);
	if (ret) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
			    __func__);
		return ret;
	}

	domain->total_hist_list_entries = 0;
3358 domain->avail_hist_list_entries = 0;
3359 domain->hist_list_entry_base = 0;
3360 domain->hist_list_entry_offset = 0;
	if (hw->ver == DLB2_HW_V2_5) {
		rsrcs->num_avail_entries += domain->num_credits;
		domain->num_credits = 0;
	} else {
		rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
		domain->num_ldb_credits = 0;

		rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
		domain->num_dir_credits = 0;
	}
3372 rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3373 rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3374 domain->num_avail_aqed_entries = 0;
3375 domain->num_used_aqed_entries = 0;
3377 domain->num_pending_removals = 0;
3378 domain->num_pending_additions = 0;
3379 domain->configured = false;
3380 domain->started = false;
3383 * Move the domain out of the used_domains list and back to the
3384 * function's avail_domains list.
3386 dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3387 dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
	rsrcs->num_avail_domains++;

	return 0;
}

static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
					    struct dlb2_hw_domain *domain,
					    struct dlb2_ldb_queue *queue)
{
	struct dlb2_ldb_port *port = NULL;
	int ret, i;

	/* If a domain has LDB queues, it must have LDB ports */
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
					  typeof(*port));
		if (port)
			break;
	}

	if (port == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: No configured LDB ports\n",
			    __func__);
		return -EFAULT;
	}

	/* If necessary, free up a QID slot in this CQ */
	if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		struct dlb2_ldb_queue *mapped_queue;

		mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];

		ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
		if (ret)
			return ret;
	}

	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
	if (ret)
		return ret;

	return dlb2_domain_drain_mapped_queues(hw, domain);
}
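
/*
 * Draining an unmapped-but-non-empty queue thus works by borrowing a CQ:
 * the queue is dynamically mapped to any configured load-balanced port
 * (evicting the queue in slot 0 first if the port's qid_map is full) and
 * then drained through the normal mapped-queue path. The temporary mapping
 * is not undone here because the entire domain is being reset anyway.
 */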

static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	int ret;
	RTE_SET_USED(iter);

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	/*
	 * Pre-condition: the unattached queue must not have any outstanding
	 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
	 * prior to this in dlb2_domain_drain_mapped_queues().
	 */
	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		if (queue->num_mappings != 0 ||
		    dlb2_ldb_queue_is_empty(hw, queue))
			continue;

		ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
		if (ret)
			return ret;
	}

	return 0;
}
3464 * dlb2_reset_domain() - reset a scheduling domain
3465 * @hw: dlb2_hw handle for a particular device.
3466 * @domain_id: domain ID.
3467 * @vdev_req: indicates whether this request came from a vdev.
3468 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 * This function resets and frees a DLB 2.0 scheduling domain and its associated
 * resources.
 *
 * Pre-condition: the driver must ensure software has stopped sending QEs
 * through this domain's producer ports before invoking this function, or
 * undefined behavior will result.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - Invalid domain ID, or the domain is not configured.
 * EFAULT - Internal error. (Possibly caused if software violates the
 *	    pre-condition above.)
 * ETIMEDOUT - Hardware component didn't reset in the expected time.
int dlb2_reset_domain(struct dlb2_hw *hw,
		      u32 domain_id,
		      bool vdev_req,
		      unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	int ret;

	dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (domain == NULL || !domain->configured)
		return -EINVAL;

	/* Disable VPPs */
	if (vdev_req) {
		dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);

		dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
	}
3510 /* Disable CQ interrupts */
3511 dlb2_domain_disable_dir_port_interrupts(hw, domain);
3513 dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3516 * For each queue owned by this domain, disable its write permissions to
3517 * cause any traffic sent to it to be dropped. Well-behaved software
3518 * should not be sending QEs at this point.
3520 dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3522 dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3524 /* Turn off completion tracking on all the domain's PPs. */
3525 dlb2_domain_disable_ldb_seq_checks(hw, domain);
3528 * Disable the LDB CQs and drain them in order to complete the map and
3529 * unmap procedures, which require zero CQ inflights and zero QID
3530 * inflights respectively.
3532 dlb2_domain_disable_ldb_cqs(hw, domain);
3534 dlb2_domain_drain_ldb_cqs(hw, domain, false);
	ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
	if (ret)
		return ret;

	ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
	if (ret)
		return ret;

	ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
	if (ret)
		return ret;
3548 /* Re-enable the CQs in order to drain the mapped queues. */
3549 dlb2_domain_enable_ldb_cqs(hw, domain);
	ret = dlb2_domain_drain_mapped_queues(hw, domain);
	if (ret)
		return ret;

	ret = dlb2_domain_drain_unmapped_queues(hw, domain);
	if (ret)
		return ret;
3559 /* Done draining LDB QEs, so disable the CQs. */
3560 dlb2_domain_disable_ldb_cqs(hw, domain);
3562 dlb2_domain_drain_dir_queues(hw, domain);
3564 /* Done draining DIR QEs, so disable the CQs. */
3565 dlb2_domain_disable_dir_cqs(hw, domain);
3568 dlb2_domain_disable_dir_producer_ports(hw, domain);
3570 dlb2_domain_disable_ldb_producer_ports(hw, domain);
	ret = dlb2_domain_verify_reset_success(hw, domain);
	if (ret)
		return ret;
3576 /* Reset the QID and port state. */
3577 dlb2_domain_reset_registers(hw, domain);
3579 /* Hardware reset complete. Reset the domain's software state */
	return dlb2_domain_reset_software_state(hw, domain);
}
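
/*
 * Usage sketch (hypothetical caller, for illustration only): a PF driver
 * handling an application teardown would quiesce the domain's producer
 * ports first, then issue the reset on its own behalf:
 *
 *	ret = dlb2_reset_domain(hw, domain_id, false, 0);
 *	if (ret)
 *		DLB2_HW_ERR(hw, "domain %u reset failed: %d\n",
 *			    domain_id, ret);
 */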

static void
dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
			       u32 domain_id,
			       struct dlb2_create_ldb_queue_args *args,
			       bool vdev_req,
			       unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
		    domain_id);
3595 DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3596 args->num_sequence_numbers);
3597 DLB2_HW_DBG(hw, "\tNumber of QID inflights: %d\n",
3598 args->num_qid_inflights);
3599 DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
3600 args->num_atomic_inflights);
static int
dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
				  struct dlb2_ldb_queue *queue,
				  struct dlb2_create_ldb_queue_args *args)
{
	int slot = -1;
	int i;

	queue->sn_cfg_valid = false;

	if (args->num_sequence_numbers == 0)
		return 0;

	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
		struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];

		if (group->sequence_numbers_per_queue ==
		    args->num_sequence_numbers &&
		    !dlb2_sn_group_full(group)) {
			slot = dlb2_sn_group_alloc_slot(group);
			if (slot >= 0)
				break;
		}
	}

	if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: no sequence number slots available\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	queue->sn_cfg_valid = true;
	queue->sn_group = i;
	queue->sn_slot = slot;

	return 0;
}

static int
dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
				  u32 domain_id,
				  struct dlb2_create_ldb_queue_args *args,
				  struct dlb2_cmd_response *resp,
				  bool vdev_req,
				  unsigned int vdev_id,
				  struct dlb2_hw_domain **out_domain,
				  struct dlb2_ldb_queue **out_queue)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	int i;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
	if (!queue) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	if (args->num_sequence_numbers) {
		for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
			struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];

			if (group->sequence_numbers_per_queue ==
			    args->num_sequence_numbers &&
			    !dlb2_sn_group_full(group))
				break;
		}

		if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
			resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	if (args->num_qid_inflights > 4096) {
		resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
		return -EINVAL;
	}

	/* Inflights must be <= number of sequence numbers if ordered */
	if (args->num_sequence_numbers != 0 &&
	    args->num_qid_inflights > args->num_sequence_numbers) {
		resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
		return -EINVAL;
	}

	if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	if (args->num_atomic_inflights &&
	    args->lock_id_comp_level != 0 &&
	    args->lock_id_comp_level != 64 &&
	    args->lock_id_comp_level != 128 &&
	    args->lock_id_comp_level != 256 &&
	    args->lock_id_comp_level != 512 &&
	    args->lock_id_comp_level != 1024 &&
	    args->lock_id_comp_level != 2048 &&
	    args->lock_id_comp_level != 4096 &&
	    args->lock_id_comp_level != 65536) {
		resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
		return -EINVAL;
	}

	*out_domain = domain;
	*out_queue = queue;

	return 0;
}

static int
dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
				struct dlb2_hw_domain *domain,
				struct dlb2_ldb_queue *queue,
				struct dlb2_create_ldb_queue_args *args)
{
	int ret;

	ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
	if (ret)
		return ret;

	/* Attach QID inflights */
	queue->num_qid_inflights = args->num_qid_inflights;

	/* Attach atomic inflights */
	queue->aqed_limit = args->num_atomic_inflights;

	domain->num_avail_aqed_entries -= args->num_atomic_inflights;
	domain->num_used_aqed_entries += args->num_atomic_inflights;

	return 0;
}

static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
				     struct dlb2_hw_domain *domain,
				     struct dlb2_ldb_queue *queue,
				     struct dlb2_create_ldb_queue_args *args,
				     bool vdev_req,
				     unsigned int vdev_id)
{
	struct dlb2_sn_group *sn_group;
	unsigned int offs;
	u32 reg = 0;
	u32 alimit;

	/* QID write permissions are turned on when the domain is started */
	offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.phys_id;

	DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), reg);
3772 * Unordered QIDs get 4K inflights, ordered get as many as the number
3773 * of sequence numbers.
3775 DLB2_BITS_SET(reg, args->num_qid_inflights,
3776 DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
3777 DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
3778 queue->id.phys_id), reg);
3780 alimit = queue->aqed_limit;
3782 if (alimit > DLB2_MAX_NUM_AQED_ENTRIES)
3783 alimit = DLB2_MAX_NUM_AQED_ENTRIES;
	reg = 0;
	DLB2_BITS_SET(reg, alimit, DLB2_LSP_QID_AQED_ACTIVE_LIM_LIMIT);
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver,
						 queue->id.phys_id), reg);

	reg = 0;
	switch (args->lock_id_comp_level) {
	case 64:
		DLB2_BITS_SET(reg, 1, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
		break;
	case 128:
		DLB2_BITS_SET(reg, 2, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
		break;
	case 256:
		DLB2_BITS_SET(reg, 3, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
		break;
	case 512:
		DLB2_BITS_SET(reg, 4, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
		break;
	case 1024:
		DLB2_BITS_SET(reg, 5, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
		break;
	case 2048:
		DLB2_BITS_SET(reg, 6, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
		break;
	case 4096:
		DLB2_BITS_SET(reg, 7, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
		break;
	default:
		/* No compression by default */
		break;
	}

	DLB2_CSR_WR(hw, DLB2_AQED_QID_HID_WIDTH(queue->id.phys_id), reg);
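
	/*
	 * The switch above encodes the validated compression levels as
	 * log2(level) - 5: 64 -> 1, 128 -> 2, ..., 4096 -> 7, while 0 and
	 * 65536 fall through to 0 (no compression). A compact equivalent
	 * (hypothetical helper, for illustration only; assumes the level
	 * was already validated as one of the allowed powers of two):
	 *
	 *	static inline u32 hid_width_code(u32 level)
	 *	{
	 *		if (level < 64 || level > 4096)
	 *			return 0;
	 *		return (u32)__builtin_ctz(level) - 5;
	 *	}
	 */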
	reg = 0;

	/* Don't timestamp QEs that pass through this queue */
	DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_ITS(queue->id.phys_id), reg);

	DLB2_BITS_SET(reg, args->depth_threshold,
		      DLB2_LSP_QID_ATM_DEPTH_THRSH_THRESH);
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver,
						 queue->id.phys_id), reg);

	reg = 0;
	DLB2_BITS_SET(reg, args->depth_threshold,
		      DLB2_LSP_QID_NALDB_DEPTH_THRSH_THRESH);
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue->id.phys_id),
		    reg);

	/*
	 * This register limits the number of inflight flows a queue can have
	 * at one time. It has an upper bound of 2048, but can be
	 * over-subscribed. 512 is chosen so that a single queue does not use
	 * the entire atomic storage, but can use a substantial portion if
	 * needed.
	 */
	reg = 0;
	DLB2_BITS_SET(reg, 512, DLB2_AQED_QID_FID_LIM_QID_FID_LIMIT);
	DLB2_CSR_WR(hw, DLB2_AQED_QID_FID_LIM(queue->id.phys_id), reg);
	reg = 0;
	if (queue->sn_cfg_valid) {
		sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
		DLB2_BITS_SET(reg, sn_group->mode, DLB2_CHP_ORD_QID_SN_MAP_MODE);
		DLB2_BITS_SET(reg, queue->sn_slot, DLB2_CHP_ORD_QID_SN_MAP_SLOT);
		DLB2_BITS_SET(reg, sn_group->id, DLB2_CHP_ORD_QID_SN_MAP_GRP);
	}

	DLB2_CSR_WR(hw,
		    DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue->id.phys_id), reg);

	reg = 0;
	DLB2_BITS_SET(reg, (args->num_sequence_numbers != 0),
		      DLB2_SYS_LDB_QID_CFG_V_SN_CFG_V);
	DLB2_BITS_SET(reg, (args->num_atomic_inflights != 0),
		      DLB2_SYS_LDB_QID_CFG_V_FID_CFG_V);

	DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), reg);

	if (vdev_req) {
		offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;

		reg = 0;
		DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VQID_V_VQID_V);
		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), reg);

		reg = 0;
		DLB2_BITS_SET(reg, queue->id.phys_id,
			      DLB2_SYS_VF_LDB_VQID2QID_QID);
		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), reg);

		reg = 0;
		DLB2_BITS_SET(reg, queue->id.virt_id,
			      DLB2_SYS_LDB_QID2VQID_VQID);
		DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID2VQID(queue->id.phys_id), reg);
	}

	reg = 0;
	DLB2_BIT_SET(reg, DLB2_SYS_LDB_QID_V_QID_V);
	DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), reg);
}
3891 * dlb2_hw_create_ldb_queue() - create a load-balanced queue
3892 * @hw: dlb2_hw handle for a particular device.
3893 * @domain_id: domain ID.
3894 * @args: queue creation arguments.
3895 * @resp: response structure.
3896 * @vdev_req: indicates whether this request came from a vdev.
3897 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3899 * This function creates a load-balanced queue.
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
3905 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
3906 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
3907 * contains the queue ID.
3909 * resp->id contains a virtual ID if vdev_req is true.
3912 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *	    the domain has already been started, or the requested queue name is
 *	    already in use.
3915 * EFAULT - Internal error (resp->status not set).
int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_create_ldb_queue_args *args,
			     struct dlb2_cmd_response *resp,
			     bool vdev_req,
			     unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	int ret;

	dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_ldb_queue_args(hw, domain_id, args, resp,
						vdev_req, vdev_id,
						&domain, &queue);
	if (ret)
		return ret;

	ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);

	if (ret) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
			    __func__, __LINE__);
		return ret;
	}

	dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);

	queue->num_mappings = 0;

	queue->configured = true;

	/*
	 * Configuration succeeded, so move the resource from the 'avail' to
	 * the 'used' list.
	 */
	dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);

	dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);

	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;

	return 0;
}

static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain,
				       struct dlb2_ldb_port *port,
				       bool vdev_req,
				       unsigned int vdev_id)
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
	DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);

	if (vdev_req) {
		unsigned int offs;
		u32 virt_id;

		/*
		 * DLB uses producer port address bits 17:12 to determine the
		 * producer port ID. In Scalable IOV mode, PP accesses come
		 * through the PF MMIO window for the physical producer port,
		 * so for translation purposes the virtual and physical port
		 * IDs are equal.
		 */
		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		reg = 0;
		DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
		offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);

		reg = 0;
		DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
		DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);

		reg = 0;
		DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
	}

	reg = 0;
	DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
	DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
}

static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain,
				      struct dlb2_ldb_port *port,
				      uintptr_t cq_dma_base,
				      struct dlb2_create_ldb_port_args *args,
				      bool vdev_req,
				      unsigned int vdev_id)
{
	u32 hl_base = 0;
	u32 reg = 0;
	u32 ds = 0;

	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
	DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
4036 reg = cq_dma_base >> 32;
4037 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
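
	/*
	 * Worked example of the address split above, for an illustrative
	 * 64B-aligned address:
	 *
	 *	cq_dma_base       = 0x0000000123456740
	 *	cq_dma_base >> 6  = 0x48D159D  (bits 63:6; masked into ADDR_L)
	 *	cq_dma_base >> 32 = 0x1        (upper word, CQ_ADDR_U)
	 */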
4040 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4041 * cache lines out-of-order (but QEs within a cache line are always
4042 * updated in-order).
	reg = 0;
	DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
	DLB2_BITS_SET(reg,
		      !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
		      DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
	DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);

	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);

	port->cq_depth = args->cq_depth;

	if (args->cq_depth <= 8) {
		ds = 1;
	} else if (args->cq_depth == 16) {
		ds = 2;
	} else if (args->cq_depth == 32) {
		ds = 3;
	} else if (args->cq_depth == 64) {
		ds = 4;
	} else if (args->cq_depth == 128) {
		ds = 5;
	} else if (args->cq_depth == 256) {
		ds = 6;
	} else if (args->cq_depth == 512) {
		ds = 7;
	} else if (args->cq_depth == 1024) {
		ds = 8;
	} else {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: invalid CQ depth\n",
			    __func__, __LINE__);
		return -EFAULT;
	}
	reg = 0;
	DLB2_BITS_SET(reg, ds,
		      DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
	DLB2_CSR_WR(hw,
		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
		    reg);
	/*
	 * To support CQs with depth less than 8, program the token count
	 * register with a non-zero initial value. Operations such as domain
	 * reset must take this initial value into account when quiescing the
	 * CQ.
	 */
	port->init_tkn_cnt = 0;

	if (args->cq_depth < 8) {
		reg = 0;
		port->init_tkn_cnt = 8 - args->cq_depth;

		DLB2_BITS_SET(reg,
			      port->init_tkn_cnt,
			      DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
			    reg);
	} else {
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
			    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
	}
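
	/*
	 * Worked example for the shallow-CQ token workaround above: with
	 * args->cq_depth == 4, init_tkn_cnt = 8 - 4 = 4, so the token
	 * counter starts at 4 and the hardware sees an effective depth of
	 * 8 while software only ever uses 4 entries. Quiescing code (e.g.
	 * domain reset) must subtract init_tkn_cnt when judging whether
	 * the CQ's tokens have fully drained.
	 */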
	reg = 0;
	DLB2_BITS_SET(reg, ds,
		      DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
	DLB2_CSR_WR(hw,
		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
		    reg);

	/* Reset the CQ write pointer */
	DLB2_CSR_WR(hw,
		    DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
		    DLB2_CHP_LDB_CQ_WPTR_RST);
	reg = 0;
	DLB2_BITS_SET(reg,
		      port->hist_list_entry_limit - 1,
		      DLB2_CHP_HIST_LIST_LIM_LIMIT);
	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);

	DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
		      DLB2_CHP_HIST_LIST_BASE_BASE);
	DLB2_CSR_WR(hw,
		    DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
		    hl_base);
	/*
	 * The inflight limit sets a cap on the number of QEs for which this CQ
	 * can owe completions at one time.
	 */
	reg = 0;
	DLB2_BITS_SET(reg, args->cq_history_list_size,
		      DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
		    reg);

	reg = 0;
	DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
		      DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
		    reg);

	reg = 0;
	DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
		      DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
		    reg);
4156 * Address translation (AT) settings: 0: untranslated, 2: translated
4157 * (see ATS spec regarding Address Type field for more details)
	if (hw->ver == DLB2_HW_V2) {
		reg = 0;
		DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
	}

	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
		reg = 0;
		DLB2_BITS_SET(reg, hw->pasid[vdev_id],
			      DLB2_SYS_LDB_CQ_PASID_PASID);
		DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
	}

	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);

	reg = 0;
	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
	DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);

	/* Disable the port's QID mappings */
	reg = 0;
	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);

	return 0;
}

static bool
dlb2_cq_depth_is_valid(u32 depth)
{
	if (depth != 1 && depth != 2 &&
	    depth != 4 && depth != 8 &&
	    depth != 16 && depth != 32 &&
	    depth != 64 && depth != 128 &&
	    depth != 256 && depth != 512 &&
	    depth != 1024)
		return false;

	return true;
}
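
/*
 * Equivalently, a valid CQ depth is any power of two from 1 through 1024.
 * A compact alternative to the comparison chain above (hypothetical
 * helper, for illustration only):
 */
static inline bool
dlb2_example_cq_depth_is_valid(u32 depth)
{
	/* Non-zero, at most 1024, and exactly one bit set. */
	return depth != 0 && depth <= 1024 && (depth & (depth - 1)) == 0;
}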

static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
				   struct dlb2_hw_domain *domain,
				   struct dlb2_ldb_port *port,
				   uintptr_t cq_dma_base,
				   struct dlb2_create_ldb_port_args *args,
				   bool vdev_req,
				   unsigned int vdev_id)
{
	int ret, i;

	port->hist_list_entry_base = domain->hist_list_entry_base +
				     domain->hist_list_entry_offset;
	port->hist_list_entry_limit = port->hist_list_entry_base +
				      args->cq_history_list_size;

	domain->hist_list_entry_offset += args->cq_history_list_size;
	domain->avail_hist_list_entries -= args->cq_history_list_size;

	ret = dlb2_ldb_port_configure_cq(hw, domain, port, cq_dma_base,
					 args, vdev_req, vdev_id);
	if (ret)
		return ret;

	dlb2_ldb_port_configure_pp(hw, domain, port, vdev_req, vdev_id);

	dlb2_ldb_port_cq_enable(hw, port);

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
		port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
	port->num_mappings = 0;

	port->enabled = true;

	port->configured = true;

	return 0;
}

static void
dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
			      u32 domain_id,
			      uintptr_t cq_dma_base,
			      struct dlb2_create_ldb_port_args *args,
			      bool vdev_req,
			      unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
		    domain_id);
	DLB2_HW_DBG(hw, "\tCQ depth: %d\n",
		    args->cq_depth);
	DLB2_HW_DBG(hw, "\tCQ hist list size: %d\n",
		    args->cq_history_list_size);
	DLB2_HW_DBG(hw, "\tCQ base address: 0x%lx\n",
		    cq_dma_base);
	DLB2_HW_DBG(hw, "\tCoS ID: %u\n", args->cos_id);
	DLB2_HW_DBG(hw, "\tStrict CoS allocation: %u\n",
		    args->cos_strict);
}

static int
dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
				 u32 domain_id,
				 uintptr_t cq_dma_base,
				 struct dlb2_create_ldb_port_args *args,
				 struct dlb2_cmd_response *resp,
				 bool vdev_req,
				 unsigned int vdev_id,
				 struct dlb2_hw_domain **out_domain,
				 struct dlb2_ldb_port **out_port,
				 int *out_cos_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_port *port;
	int id;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
		resp->status = DLB2_ST_INVALID_COS_ID;
		return -EINVAL;
	}

	if (args->cos_strict) {
		id = args->cos_id;
		port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
					  typeof(*port));
	} else {
		int i;

		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;

			port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
						  typeof(*port));
			if (port)
				break;
		}
	}

	if (!port) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	/* Check cache-line alignment */
	if ((cq_dma_base & 0x3F) != 0) {
		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
		return -EINVAL;
	}

	if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
		return -EINVAL;
	}

	/* The history list size must be >= 1 */
	if (!args->cq_history_list_size) {
		resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
		return -EINVAL;
	}

	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
		return -EINVAL;
	}

	*out_domain = domain;
	*out_port = port;
	*out_cos_id = id;

	return 0;
}
4358 * dlb2_hw_create_ldb_port() - create a load-balanced port
4359 * @hw: dlb2_hw handle for a particular device.
4360 * @domain_id: domain ID.
4361 * @args: port creation arguments.
4362 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4363 * @resp: response structure.
4364 * @vdev_req: indicates whether this request came from a vdev.
4365 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4367 * This function creates a load-balanced port.
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
4373 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4374 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4375 * contains the port ID.
4377 * resp->id contains a virtual ID if vdev_req is true.
4380 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4381 * pointer address is not properly aligned, the domain is not
4382 * configured, or the domain has already been started.
4383 * EFAULT - Internal error (resp->status not set).
int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
			    u32 domain_id,
			    struct dlb2_create_ldb_port_args *args,
			    uintptr_t cq_dma_base,
			    struct dlb2_cmd_response *resp,
			    bool vdev_req,
			    unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_port *port;
	int ret, cos_id;

	dlb2_log_create_ldb_port_args(hw, domain_id, cq_dma_base, args,
				      vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_ldb_port_args(hw, domain_id, cq_dma_base,
					       args, resp, vdev_req, vdev_id,
					       &domain, &port, &cos_id);
	if (ret)
		return ret;

	ret = dlb2_configure_ldb_port(hw, domain, port, cq_dma_base,
				      args, vdev_req, vdev_id);
	if (ret)
		return ret;

	/*
	 * Configuration succeeded, so move the resource from the 'avail' to
	 * the 'used' list.
	 */
	dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);

	dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);

	resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;

	return 0;
}

static void
dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
			      u32 domain_id,
			      uintptr_t cq_dma_base,
			      struct dlb2_create_dir_port_args *args,
			      bool vdev_req,
			      unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
		    domain_id);
	DLB2_HW_DBG(hw, "\tCQ depth: %d\n",
		    args->cq_depth);
	DLB2_HW_DBG(hw, "\tCQ base address: 0x%lx\n",
		    cq_dma_base);
}

static struct dlb2_dir_pq_pair *
dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
			    u32 id,
			    bool vdev_req,
			    struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
		return NULL;

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		if ((!vdev_req && port->id.phys_id == id) ||
		    (vdev_req && port->id.virt_id == id))
			return port;
	}

	return NULL;
}

static int
dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
				 u32 domain_id,
				 uintptr_t cq_dma_base,
				 struct dlb2_create_dir_port_args *args,
				 struct dlb2_cmd_response *resp,
				 bool vdev_req,
				 unsigned int vdev_id,
				 struct dlb2_hw_domain **out_domain,
				 struct dlb2_dir_pq_pair **out_port)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_dir_pq_pair *pq;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	if (args->queue_id != -1) {
		/*
		 * If the user claims the queue is already configured, validate
		 * the queue ID, its domain, and whether the queue is
		 * configured.
		 */
		pq = dlb2_get_domain_used_dir_pq(hw,
						 args->queue_id,
						 vdev_req,
						 domain);

		if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
		    !pq->queue_configured) {
			resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
			return -EINVAL;
		}
	} else {
		/*
		 * If the port's queue is not configured, validate that a free
		 * port-queue pair is available.
		 */
		pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
					typeof(*pq));
		if (!pq) {
			resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	/* Check cache-line alignment */
	if ((cq_dma_base & 0x3F) != 0) {
		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
		return -EINVAL;
	}

	if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
		return -EINVAL;
	}

	*out_domain = domain;
	*out_port = pq;

	return 0;
}

static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain,
				       struct dlb2_dir_pq_pair *port,
				       bool vdev_req,
				       unsigned int vdev_id)
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_DIR_PP2VAS_VAS);
	DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), reg);

	if (vdev_req) {
		unsigned int offs;
		u32 virt_id;

		/*
		 * DLB uses producer port address bits 17:12 to determine the
		 * producer port ID. In Scalable IOV mode, PP accesses come
		 * through the PF MMIO window for the physical producer port,
		 * so for translation purposes the virtual and physical port
		 * IDs are equal.
		 */
		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		reg = 0;
		DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_DIR_VPP2PP_PP);
		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), reg);

		reg = 0;
		DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_PP2VDEV_VDEV);
		DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VDEV(port->id.phys_id), reg);

		reg = 0;
		DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VPP_V_VPP_V);
		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), reg);
	}

	reg = 0;
	DLB2_BIT_SET(reg, DLB2_SYS_DIR_PP_V_PP_V);
	DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP_V(port->id.phys_id), reg);
}

static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain,
				      struct dlb2_dir_pq_pair *port,
				      uintptr_t cq_dma_base,
				      struct dlb2_create_dir_port_args *args,
				      bool vdev_req,
				      unsigned int vdev_id)
{
	u32 reg = 0;
	u32 ds = 0;

	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
	DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_DIR_CQ_ADDR_L_ADDR_L);
	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), reg);
4624 reg = cq_dma_base >> 32;
4625 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), reg);
4628 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4629 * cache lines out-of-order (but QEs within a cache line are always
4630 * updated in-order).
	reg = 0;
	DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_CQ2VF_PF_RO_VF);
	DLB2_BITS_SET(reg, !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
		      DLB2_SYS_DIR_CQ2VF_PF_RO_IS_PF);
	DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ2VF_PF_RO_RO);

	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), reg);

	if (args->cq_depth <= 8) {
		ds = 1;
	} else if (args->cq_depth == 16) {
		ds = 2;
	} else if (args->cq_depth == 32) {
		ds = 3;
	} else if (args->cq_depth == 64) {
		ds = 4;
	} else if (args->cq_depth == 128) {
		ds = 5;
	} else if (args->cq_depth == 256) {
		ds = 6;
	} else if (args->cq_depth == 512) {
		ds = 7;
	} else if (args->cq_depth == 1024) {
		ds = 8;
	} else {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: invalid CQ depth\n",
			    __func__, __LINE__);
		return -EFAULT;
	}
	reg = 0;
	DLB2_BITS_SET(reg, ds,
		      DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
	DLB2_CSR_WR(hw,
		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
		    reg);
	/*
	 * To support CQs with depth less than 8, program the token count
	 * register with a non-zero initial value. Operations such as domain
	 * reset must take this initial value into account when quiescing the
	 * CQ.
	 */
	port->init_tkn_cnt = 0;

	if (args->cq_depth < 8) {
		reg = 0;
		port->init_tkn_cnt = 8 - args->cq_depth;

		DLB2_BITS_SET(reg, port->init_tkn_cnt,
			      DLB2_LSP_CQ_DIR_TKN_CNT_COUNT);
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
			    reg);
	} else {
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
			    DLB2_LSP_CQ_DIR_TKN_CNT_RST);
	}
	reg = 0;
	DLB2_BITS_SET(reg, ds,
		      DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_TOKEN_DEPTH_SELECT_V2);
	DLB2_CSR_WR(hw,
		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
						      port->id.phys_id),
		    reg);

	/* Reset the CQ write pointer */
	DLB2_CSR_WR(hw,
		    DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
		    DLB2_CHP_DIR_CQ_WPTR_RST);
	/* Virtualize the PPID */
	reg = 0;
	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), reg);

	/*
	 * Address translation (AT) settings: 0: untranslated, 2: translated
	 * (see ATS spec regarding Address Type field for more details)
	 */
	if (hw->ver == DLB2_HW_V2) {
		reg = 0;
		DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), reg);
	}

	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
		DLB2_BITS_SET(reg, hw->pasid[vdev_id],
			      DLB2_SYS_DIR_CQ_PASID_PASID);
		DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ_PASID_FMT2);
	}

	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id), reg);

	reg = 0;
	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_DIR_CQ2VAS_CQ2VAS);
	DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id), reg);

	return 0;
}

static int dlb2_configure_dir_port(struct dlb2_hw *hw,
				   struct dlb2_hw_domain *domain,
				   struct dlb2_dir_pq_pair *port,
				   uintptr_t cq_dma_base,
				   struct dlb2_create_dir_port_args *args,
				   bool vdev_req,
				   unsigned int vdev_id)
{
	int ret;

	ret = dlb2_dir_port_configure_cq(hw, domain, port, cq_dma_base,
					 args, vdev_req, vdev_id);
	if (ret)
		return ret;

	dlb2_dir_port_configure_pp(hw, domain, port, vdev_req, vdev_id);

	dlb2_dir_port_cq_enable(hw, port);

	port->enabled = true;

	port->port_configured = true;

	return 0;
}
4771 * dlb2_hw_create_dir_port() - create a directed port
4772 * @hw: dlb2_hw handle for a particular device.
4773 * @domain_id: domain ID.
4774 * @args: port creation arguments.
4775 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4776 * @resp: response structure.
4777 * @vdev_req: indicates whether this request came from a vdev.
4778 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4780 * This function creates a directed port.
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
4786 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4787 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4788 * contains the port ID.
4790 * resp->id contains a virtual ID if vdev_req is true.
4793 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4794 * pointer address is not properly aligned, the domain is not
4795 * configured, or the domain has already been started.
4796 * EFAULT - Internal error (resp->status not set).
int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
			    u32 domain_id,
			    struct dlb2_create_dir_port_args *args,
			    uintptr_t cq_dma_base,
			    struct dlb2_cmd_response *resp,
			    bool vdev_req,
			    unsigned int vdev_id)
{
	struct dlb2_dir_pq_pair *port;
	struct dlb2_hw_domain *domain;
	int ret;

	dlb2_log_create_dir_port_args(hw, domain_id, cq_dma_base, args,
				      vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_dir_port_args(hw, domain_id, cq_dma_base,
					       args, resp, vdev_req, vdev_id,
					       &domain, &port);
	if (ret)
		return ret;

	ret = dlb2_configure_dir_port(hw, domain, port, cq_dma_base,
				      args, vdev_req, vdev_id);
	if (ret)
		return ret;

	/*
	 * Configuration succeeded, so move the resource from the 'avail' to
	 * the 'used' list (if it's not already there).
	 */
	if (args->queue_id == -1) {
		dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);

		dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
	}

	resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;

	return 0;
}

static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
				     struct dlb2_hw_domain *domain,
				     struct dlb2_dir_pq_pair *queue,
				     struct dlb2_create_dir_queue_args *args,
				     bool vdev_req,
				     unsigned int vdev_id)
{
	unsigned int offs;
	u32 reg = 0;

	/* QID write permissions are turned on when the domain is started */
	offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
		queue->id.phys_id;

	DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), reg);

	/* Don't timestamp QEs that pass through this queue */
	DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_ITS(queue->id.phys_id), reg);

	reg = 0;
	DLB2_BITS_SET(reg, args->depth_threshold,
		      DLB2_LSP_QID_DIR_DEPTH_THRSH_THRESH);
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver, queue->id.phys_id),
		    reg);

	if (vdev_req) {
		offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
			queue->id.virt_id;

		reg = 0;
		DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VQID_V_VQID_V);
		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), reg);

		reg = 0;
		DLB2_BITS_SET(reg, queue->id.phys_id,
			      DLB2_SYS_VF_DIR_VQID2QID_QID);
		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), reg);
	}

	reg = 0;
	DLB2_BIT_SET(reg, DLB2_SYS_DIR_QID_V_QID_V);
	DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), reg);

	queue->queue_configured = true;
}

static void
dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
			       u32 domain_id,
			       struct dlb2_create_dir_queue_args *args,
			       bool vdev_req,
			       unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
}
4921 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
4923 struct dlb2_create_dir_queue_args *args,
4924 struct dlb2_cmd_response *resp,
4926 unsigned int vdev_id,
4927 struct dlb2_hw_domain **out_domain,
4928 struct dlb2_dir_pq_pair **out_queue)
4930 struct dlb2_hw_domain *domain;
4931 struct dlb2_dir_pq_pair *pq;
4933 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4936 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4940 if (!domain->configured) {
4941 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4945 if (domain->started) {
4946 resp->status = DLB2_ST_DOMAIN_STARTED;
4951 * If the user claims the port is already configured, validate the port
4952 * ID, its domain, and whether the port is configured.
4954 if (args->port_id != -1) {
4955 pq = dlb2_get_domain_used_dir_pq(hw,
4960 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4961 !pq->port_configured) {
4962 resp->status = DLB2_ST_INVALID_PORT_ID;
4967 * If the queue's port is not configured, validate that a free
4968 * port-queue pair is available.
4970 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4971 typeof(*pq));
4972 if (!pq) {
4973 resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
4978 *out_domain = domain;
4979 *out_queue = pq;
4985 * dlb2_hw_create_dir_queue() - create a directed queue
4986 * @hw: dlb2_hw handle for a particular device.
4987 * @domain_id: domain ID.
4988 * @args: queue creation arguments.
4989 * @resp: response structure.
4990 * @vdev_req: indicates whether this request came from a vdev.
4991 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4993 * This function creates a directed queue.
4995 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4996 * device.
4999 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5000 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5001 * contains the queue ID.
5003 * resp->id contains a virtual ID if vdev_req is true.
5006 * EINVAL - A requested resource is unavailable, the domain is not configured,
5007 * or the domain has already been started.
5008 * EFAULT - Internal error (resp->status not set).
5010 int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
5011 u32 domain_id,
5012 struct dlb2_create_dir_queue_args *args,
5013 struct dlb2_cmd_response *resp,
5014 bool vdev_req,
5015 unsigned int vdev_id)
5017 struct dlb2_dir_pq_pair *queue;
5018 struct dlb2_hw_domain *domain;
5021 dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
5024 * Verify that hardware resources are available before attempting to
5025 * satisfy the request. This simplifies the error unwinding code.
5027 ret = dlb2_verify_create_dir_queue_args(hw,
5028 domain_id,
5029 args,
5030 resp,
5031 vdev_req,
5032 vdev_id,
5033 &domain,
5034 &queue);
5035 if (ret)
5036 return ret;
5038 dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
5041 * Configuration succeeded, so move the resource from the 'avail' to
5042 * the 'used' list (if it's not already there).
5044 if (args->port_id == -1) {
5045 dlb2_list_del(&domain->avail_dir_pq_pairs,
5046 &queue->domain_list);
5048 dlb2_list_add(&domain->used_dir_pq_pairs,
5049 &queue->domain_list);
5054 resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
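/*
 * Usage sketch (editorial): creating a directed queue and binding it to a
 * previously created directed port. port_id is the ID returned in resp->id by
 * dlb2_hw_create_dir_port(); passing port_id == -1 instead claims any free
 * port-queue pair, as described above.
 *
 *	struct dlb2_create_dir_queue_args qargs = { .port_id = port_id };
 *	struct dlb2_cmd_response resp = {0};
 *
 *	if (dlb2_hw_create_dir_queue(hw, domain_id, &qargs, &resp, false, 0))
 *		return -1; // resp.status: e.g. DLB2_ST_DIR_QUEUES_UNAVAILABLE
 */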
5059 static bool
5060 dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
5061 struct dlb2_ldb_queue *queue,
5062 int *slot)
5066 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
5067 struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
5069 if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
5070 map->pending_qid == queue->id.phys_id)
5071 break;
5074 *slot = i;
5076 return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
5079 static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
5080 struct dlb2_ldb_queue *queue,
5081 struct dlb2_cmd_response *resp)
5083 enum dlb2_qid_map_state state;
5086 /* Unused slot available? */
5087 if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
5088 return 0;
5091 * If the queue is already mapped (from the application's perspective),
5092 * this is simply a priority update.
5094 state = DLB2_QUEUE_MAPPED;
5095 if (dlb2_port_find_slot_queue(port, state, queue, &i))
5096 return 0;
5098 state = DLB2_QUEUE_MAP_IN_PROG;
5099 if (dlb2_port_find_slot_queue(port, state, queue, &i))
5100 return 0;
5102 if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
5103 return 0;
5106 * If the slot contains an unmap in progress, it's considered
5107 * mapped.
5109 state = DLB2_QUEUE_UNMAP_IN_PROG;
5110 if (dlb2_port_find_slot(port, state, &i))
5111 return 0;
5113 state = DLB2_QUEUE_UNMAPPED;
5114 if (dlb2_port_find_slot(port, state, &i))
5115 return 0;
5117 resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
5118 return -EINVAL;
5121 static struct dlb2_ldb_queue *
5122 dlb2_get_domain_ldb_queue(u32 id,
5123 bool vdev_req,
5124 struct dlb2_hw_domain *domain)
5126 struct dlb2_list_entry *iter;
5127 struct dlb2_ldb_queue *queue;
5130 if (id >= DLB2_MAX_NUM_LDB_QUEUES)
5131 return NULL;
5133 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
5134 if ((!vdev_req && queue->id.phys_id == id) ||
5135 (vdev_req && queue->id.virt_id == id))
5136 return queue;
5139 return NULL;
5142 static struct dlb2_ldb_port *
5143 dlb2_get_domain_used_ldb_port(u32 id,
5144 bool vdev_req,
5145 struct dlb2_hw_domain *domain)
5147 struct dlb2_list_entry *iter;
5148 struct dlb2_ldb_port *port;
5152 if (id >= DLB2_MAX_NUM_LDB_PORTS)
5153 return NULL;
5155 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
5156 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
5157 if ((!vdev_req && port->id.phys_id == id) ||
5158 (vdev_req && port->id.virt_id == id))
5159 return port;
5162 DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter) {
5163 if ((!vdev_req && port->id.phys_id == id) ||
5164 (vdev_req && port->id.virt_id == id))
5165 return port;
5169 return NULL;
5172 static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
5173 struct dlb2_ldb_port *port,
5174 int slot,
5175 struct dlb2_map_qid_args *args)
5179 /* Read-modify-write the priority and valid bit register */
5180 cq2priov = DLB2_CSR_RD(hw,
5181 DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id));
5183 cq2priov |= (1 << (slot + DLB2_LSP_CQ2PRIOV_V_LOC)) &
5184 DLB2_LSP_CQ2PRIOV_V;
5185 cq2priov |= ((args->priority & 0x7) << slot * 3) &
5186 DLB2_LSP_CQ2PRIOV_PRIO;
5188 DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), cq2priov);
5190 dlb2_flush_csr(hw);
5192 port->qid_map[slot].priority = args->priority;
5195 static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
5196 u32 domain_id,
5197 struct dlb2_map_qid_args *args,
5198 struct dlb2_cmd_response *resp,
5199 bool vdev_req,
5200 unsigned int vdev_id,
5201 struct dlb2_hw_domain **out_domain,
5202 struct dlb2_ldb_port **out_port,
5203 struct dlb2_ldb_queue **out_queue)
5205 struct dlb2_hw_domain *domain;
5206 struct dlb2_ldb_queue *queue;
5207 struct dlb2_ldb_port *port;
5210 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5213 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5217 if (!domain->configured) {
5218 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5222 id = args->port_id;
5224 port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5226 if (!port || !port->configured) {
5227 resp->status = DLB2_ST_INVALID_PORT_ID;
5231 if (args->priority >= DLB2_QID_PRIORITIES) {
5232 resp->status = DLB2_ST_INVALID_PRIORITY;
5236 queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5238 if (!queue || !queue->configured) {
5239 resp->status = DLB2_ST_INVALID_QID;
5243 if (queue->domain_id.phys_id != domain->id.phys_id) {
5244 resp->status = DLB2_ST_INVALID_QID;
5248 if (port->domain_id.phys_id != domain->id.phys_id) {
5249 resp->status = DLB2_ST_INVALID_PORT_ID;
5253 *out_domain = domain;
5254 *out_port = port;
5255 *out_queue = queue;
5260 static void dlb2_log_map_qid(struct dlb2_hw *hw,
5261 u32 domain_id,
5262 struct dlb2_map_qid_args *args,
5263 bool vdev_req,
5264 unsigned int vdev_id)
5266 DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
5268 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5269 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5270 domain_id);
5271 DLB2_HW_DBG(hw, "\tPort ID: %d\n",
5272 args->port_id);
5273 DLB2_HW_DBG(hw, "\tQueue ID: %d\n",
5274 args->qid);
5275 DLB2_HW_DBG(hw, "\tPriority: %d\n",
5276 args->priority);
5280 * dlb2_hw_map_qid() - map a load-balanced queue to a load-balanced port
5281 * @hw: dlb2_hw handle for a particular device.
5282 * @domain_id: domain ID.
5283 * @args: map QID arguments.
5284 * @resp: response structure.
5285 * @vdev_req: indicates whether this request came from a vdev.
5286 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5288 * This function configures the DLB to schedule QEs from the specified queue
5289 * to the specified port. Each load-balanced port can be mapped to up to 8
5290 * queues; each load-balanced queue can potentially map to all the
5291 * load-balanced ports.
5293 * A successful return does not necessarily mean the mapping was configured. If
5294 * this function is unable to immediately map the queue to the port, it will
5295 * add the requested operation to a per-port list of pending map/unmap
5296 * operations, and (if it's not already running) launch a kernel thread that
5297 * periodically attempts to process all pending operations. In a sense, this is
5298 * an asynchronous function.
5300 * This asynchronicity creates two views of the state of hardware: the actual
5301 * hardware state and the requested state (as if every request completed
5302 * immediately). If there are any pending map/unmap operations, the requested
5303 * state will differ from the actual state. All validation is performed with
5304 * respect to the pending state; for instance, if there are 8 pending map
5305 * operations for port X, a request for a 9th will fail because a load-balanced
5306 * port can only map up to 8 queues.
5308 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5309 * device.
5312 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5313 * assigned a detailed error code from enum dlb2_error.
5316 * EINVAL - A requested resource is unavailable, the port or queue ID is
5317 * invalid, or the domain is not configured.
5318 * EFAULT - Internal error (resp->status not set).
5320 int dlb2_hw_map_qid(struct dlb2_hw *hw,
5321 u32 domain_id,
5322 struct dlb2_map_qid_args *args,
5323 struct dlb2_cmd_response *resp,
5324 bool vdev_req,
5325 unsigned int vdev_id)
5327 struct dlb2_hw_domain *domain;
5328 struct dlb2_ldb_queue *queue;
5329 enum dlb2_qid_map_state st;
5330 struct dlb2_ldb_port *port;
5334 dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
5337 * Verify that hardware resources are available before attempting to
5338 * satisfy the request. This simplifies the error unwinding code.
5340 ret = dlb2_verify_map_qid_args(hw,
5341 domain_id,
5342 args,
5343 resp,
5344 vdev_req,
5345 vdev_id,
5346 &domain,
5347 &port,
5348 &queue);
5349 if (ret)
5350 return ret;
5352 prio = args->priority;
5355 * If there are any outstanding detach operations for this port,
5356 * attempt to complete them. This may be necessary to free up a QID
5357 * slot for this requested mapping.
5359 if (port->num_pending_removals)
5360 dlb2_domain_finish_unmap_port(hw, domain, port);
5362 ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
5366 /* Hardware requires disabling the CQ before mapping QIDs. */
5368 dlb2_ldb_port_cq_disable(hw, port);
5371 * If this is only a priority change, don't perform the full QID->CQ
5372 * mapping procedure.
5374 st = DLB2_QUEUE_MAPPED;
5375 if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5376 if (prio != port->qid_map[i].priority) {
5377 dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5378 DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5381 st = DLB2_QUEUE_MAPPED;
5382 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5383 if (ret)
5384 return ret;
5386 goto map_qid_done;
5389 st = DLB2_QUEUE_UNMAP_IN_PROG;
5390 if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5391 if (prio != port->qid_map[i].priority) {
5392 dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5393 DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5396 st = DLB2_QUEUE_MAPPED;
5397 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5398 if (ret)
5399 return ret;
5401 goto map_qid_done;
5405 * If this is a priority change on an in-progress mapping, don't
5406 * perform the full QID->CQ mapping procedure.
5408 st = DLB2_QUEUE_MAP_IN_PROG;
5409 if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5410 port->qid_map[i].priority = prio;
5412 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5414 goto map_qid_done;
5418 * If this is a priority change on a pending mapping, update the
5419 * pending priority.
5421 if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5422 port->qid_map[i].pending_priority = prio;
5424 DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5426 goto map_qid_done;
5430 * If all the CQ's slots are in use, then there's an unmap in progress
5431 * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
5432 * mapping to pending_map and return. When the removal is completed for
5433 * the slot's current occupant, this mapping will be performed.
5435 if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
5436 if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
5437 enum dlb2_qid_map_state new_st;
5439 port->qid_map[i].pending_qid = queue->id.phys_id;
5440 port->qid_map[i].pending_priority = prio;
5442 new_st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
5444 ret = dlb2_port_slot_state_transition(hw, port, queue,
5445 i, new_st);
5446 if (ret)
5447 return ret;
5449 DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
5451 goto map_qid_done;
5456 * If the domain has started, a special "dynamic" CQ->queue mapping
5457 * procedure is required in order to safely update the CQ<->QID tables.
5458 * The "static" procedure cannot be used when traffic is flowing,
5459 * because the CQ<->QID tables cannot be updated atomically and the
5460 * scheduler won't see the new mapping unless the queue's if_status
5461 * changes, which isn't guaranteed.
5463 ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
5465 /* If ret is less than zero, it's due to an internal error */
5466 if (ret < 0)
5467 return ret;
5470 map_qid_done:
5471 dlb2_ldb_port_cq_enable(hw, port);
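/*
 * Usage sketch (editorial): requesting a QID->CQ mapping. Because the call is
 * asynchronous, a zero return only means the request was accepted; the
 * mapping may still be pending behind an unmap, which can be observed via
 * dlb2_hw_pending_port_unmaps() below.
 *
 *	struct dlb2_map_qid_args margs = {
 *		.port_id = port_id,	// load-balanced port
 *		.qid = qid,		// load-balanced queue
 *		.priority = 0,		// 0 (highest) .. DLB2_QID_PRIORITIES - 1
 *	};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	if (dlb2_hw_map_qid(hw, domain_id, &margs, &resp, false, 0))
 *		return -1; // resp.status holds the dlb2_error detail
 */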
5478 static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
5479 u32 domain_id,
5480 struct dlb2_unmap_qid_args *args,
5481 bool vdev_req,
5482 unsigned int vdev_id)
5484 DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
5486 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5487 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5488 domain_id);
5489 DLB2_HW_DBG(hw, "\tPort ID: %d\n",
5490 args->port_id);
5491 DLB2_HW_DBG(hw, "\tQueue ID: %d\n",
5492 args->qid);
5493 if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
5494 DLB2_HW_DBG(hw, "\tQueue's num mappings: %d\n",
5495 hw->rsrcs.ldb_queues[args->qid].num_mappings);
5498 static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
5499 u32 domain_id,
5500 struct dlb2_unmap_qid_args *args,
5501 struct dlb2_cmd_response *resp,
5502 bool vdev_req,
5503 unsigned int vdev_id,
5504 struct dlb2_hw_domain **out_domain,
5505 struct dlb2_ldb_port **out_port,
5506 struct dlb2_ldb_queue **out_queue)
5508 enum dlb2_qid_map_state state;
5509 struct dlb2_hw_domain *domain;
5510 struct dlb2_ldb_queue *queue;
5511 struct dlb2_ldb_port *port;
5515 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5518 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5522 if (!domain->configured) {
5523 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5527 id = args->port_id;
5529 port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5531 if (!port || !port->configured) {
5532 resp->status = DLB2_ST_INVALID_PORT_ID;
5536 if (port->domain_id.phys_id != domain->id.phys_id) {
5537 resp->status = DLB2_ST_INVALID_PORT_ID;
5541 queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5543 if (!queue || !queue->configured) {
5544 DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
5545 __func__, args->qid);
5546 resp->status = DLB2_ST_INVALID_QID;
5551 * Verify that the port has the queue mapped. From the application's
5552 * perspective a queue is mapped if it is actually mapped, the map is
5553 * in progress, or the map is blocked pending an unmap.
5555 state = DLB2_QUEUE_MAPPED;
5556 if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5557 goto done;
5559 state = DLB2_QUEUE_MAP_IN_PROG;
5560 if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5561 goto done;
5563 if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
5564 goto done;
5566 resp->status = DLB2_ST_INVALID_QID;
5567 return -EINVAL;
5569 done:
5570 *out_domain = domain;
5571 *out_port = port;
5572 *out_queue = queue;
5578 * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
5579 * @hw: dlb2_hw handle for a particular device.
5580 * @domain_id: domain ID.
5581 * @args: unmap QID arguments.
5582 * @resp: response structure.
5583 * @vdev_req: indicates whether this request came from a vdev.
5584 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5586 * This function configures the DLB to stop scheduling QEs from the specified
5587 * queue to the specified port.
5589 * A successful return does not necessarily mean the mapping was removed. If
5590 * this function is unable to immediately unmap the queue from the port, it
5591 * will add the requested operation to a per-port list of pending map/unmap
5592 * operations, and (if it's not already running) launch a kernel thread that
5593 * periodically attempts to process all pending operations. See
5594 * dlb2_hw_map_qid() for more details.
5596 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5597 * device.
5600 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5601 * assigned a detailed error code from enum dlb2_error.
5604 * EINVAL - A requested resource is unavailable, the port or queue ID is
5605 * invalid, or the domain is not configured.
5606 * EFAULT - Internal error (resp->status not set).
5608 int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
5609 u32 domain_id,
5610 struct dlb2_unmap_qid_args *args,
5611 struct dlb2_cmd_response *resp,
5612 bool vdev_req,
5613 unsigned int vdev_id)
5615 struct dlb2_hw_domain *domain;
5616 struct dlb2_ldb_queue *queue;
5617 enum dlb2_qid_map_state st;
5618 struct dlb2_ldb_port *port;
5619 bool unmap_complete;
5622 dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
5625 * Verify that hardware resources are available before attempting to
5626 * satisfy the request. This simplifies the error unwinding code.
5628 ret = dlb2_verify_unmap_qid_args(hw,
5629 domain_id,
5630 args,
5631 resp,
5632 vdev_req,
5633 vdev_id,
5634 &domain,
5635 &port,
5636 &queue);
5637 if (ret)
5638 return ret;
5641 * If the queue hasn't been mapped yet, we need to update the slot's
5642 * state and re-enable the queue's inflights.
5644 st = DLB2_QUEUE_MAP_IN_PROG;
5645 if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5647 * Since the in-progress map was aborted, re-enable the QID's
5648 * inflight limit.
5650 if (queue->num_pending_additions == 0)
5651 dlb2_ldb_queue_set_inflight_limit(hw, queue);
5653 st = DLB2_QUEUE_UNMAPPED;
5654 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5655 if (ret)
5656 return ret;
5658 goto unmap_qid_done;
5662 * If the queue mapping is on hold pending an unmap, we simply need to
5663 * update the slot's state.
5665 if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5666 st = DLB2_QUEUE_UNMAP_IN_PROG;
5667 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5668 if (ret)
5669 return ret;
5671 goto unmap_qid_done;
5674 st = DLB2_QUEUE_MAPPED;
5675 if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
5677 "[%s()] Internal error: no available CQ slots\n",
5683 * QID->CQ mapping removal is an asynchronous procedure. It requires
5684 * stopping the DLB2 from scheduling this CQ, draining all inflights
5685 * from the CQ, then unmapping the queue from the CQ. This function
5686 * simply marks the port as needing the queue unmapped, and (if
5687 * necessary) starts the unmapping worker thread.
5689 dlb2_ldb_port_cq_disable(hw, port);
5691 st = DLB2_QUEUE_UNMAP_IN_PROG;
5692 ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5693 if (ret)
5694 return ret;
5695 unmap_qid_done:
5697 * Attempt to finish the unmapping now, in case the port has no
5698 * outstanding inflights. If that's not the case, this will fail and
5699 * the unmapping will be completed at a later time.
5701 unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
5704 * If the unmapping couldn't complete immediately, launch the worker
5705 * thread (if it isn't already launched) to finish it later.
5707 if (!unmap_complete && !os_worker_active(hw))
5708 os_schedule_work(hw);
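/*
 * Usage sketch (editorial): requesting an unmap and polling for completion.
 * The unmap may finish later on the worker thread, so a caller that needs the
 * mapping gone (e.g. before port teardown) can poll the pending-unmaps count.
 *
 *	struct dlb2_unmap_qid_args uargs = { .port_id = port_id, .qid = qid };
 *	struct dlb2_pending_port_unmaps_args pargs = { .port_id = port_id };
 *	struct dlb2_cmd_response resp = {0};
 *
 *	if (dlb2_hw_unmap_qid(hw, domain_id, &uargs, &resp, false, 0))
 *		return -1;
 *	do {
 *		dlb2_hw_pending_port_unmaps(hw, domain_id, &pargs, &resp,
 *					    false, 0);
 *	} while (resp.id != 0); // resp.id = unmaps still in progress
 */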
5716 static void
5717 dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
5718 struct dlb2_pending_port_unmaps_args *args,
5719 bool vdev_req,
5720 unsigned int vdev_id)
5722 DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
5724 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
5725 DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
5729 * dlb2_hw_pending_port_unmaps() - returns the number of unmap operations in
5730 * progress.
5731 * @hw: dlb2_hw handle for a particular device.
5732 * @domain_id: domain ID.
5733 * @args: number of unmaps in progress args
5734 * @resp: response structure.
5735 * @vdev_req: indicates whether this request came from a vdev.
5736 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5739 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5740 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5741 * contains the number of unmaps in progress.
5744 * EINVAL - Invalid domain ID or port ID.
5746 int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
5747 u32 domain_id,
5748 struct dlb2_pending_port_unmaps_args *args,
5749 struct dlb2_cmd_response *resp,
5750 bool vdev_req,
5751 unsigned int vdev_id)
5753 struct dlb2_hw_domain *domain;
5754 struct dlb2_ldb_port *port;
5756 dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
5758 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5761 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5765 port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
5766 if (!port || !port->configured) {
5767 resp->status = DLB2_ST_INVALID_PORT_ID;
5771 resp->id = port->num_pending_removals;
5776 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
5777 u32 domain_id,
5778 struct dlb2_cmd_response *resp,
5779 bool vdev_req,
5780 unsigned int vdev_id,
5781 struct dlb2_hw_domain **out_domain)
5783 struct dlb2_hw_domain *domain;
5785 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5788 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5792 if (!domain->configured) {
5793 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5797 if (domain->started) {
5798 resp->status = DLB2_ST_DOMAIN_STARTED;
5802 *out_domain = domain;
5807 static void dlb2_log_start_domain(struct dlb2_hw *hw,
5808 u32 domain_id,
5809 bool vdev_req,
5810 unsigned int vdev_id)
5812 DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
5814 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5815 DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5819 * dlb2_hw_start_domain() - start a scheduling domain
5820 * @hw: dlb2_hw handle for a particular device.
5821 * @domain_id: domain ID.
5822 * @args: start domain arguments.
5823 * @resp: response structure.
5824 * @vdev_req: indicates whether this request came from a vdev.
5825 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5827 * This function starts a scheduling domain, which allows applications to send
5828 * traffic through it. Once a domain is started, its resources can no longer be
5829 * configured (besides QID remapping and port enable/disable).
5831 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5832 * device.
5835 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5836 * assigned a detailed error code from enum dlb2_error.
5839 * EINVAL - The domain is not configured, or the domain has already been started.
5841 int
5842 dlb2_hw_start_domain(struct dlb2_hw *hw,
5843 u32 domain_id,
5844 struct dlb2_start_domain_args *args,
5845 struct dlb2_cmd_response *resp,
5846 bool vdev_req,
5847 unsigned int vdev_id)
5849 struct dlb2_list_entry *iter;
5850 struct dlb2_dir_pq_pair *dir_queue;
5851 struct dlb2_ldb_queue *ldb_queue;
5852 struct dlb2_hw_domain *domain;
5857 dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
5859 ret = dlb2_verify_start_domain_args(hw,
5860 domain_id,
5861 resp,
5862 vdev_req,
5863 vdev_id,
5864 &domain);
5865 if (ret)
5866 return ret;
5869 * Enable load-balanced and directed queue write permissions for the
5870 * queues this domain owns. Without this, the DLB2 will drop all
5871 * incoming traffic to those queues.
5873 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
5877 DLB2_BIT_SET(vasqid_v, DLB2_SYS_LDB_VASQID_V_VASQID_V);
5879 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
5880 ldb_queue->id.phys_id;
5882 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), vasqid_v);
5885 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
5889 DLB2_BIT_SET(vasqid_v, DLB2_SYS_DIR_VASQID_V_VASQID_V);
5891 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
5892 dir_queue->id.phys_id;
5894 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), vasqid_v);
5899 domain->started = true;
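/*
 * Usage sketch (editorial): the configure-then-start ordering. All queue,
 * port, and static QID-mapping configuration must precede this call; after
 * it, only QID remapping and port enable/disable are permitted.
 *
 *	struct dlb2_start_domain_args sargs = {0};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	// ... dlb2_hw_create_*_queue()/_port(), dlb2_hw_map_qid() ...
 *	if (dlb2_hw_start_domain(hw, domain_id, &sargs, &resp, false, 0))
 *		return -1; // e.g. DLB2_ST_DOMAIN_STARTED if already started
 *	// applications may now send HCWs through the domain's ports
 */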
5906 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
5907 u32 domain_id,
5908 u32 queue_id,
5909 bool vdev_req,
5910 unsigned int vf_id)
5912 DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
5914 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5915 DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5916 DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5920 * dlb2_hw_get_dir_queue_depth() - returns the depth of a directed queue
5921 * @hw: dlb2_hw handle for a particular device.
5922 * @domain_id: domain ID.
5923 * @args: queue depth args
5924 * @resp: response structure.
5925 * @vdev_req: indicates whether this request came from a vdev.
5926 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5928 * This function returns the depth of a directed queue.
5930 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5931 * device.
5934 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5935 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5936 * contains the depth.
5939 * EINVAL - Invalid domain ID or queue ID.
5941 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
5942 u32 domain_id,
5943 struct dlb2_get_dir_queue_depth_args *args,
5944 struct dlb2_cmd_response *resp,
5945 bool vdev_req,
5946 unsigned int vdev_id)
5948 struct dlb2_dir_pq_pair *queue;
5949 struct dlb2_hw_domain *domain;
5950 int id;
5952 id = domain_id;
5954 dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
5955 vdev_req, vdev_id);
5957 domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
5959 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5963 id = args->queue_id;
5965 queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
5967 resp->status = DLB2_ST_INVALID_QID;
5971 resp->id = dlb2_dir_queue_depth(hw, queue);
5976 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
5977 u32 domain_id,
5978 u32 queue_id,
5979 bool vdev_req,
5980 unsigned int vf_id)
5982 DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
5984 DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
5985 DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5986 DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
5990 * dlb2_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
5991 * @hw: dlb2_hw handle for a particular device.
5992 * @domain_id: domain ID.
5993 * @args: queue depth args
5994 * @resp: response structure.
5995 * @vdev_req: indicates whether this request came from a vdev.
5996 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5998 * This function returns the depth of a load-balanced queue.
6000 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
6001 * device.
6004 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
6005 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
6006 * contains the depth.
6009 * EINVAL - Invalid domain ID or queue ID.
6011 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
6012 u32 domain_id,
6013 struct dlb2_get_ldb_queue_depth_args *args,
6014 struct dlb2_cmd_response *resp,
6015 bool vdev_req,
6016 unsigned int vdev_id)
6018 struct dlb2_hw_domain *domain;
6019 struct dlb2_ldb_queue *queue;
6021 dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
6022 vdev_req, vdev_id);
6024 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6026 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6030 queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
6032 resp->status = DLB2_ST_INVALID_QID;
6036 resp->id = dlb2_ldb_queue_depth(hw, queue);
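/*
 * Usage sketch (editorial): draining a domain by polling queue depth. A queue
 * is empty once resp.id reaches zero; the same pattern applies to directed
 * queues via dlb2_hw_get_dir_queue_depth().
 *
 *	struct dlb2_get_ldb_queue_depth_args dargs = { .queue_id = qid };
 *	struct dlb2_cmd_response resp = {0};
 *
 *	do {
 *		if (dlb2_hw_get_ldb_queue_depth(hw, domain_id, &dargs, &resp,
 *						false, 0))
 *			return -1;
 *	} while (resp.id != 0);
 */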
6042 * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
6043 * @hw: dlb2_hw handle for a particular device.
6045 * This function attempts to finish any outstanding unmap procedures.
6046 * This function should be called by the kernel thread responsible for
6047 * finishing map/unmap procedures.
6050 * Returns the number of procedures that weren't completed.
6052 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
6054 int i, num = 0;
6056 /* Finish queue unmap jobs for any domain that needs it */
6057 for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6058 struct dlb2_hw_domain *domain = &hw->domains[i];
6060 num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
6063 return num;
6067 * dlb2_finish_map_qid_procedures() - finish any pending map procedures
6068 * @hw: dlb2_hw handle for a particular device.
6070 * This function attempts to finish any outstanding map procedures.
6071 * This function should be called by the kernel thread responsible for
6072 * finishing map/unmap procedures.
6075 * Returns the number of procedures that weren't completed.
6077 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
6079 int i, num = 0;
6081 /* Finish queue map jobs for any domain that needs it */
6082 for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6083 struct dlb2_hw_domain *domain = &hw->domains[i];
6085 num += dlb2_domain_finish_map_qid_procedures(hw, domain);
6088 return num;
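/*
 * Editorial sketch of the worker that os_schedule_work() is assumed to queue
 * (the name and structure below are illustrative, not the driver's actual
 * handler): it retries until no map or unmap procedures remain outstanding.
 *
 *	static void example_map_unmap_worker(struct dlb2_hw *hw)
 *	{
 *		unsigned int remaining;
 *
 *		remaining = dlb2_finish_unmap_qid_procedures(hw);
 *		remaining += dlb2_finish_map_qid_procedures(hw);
 *		if (remaining != 0)
 *			os_schedule_work(hw); // try again later
 *	}
 */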
6092 * dlb2_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports.
6093 * @hw: dlb2_hw handle for a particular device.
6095 * This function must be called prior to configuring scheduling domains.
6098 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
6102 ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6104 DLB2_BIT_SET(ctrl,
6105 DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_DIR_CQ_MODE);
6107 DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6111 * dlb2_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
6112 * ports.
6113 * @hw: dlb2_hw handle for a particular device.
6115 * This function must be called prior to configuring scheduling domains.
6117 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
6121 ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6123 DLB2_BIT_SET(ctrl,
6124 DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_LDB_CQ_MODE);
6126 DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
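/*
 * Usage sketch (editorial): sparse CQ mode is a device-global setting, so it
 * belongs in device bring-up, after dlb2_resource_init() and before any
 * scheduling domain is configured.
 *
 *	ret = dlb2_resource_init(hw, ver);
 *	if (ret)
 *		return ret;
 *	dlb2_hw_enable_sparse_ldb_cq_mode(hw);
 *	dlb2_hw_enable_sparse_dir_cq_mode(hw);
 *	// ... create and configure scheduling domains ...
 */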
6130 * dlb2_get_group_sequence_numbers() - return a group's number of SNs per queue
6131 * @hw: dlb2_hw handle for a particular device.
6132 * @group_id: sequence number group ID.
6134 * This function returns the configured number of sequence numbers per queue
6135 * for the specified group.
6138 * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
6140 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, u32 group_id)
6142 if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6143 return -EINVAL;
6145 return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
6149 * dlb2_get_group_sequence_number_occupancy() - return a group's in-use slots
6150 * @hw: dlb2_hw handle for a particular device.
6151 * @group_id: sequence number group ID.
6153 * This function returns the group's number of in-use slots (i.e. load-balanced
6154 * queues using the specified group).
6157 * Returns -EINVAL if group_id is invalid, else the group's occupancy.
6159 int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw, u32 group_id)
6161 if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6162 return -EINVAL;
6164 return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
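/*
 * Usage sketch (editorial): resizing a sequence number group before any
 * ordered queue locks it. dlb2_set_group_sequence_numbers() is defined below;
 * a group can only be changed while its occupancy is zero.
 *
 *	if (dlb2_get_group_sequence_number_occupancy(hw, group_id) == 0) {
 *		ret = dlb2_set_group_sequence_numbers(hw, group_id, 512);
 *		if (ret) // -EINVAL: bad ID or value, -EPERM: group in use
 *			return ret;
 *	}
 */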
6167 static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
6168 u32 group_id,
6169 u32 val)
6171 DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
6172 DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
6173 DLB2_HW_DBG(hw, "\tValue: %u\n", val);
6177 * dlb2_set_group_sequence_numbers() - assign a group's number of SNs per queue
6178 * @hw: dlb2_hw handle for a particular device.
6179 * @group_id: sequence number group ID.
6180 * @val: requested number of sequence numbers per queue.
6182 * This function configures the group's number of sequence numbers per queue.
6183 * val can be a power-of-two between 64 and 1024, inclusive. This setting can
6184 * be configured until the first ordered load-balanced queue is configured, at
6185 * which point the configuration is locked.
6188 * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
6189 * ordered queue is configured.
6191 int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
6192 u32 group_id,
6193 u32 val)
6195 const u32 valid_allocations[] = {64, 128, 256, 512, 1024};
6196 struct dlb2_sn_group *group;
6200 if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6201 return -EINVAL;
6203 group = &hw->rsrcs.sn_groups[group_id];
6206 * Once the first load-balanced queue using an SN group is configured,
6207 * the group cannot be changed.
6209 if (group->slot_use_bitmap != 0)
6210 return -EPERM;
6212 for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
6213 if (val == valid_allocations[mode])
6214 break;
6216 if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
6217 return -EINVAL;
6219 group->mode = mode;
6220 group->sequence_numbers_per_queue = val;
6222 DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[0].mode,
6223 DLB2_RO_GRP_SN_MODE_SN_MODE_0);
6224 DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[1].mode,
6225 DLB2_RO_GRP_SN_MODE_SN_MODE_1);
6227 DLB2_CSR_WR(hw, DLB2_RO_GRP_SN_MODE(hw->ver), sn_mode);
6229 dlb2_log_set_group_sequence_numbers(hw, group_id, val);