/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb2_user.h"

#include "dlb2_hw_types.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"
#include "dlb2_resource.h"

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"

#define DLB2_DOM_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)

/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function only needs to be called for configuration that can occur after the
 * domain has started; prior to starting, applications can't send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
}
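
/*
 * Note: the read itself is what forces completion; its value is discarded.
 * Reading back any CSR on the device should have the same ordering effect on
 * posted writes, so a cheap, always-present register (DLB2_SYS_TOTAL_VAS) is
 * used. A typical call site pairs a configuration write with a flush:
 *
 *	DLB2_CSR_WR(hw, <some CSR>, reg);
 *	dlb2_flush_csr(hw);
 */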

static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
	int i;

	dlb2_list_init_head(&domain->used_ldb_queues);
	dlb2_list_init_head(&domain->used_dir_pq_pairs);
	dlb2_list_init_head(&domain->avail_ldb_queues);
	dlb2_list_init_head(&domain->avail_dir_pq_pairs);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&domain->used_ldb_ports[i]);
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}

static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
{
	int i;

	dlb2_list_init_head(&rsrc->avail_domains);
	dlb2_list_init_head(&rsrc->used_domains);
	dlb2_list_init_head(&rsrc->avail_ldb_queues);
	dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
}

/**
 * dlb2_resource_free() - free device state memory
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function frees software state pointed to by dlb2_hw. It should be
 * called when resetting the device or unloading the driver.
 */
void dlb2_resource_free(struct dlb2_hw *hw)
{
	int i;

	if (hw->pf.avail_hist_list_entries)
		dlb2_bitmap_free(hw->pf.avail_hist_list_entries);

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		if (hw->vdev[i].avail_hist_list_entries)
			dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
	}
}

/**
 * dlb2_resource_init() - initialize the device
 * @hw: pointer to struct dlb2_hw.
 * @ver: device version.
 *
 * This function initializes the device's software state (pointed to by the hw
 * argument) and programs global scheduling QoS registers. This function should
 * be called during driver initialization, and the dlb2_hw structure should
 * be zero-initialized before calling the function.
 *
 * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
 * device is reset.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 */
int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
	struct dlb2_list_entry *list;
	unsigned int i;
	int ret;

	/*
	 * For optimal load-balancing, ports that map to one or more QIDs in
	 * common should not be in numerical sequence. The port->QID mapping is
	 * application dependent, but the driver interleaves port IDs as much
	 * as possible to reduce the likelihood of sequential ports mapping to
	 * the same QID(s). This initial allocation of port IDs maximizes the
	 * average distance between an ID and its immediate neighbors (i.e.
	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
	 * 3, etc.).
	 */
	const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
		0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,
		16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
		32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
		48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
	};
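
	/*
	 * Note: each row above is the same permutation of a 16-ID block:
	 * entry j is (j * 7) % 16 plus the block's base (0, 16, 32, 48).
	 * Since 7 is coprime to 16, this visits every ID in the block while
	 * keeping numerically adjacent IDs far apart in allocation order.
	 */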

	hw->ver = ver;

	dlb2_init_fn_rsrc_lists(&hw->pf);

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
		dlb2_init_fn_rsrc_lists(&hw->vdev[i]);

	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		dlb2_init_domain_rsrc_lists(&hw->domains[i]);
		hw->domains[i].parent_func = &hw->pf;
	}

	/* Give all resources to the PF driver */
	hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
	for (i = 0; i < hw->pf.num_avail_domains; i++) {
		list = &hw->domains[i].func_list;

		dlb2_list_add(&hw->pf.avail_domains, list);
	}

	hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
		list = &hw->rsrcs.ldb_queues[i].func_list;

		dlb2_list_add(&hw->pf.avail_ldb_queues, list);
	}

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		hw->pf.num_avail_ldb_ports[i] =
			DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;

	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
		int cos_id = i >> DLB2_NUM_COS_DOMAINS;
		struct dlb2_ldb_port *port;

		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

		dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
			      &port->func_list);
	}

	hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
		list = &hw->rsrcs.dir_pq_pairs[i].func_list;

		dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
	}
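
	/*
	 * Credit pools differ by hardware version: DLB 2.0 (V2) keeps
	 * separate load-balanced (QED) and directed (DQED) credit pools,
	 * while DLB 2.5 exposes a single combined pool, as reflected below.
	 */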

	if (hw->ver == DLB2_HW_V2) {
		hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
		hw->pf.num_avail_dqed_entries =
			DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
	} else {
		hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
	}

	hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;

	ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
				DLB2_MAX_NUM_HIST_LIST_ENTRIES);
	if (ret)
		goto unwind;

	ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
	if (ret)
		goto unwind;

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
					DLB2_MAX_NUM_HIST_LIST_ENTRIES);
		if (ret)
			goto unwind;

		ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
		if (ret)
			goto unwind;
	}

	/* Initialize the hardware resource IDs */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		hw->domains[i].id.phys_id = i;
		hw->domains[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
		hw->rsrcs.ldb_queues[i].id.phys_id = i;
		hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
		hw->rsrcs.ldb_ports[i].id.phys_id = i;
		hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
		hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
		hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
		hw->rsrcs.sn_groups[i].id = i;
		/* Default mode (0) is 64 sequence numbers per queue */
		hw->rsrcs.sn_groups[i].mode = 0;
		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
	}

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;

	return 0;

unwind:
	dlb2_resource_free(hw);

	return ret;
}
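
/*
 * Lifecycle sketch (illustrative only): dlb2_resource_init() is called once
 * per device from the driver's init path with a zeroed dlb2_hw, and
 * dlb2_resource_free() releases the bitmap state on reset or unload:
 *
 *	memset(hw, 0, sizeof(*hw));
 *	ret = dlb2_resource_init(hw, ver);
 *	...
 *	dlb2_resource_free(hw);
 */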

/**
 * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
 * @hw: dlb2_hw handle for a particular device.
 * @ver: device version.
 *
 * Clearing the PMCSR must be done at initialization to make the device fully
 * operational.
 */
void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
	u32 pmcsr_dis;

	pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));

	DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);

	DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
}

/**
 * dlb2_hw_get_num_resources() - query the PCI function's available resources
 * @hw: dlb2_hw handle for a particular device.
 * @arg: pointer to resource counts.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function returns the number of available resources for the PF or for a
 * VF.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, -EINVAL if vdev_req is true and vdev_id is
 * invalid.
 */
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
			      struct dlb2_get_num_resources_args *arg,
			      bool vdev_req,
			      unsigned int vdev_id)
{
	struct dlb2_function_resources *rsrcs;
	struct dlb2_bitmap *map;
	int i;

	if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
		return -EINVAL;

	if (vdev_req)
		rsrcs = &hw->vdev[vdev_id];
	else
		rsrcs = &hw->pf;

	arg->num_sched_domains = rsrcs->num_avail_domains;

	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;

	arg->num_ldb_ports = 0;
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];

	arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
	arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
	arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
	arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];

	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;

	arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;

	map = rsrcs->avail_hist_list_entries;

	arg->num_hist_list_entries = dlb2_bitmap_count(map);

	arg->max_contiguous_hist_list_entries =
		dlb2_bitmap_longest_set_range(map);

	if (hw->ver == DLB2_HW_V2) {
		arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
		arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
	} else {
		arg->num_credits = rsrcs->num_avail_entries;
	}

	return 0;
}

static void dlb2_configure_domain_credits_v2_5(struct dlb2_hw *hw,
					       struct dlb2_hw_domain *domain)
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, domain->num_credits, DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits_v2(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain)
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, domain->num_ldb_credits,
		      DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
	DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), reg);

	reg = 0;
	DLB2_BITS_SET(reg, domain->num_dir_credits,
		      DLB2_CHP_CFG_DIR_VAS_CRD_COUNT);
	DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	if (hw->ver == DLB2_HW_V2)
		dlb2_configure_domain_credits_v2(hw, domain);
	else
		dlb2_configure_domain_credits_v2_5(hw, domain);
}

static int dlb2_attach_credits(struct dlb2_function_resources *rsrcs,
			       struct dlb2_hw_domain *domain,
			       u32 num_credits,
			       struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_entries < num_credits) {
		resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_entries -= num_credits;
	domain->num_credits += num_credits;

	return 0;
}
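
/*
 * The dlb2_attach_*() helpers below all follow the same check-then-commit
 * shape: verify the function-level pool can cover the request, set
 * resp->status and return -EINVAL if it can't, and only then move the
 * resources from the function's pool into the domain's accounting.
 */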

static struct dlb2_ldb_port *
dlb2_get_next_ldb_port(struct dlb2_hw *hw,
		       struct dlb2_function_resources *rsrcs,
		       u32 domain_id,
		       u32 cos_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	RTE_SET_USED(iter);

	/*
	 * To reduce the odds of consecutive load-balanced ports mapping to the
	 * same queue(s), the driver attempts to allocate ports whose neighbors
	 * are owned by a different domain.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[next].owned ||
		    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
			continue;

		if (!hw->rsrcs.ldb_ports[prev].owned ||
		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
			continue;

		return port;
	}

	/*
	 * Failing that, the driver looks for a port with one neighbor owned by
	 * a different domain and the other unallocated.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
			return port;

		if (!hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
			return port;
	}

	/*
	 * Failing that, the driver looks for a port with both neighbors
	 * unallocated.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    !hw->rsrcs.ldb_ports[next].owned)
			return port;
	}

	/* If all else fails, the driver returns the next available port. */
	return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
				   typeof(*port));
}

static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
				   struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_ports,
				   u32 cos_id,
				   struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb2_ldb_port *port;

		port = dlb2_get_next_ldb_port(hw, rsrcs,
					      domain->id.phys_id, cos_id);
		if (port == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
			      &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb2_list_add(&domain->avail_ldb_ports[cos_id],
			      &port->domain_list);
	}

	rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;

	return 0;
}

static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
				 struct dlb2_function_resources *rsrcs,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_create_sched_domain_args *args,
				 struct dlb2_cmd_response *resp)
{
	unsigned int i, j;
	int ret;

	if (args->cos_strict) {
		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			u32 num = args->num_cos_ldb_ports[i];

			/* Allocate ports from specific classes-of-service */
			ret = __dlb2_attach_ldb_ports(hw,
						      rsrcs,
						      domain,
						      num,
						      i,
						      resp);
			if (ret)
				return ret;
		}
	} else {
		unsigned int k;
		u32 cos_id;

		/*
		 * Attempt to allocate from a specific class-of-service, but
		 * fall back to the other classes if that fails.
		 */
		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
				for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
					cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;

					ret = __dlb2_attach_ldb_ports(hw,
								      rsrcs,
								      domain,
								      1,
								      cos_id,
								      resp);
					if (ret == 0)
						break;
				}

				if (ret)
					return ret;
			}
		}
	}

	/* Allocate num_ldb_ports from any class-of-service */
	for (i = 0; i < args->num_ldb_ports; i++) {
		for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
			ret = __dlb2_attach_ldb_ports(hw,
						      rsrcs,
						      domain,
						      1,
						      j,
						      resp);
			if (ret == 0)
				break;
		}

		if (ret)
			return ret;
	}

	return 0;
}
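
/*
 * Note on the fallback path above: for each requested port in CoS i, the k
 * loop probes classes (i + k) % DLB2_NUM_COS_DOMAINS in order, so the
 * preferred class is tried first and the remaining classes are rotated
 * through before the request is failed.
 */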

static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
				 struct dlb2_function_resources *rsrcs,
				 struct dlb2_hw_domain *domain,
				 u32 num_ports,
				 struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb2_dir_pq_pair *port;

		port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
					   typeof(*port));
		if (port == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
	}

	rsrcs->num_avail_dir_pq_pairs -= num_ports;

	return 0;
}

static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_credits,
				   struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_qed_entries < num_credits) {
		resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_qed_entries -= num_credits;
	domain->num_ldb_credits += num_credits;

	return 0;
}

static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_credits,
				   struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_dqed_entries < num_credits) {
		resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_dqed_entries -= num_credits;
	domain->num_dir_credits += num_credits;

	return 0;
}

static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
					struct dlb2_hw_domain *domain,
					u32 num_atomic_inflights,
					struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
	domain->num_avail_aqed_entries += num_atomic_inflights;

	return 0;
}

static int
dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
				     struct dlb2_hw_domain *domain,
				     u32 num_hist_list_entries,
				     struct dlb2_cmd_response *resp)
{
	struct dlb2_bitmap *bitmap;
	int base;

	if (num_hist_list_entries) {
		bitmap = rsrcs->avail_hist_list_entries;

		base = dlb2_bitmap_find_set_bit_range(bitmap,
						      num_hist_list_entries);
		if (base < 0)
			goto error;

		domain->total_hist_list_entries = num_hist_list_entries;
		domain->avail_hist_list_entries = num_hist_list_entries;
		domain->hist_list_entry_base = base;
		domain->hist_list_entry_offset = 0;

		dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
	}

	return 0;

error:
	resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
	return -EINVAL;
}
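
/*
 * Unlike the countable resources above, history list entries must be
 * physically contiguous: the allocator searches the bitmap for a set-bit
 * range of the requested length and records only a base and a count, which
 * is why availability is reported elsewhere in terms of the longest
 * contiguous range rather than the raw total.
 */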

static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
				  struct dlb2_function_resources *rsrcs,
				  struct dlb2_hw_domain *domain,
				  u32 num_queues,
				  struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_ldb_queues < num_queues) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_queues; i++) {
		struct dlb2_ldb_queue *queue;

		queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
					    typeof(*queue));
		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);

		queue->domain_id = domain->id;
		queue->owned = true;

		dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
	}

	rsrcs->num_avail_ldb_queues -= num_queues;

	return 0;
}

static int
dlb2_domain_attach_resources(struct dlb2_hw *hw,
			     struct dlb2_function_resources *rsrcs,
			     struct dlb2_hw_domain *domain,
			     struct dlb2_create_sched_domain_args *args,
			     struct dlb2_cmd_response *resp)
{
	int ret;

	ret = dlb2_attach_ldb_queues(hw,
				     rsrcs,
				     domain,
				     args->num_ldb_queues,
				     resp);
	if (ret)
		return ret;

	ret = dlb2_attach_ldb_ports(hw,
				    rsrcs,
				    domain,
				    args,
				    resp);
	if (ret)
		return ret;

	ret = dlb2_attach_dir_ports(hw,
				    rsrcs,
				    domain,
				    args->num_dir_ports,
				    resp);
	if (ret)
		return ret;

	if (hw->ver == DLB2_HW_V2) {
		ret = dlb2_attach_ldb_credits(rsrcs,
					      domain,
					      args->num_ldb_credits,
					      resp);
		if (ret)
			return ret;

		ret = dlb2_attach_dir_credits(rsrcs,
					      domain,
					      args->num_dir_credits,
					      resp);
		if (ret)
			return ret;
	} else { /* DLB 2.5 */
		ret = dlb2_attach_credits(rsrcs,
					  domain,
					  args->num_credits,
					  resp);
		if (ret)
			return ret;
	}

	ret = dlb2_attach_domain_hist_list_entries(rsrcs,
						   domain,
						   args->num_hist_list_entries,
						   resp);
	if (ret)
		return ret;

	ret = dlb2_attach_atomic_inflights(rsrcs,
					   domain,
					   args->num_atomic_inflights,
					   resp);
	if (ret)
		return ret;

	dlb2_configure_domain_credits(hw, domain);

	domain->configured = true;

	domain->started = false;

	rsrcs->num_avail_domains--;

	return 0;
}

static int
dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
				  struct dlb2_create_sched_domain_args *args,
				  struct dlb2_cmd_response *resp,
				  struct dlb2_hw *hw,
				  struct dlb2_hw_domain **out_domain)
{
	u32 num_avail_ldb_ports, req_ldb_ports;
	struct dlb2_bitmap *avail_hl_entries;
	unsigned int max_contig_hl_range;
	struct dlb2_hw_domain *domain;
	int i;

	avail_hl_entries = rsrcs->avail_hist_list_entries;

	max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);

	num_avail_ldb_ports = 0;
	req_ldb_ports = 0;
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];

		req_ldb_ports += args->num_cos_ldb_ports[i];
	}

	req_ldb_ports += args->num_ldb_ports;

	if (rsrcs->num_avail_domains < 1) {
		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
		return -EINVAL;
	}

	domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
	if (domain == NULL) {
		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
		return -EFAULT;
	}

	if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	if (req_ldb_ports > num_avail_ldb_ports) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
		if (args->num_cos_ldb_ports[i] >
		    rsrcs->num_avail_ldb_ports[i]) {
			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
		resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
		return -EINVAL;
	}

	if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	if (hw->ver == DLB2_HW_V2_5) {
		if (rsrcs->num_avail_entries < args->num_credits) {
			resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
	} else {
		if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
			resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
		if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
			resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	if (max_contig_hl_range < args->num_hist_list_entries) {
		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
		return -EINVAL;
	}

	*out_domain = domain;

	return 0;
}

static void
dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
				  struct dlb2_create_sched_domain_args *args,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tNumber of LDB queues: %d\n",
		    args->num_ldb_queues);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
		    args->num_ldb_ports);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0): %d\n",
		    args->num_cos_ldb_ports[0]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1): %d\n",
		    args->num_cos_ldb_ports[1]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2): %d\n",
		    args->num_cos_ldb_ports[2]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3): %d\n",
		    args->num_cos_ldb_ports[3]);
	DLB2_HW_DBG(hw, "\tStrict CoS allocation: %d\n",
		    args->cos_strict);
	DLB2_HW_DBG(hw, "\tNumber of DIR ports: %d\n",
		    args->num_dir_ports);
	DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
		    args->num_atomic_inflights);
	DLB2_HW_DBG(hw, "\tNumber of hist list entries: %d\n",
		    args->num_hist_list_entries);
	if (hw->ver == DLB2_HW_V2) {
		DLB2_HW_DBG(hw, "\tNumber of LDB credits: %d\n",
			    args->num_ldb_credits);
		DLB2_HW_DBG(hw, "\tNumber of DIR credits: %d\n",
			    args->num_dir_credits);
	} else {
		DLB2_HW_DBG(hw, "\tNumber of credits: %d\n",
			    args->num_credits);
	}
}

/**
 * dlb2_hw_create_sched_domain() - create a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @args: scheduling domain creation arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function creates a scheduling domain containing the resources specified
 * in args. The individual resources (queues, ports, credits) can be configured
 * after creating a scheduling domain.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the domain ID.
 *
 * resp->id contains a virtual ID if vdev_req is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, or the requested domain name
 *	    is already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
				struct dlb2_create_sched_domain_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	int ret;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp, hw, &domain);
	if (ret)
		return ret;

	dlb2_init_domain_rsrc_lists(domain);

	ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
	if (ret) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to attach domain resources.\n",
			    __func__);

		return ret;
	}

	dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);

	dlb2_list_add(&rsrcs->used_domains, &domain->func_list);

	resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
	resp->status = 0;

	return 0;
}
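
/*
 * Minimal call sketch (illustrative values; error handling elided):
 *
 *	struct dlb2_create_sched_domain_args args = { 0 };
 *	struct dlb2_cmd_response resp = { 0 };
 *	int ret;
 *
 *	args.num_ldb_queues = 1;
 *	args.num_ldb_ports = 2;
 *	ret = dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0);
 *	On success, resp.id holds the new domain's (virtual or physical) ID.
 */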

static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_dir_pq_pair *port)
{
	u32 reg = 0;

	DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
				   struct dlb2_dir_pq_pair *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));

	/*
	 * Account for the initial token count, which is used to provide a CQ
	 * with depth less than 8.
	 */

	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
	       port->init_tkn_cnt;
}

static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
			     struct dlb2_dir_pq_pair *port)
{
	unsigned int port_id = port->id.phys_id;
	u32 cnt;

	/* Return any outstanding tokens */
	cnt = dlb2_dir_cq_token_count(hw, port);

	if (cnt != 0) {
		struct dlb2_hcw hcw_mem[8], *hcw;
		void __iomem *pp_addr;

		pp_addr = os_map_producer_port(hw, port_id, false);

		/* Point hcw to a 64B-aligned location */
		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

		/*
		 * Program the first HCW for a batch token return and
		 * the rest as NOOPs
		 */
		memset(hcw, 0, 4 * sizeof(*hcw));
		hcw->cq_token = 1;
		hcw->lock_id = cnt - 1;

		dlb2_movdir64b(pp_addr, hcw);

		os_fence_hcw(hw, pp_addr);

		os_unmap_producer_port(hw, pp_addr);
	}

	return cnt;
}
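
/*
 * The drain above appears to rely on the batch token-return HCW encoding
 * (inferred from the fields programmed here, not from a hardware spec):
 * with cq_token set, the lock_id field carries (tokens - 1), so a single
 * enqueued HCW returns all `cnt` outstanding tokens at once, and the three
 * trailing zeroed HCWs in the 64B line act as NOOPs.
 */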

static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *port)
{
	u32 reg = 0;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
				     struct dlb2_hw_domain *domain,
				     bool toggle_port)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	int drain_cnt = 0;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		/*
		 * Can't drain a port if it's not configured, and there's
		 * nothing to drain if its queue is unconfigured.
		 */
		if (!port->port_configured || !port->queue_configured)
			continue;

		if (toggle_port)
			dlb2_dir_port_cq_disable(hw, port);

		drain_cnt = dlb2_drain_dir_cq(hw, port);

		if (toggle_port)
			dlb2_dir_port_cq_enable(hw, port);
	}

	return drain_cnt;
}

static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
				struct dlb2_dir_pq_pair *queue)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
							   queue->id.phys_id));

	return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
}

static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *queue)
{
	return dlb2_dir_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
		if (!dlb2_dir_queue_is_empty(hw, queue))
			return false;
	}

	return true;
}

static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	int i;

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
		int drain_cnt;

		drain_cnt = dlb2_domain_drain_dir_cqs(hw, domain, false);

		if (dlb2_domain_dir_queues_empty(hw, domain))
			break;

		/*
		 * Allow time for DLB to schedule QEs before draining
		 * the CQs again.
		 */
		if (!drain_cnt)
			rte_delay_us(1);
	}

	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to empty queues\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * Drain the CQs one more time. For the queues to have gone empty, they
	 * must have scheduled one or more QEs.
	 */
	dlb2_domain_drain_dir_cqs(hw, domain, true);

	return 0;
}

static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_ldb_port *port)
{
	u32 reg = 0;

	/*
	 * Don't re-enable the port if a removal is pending. The caller should
	 * mark this port as enabled (if it isn't already), and when the
	 * removal completes the port will be enabled.
	 */
	if (port->num_pending_removals)
		return;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_ldb_port *port)
{
	u32 reg = 0;

	DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
				      struct dlb2_ldb_port *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));

	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
}

static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));

	/*
	 * Account for the initial token count, which is used to provide a CQ
	 * with depth less than 8.
	 */

	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
	       port->init_tkn_cnt;
}

static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
	u32 infl_cnt, tkn_cnt;
	unsigned int i;

	infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
	tkn_cnt = dlb2_ldb_cq_token_count(hw, port);

	if (infl_cnt || tkn_cnt) {
		struct dlb2_hcw hcw_mem[8], *hcw;
		void __iomem *pp_addr;

		pp_addr = os_map_producer_port(hw, port->id.phys_id, true);

		/* Point hcw to a 64B-aligned location */
		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

		/*
		 * Program the first HCW for a completion and token return and
		 * the other HCWs as NOOPs
		 */

		memset(hcw, 0, 4 * sizeof(*hcw));
		hcw->qe_comp = (infl_cnt > 0);
		hcw->cq_token = (tkn_cnt > 0);
		hcw->lock_id = tkn_cnt - 1;

		/* Return tokens in the first HCW */
		dlb2_movdir64b(pp_addr, hcw);

		hcw->cq_token = 0;

		/* Issue remaining completions (if any) */
		for (i = 1; i < infl_cnt; i++)
			dlb2_movdir64b(pp_addr, hcw);

		os_fence_hcw(hw, pp_addr);

		os_unmap_producer_port(hw, pp_addr);
	}

	return tkn_cnt;
}

static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
				     struct dlb2_hw_domain *domain,
				     bool toggle_port)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int drain_cnt = 0;
	int i;
	RTE_SET_USED(iter);

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			if (toggle_port)
				dlb2_ldb_port_cq_disable(hw, port);

			drain_cnt = dlb2_drain_ldb_cq(hw, port);

			if (toggle_port)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}

	return drain_cnt;
}

static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
				struct dlb2_ldb_queue *queue)
{
	u32 aqed, ldb, atm;

	aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
							    queue->id.phys_id));
	ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
							   queue->id.phys_id));
	atm = DLB2_CSR_RD(hw,
			  DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));

	return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
	       + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
	       + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
}

static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
				    struct dlb2_ldb_queue *queue)
{
	return dlb2_ldb_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
					    struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		if (queue->num_mappings == 0)
			continue;

		if (!dlb2_ldb_queue_is_empty(hw, queue))
			return false;
	}

	return true;
}

static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
					   struct dlb2_hw_domain *domain)
{
	int i;

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	if (domain->num_pending_removals > 0) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to unmap domain queues\n",
			    __func__);
		return -EFAULT;
	}

	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
		int drain_cnt;

		drain_cnt = dlb2_domain_drain_ldb_cqs(hw, domain, false);

		if (dlb2_domain_mapped_queues_empty(hw, domain))
			break;

		/*
		 * Allow time for DLB to schedule QEs before draining
		 * the CQs again.
		 */
		if (!drain_cnt)
			rte_delay_us(1);
	}

	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to empty queues\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * Drain the CQs one more time. For the queues to have gone empty, they
	 * must have scheduled one or more QEs.
	 */
	dlb2_domain_drain_ldb_cqs(hw, domain, true);

	return 0;
}

static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			port->enabled = true;

			dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}

static struct dlb2_ldb_queue *
dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
			   u32 id,
			   bool vdev_req,
			   unsigned int vdev_id)
{
	struct dlb2_list_entry *iter1;
	struct dlb2_list_entry *iter2;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter1);
	RTE_SET_USED(iter2);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	if (!vdev_req)
		return &hw->rsrcs.ldb_queues[id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
			if (queue->id.virt_id == id)
				return queue;
		}
	}

	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
		if (queue->id.virt_id == id)
			return queue;
	}

	return NULL;
}

static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
						      u32 id,
						      bool vdev_req,
						      unsigned int vdev_id)
{
	struct dlb2_list_entry *iteration;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	RTE_SET_USED(iteration);

	if (id >= DLB2_MAX_NUM_DOMAINS)
		return NULL;

	if (!vdev_req)
		return &hw->domains[id];

	rsrcs = &hw->vdev[vdev_id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
		if (domain->id.virt_id == id)
			return domain;
	}

	return NULL;
}

static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot,
					   enum dlb2_qid_map_state new_state)
{
	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
	struct dlb2_hw_domain *domain;
	int domain_id;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, domain_id);
		return -EFAULT;
	}

	switch (curr_state) {
	case DLB2_QUEUE_UNMAPPED:
		switch (new_state) {
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			break;
		case DLB2_QUEUE_MAP_IN_PROG:
			queue->num_pending_additions++;
			domain->num_pending_additions++;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAPPED:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG:
			port->num_pending_removals++;
			domain->num_pending_removals++;
			break;
		case DLB2_QUEUE_MAPPED:
			/* Priority change, nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_MAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
			/* Nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
		switch (new_state) {
		case DLB2_QUEUE_UNMAP_IN_PROG:
			/* Nothing to update */
			break;
		case DLB2_QUEUE_UNMAPPED:
			/*
			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
			 * becomes UNMAPPED before it transitions to
			 * MAP_IN_PROG.
			 */
			queue->num_mappings--;
			port->num_mappings--;
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		default:
			goto error;
		}
		break;
	default:
		goto error;
	}

	port->qid_map[slot].state = new_state;

	DLB2_HW_DBG(hw,
		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return 0;

error:
	DLB2_HW_ERR(hw,
		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return -EFAULT;
}
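
/*
 * For reference, the transitions accepted above are, roughly:
 *
 *	UNMAPPED            -> MAPPED | MAP_IN_PROG
 *	MAPPED              -> UNMAPPED | UNMAP_IN_PROG | MAPPED (prio change)
 *	MAP_IN_PROG         -> UNMAPPED | MAPPED
 *	UNMAP_IN_PROG       -> UNMAPPED | MAPPED | UNMAP_IN_PROG_PENDING_MAP
 *	UNMAP_IN_PROG_PENDING_MAP -> UNMAP_IN_PROG | UNMAPPED
 *
 * Everything else is rejected with -EFAULT.
 */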

static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
				enum dlb2_qid_map_state state,
				int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
				      enum dlb2_qid_map_state state,
				      struct dlb2_ldb_queue *queue,
				      int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state &&
		    port->qid_map[i].qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

/*
 * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
 * their function names imply, and should only be called by the dynamic CQ
 * mapping code.
 */
static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
					      struct dlb2_hw_domain *domain,
					      struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}

static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain,
					     struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}

static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
						struct dlb2_ldb_port *port,
						int slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}

static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
					struct dlb2_ldb_port *p,
					struct dlb2_ldb_queue *q,
					u8 priority)
{
	enum dlb2_qid_map_state state;
	u32 lsp_qid2cq2;
	u32 lsp_qid2cq;
	u32 atm_qid2cq;
	u32 cq2priov;
	u32 cq2qid;
	int i;

	/* Look for a pending or already mapped slot, else an unused slot */
	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/* Read-modify-write the priority and valid bit register */
	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));

	cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
	cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
		    & DLB2_LSP_CQ2PRIOV_PRIO;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);

	/* Read-modify-write the QID map register */
	if (i < 4)
		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
							  p->id.phys_id));
	else
		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
							  p->id.phys_id));

	if (i == 0 || i == 4)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
	if (i == 1 || i == 5)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
	if (i == 2 || i == 6)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
	if (i == 3 || i == 7)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);

	if (i < 4)
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
	else
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);

	atm_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_ATM_QID2CQIDIX(q->id.phys_id,
						     p->id.phys_id / 4));

	lsp_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
						     p->id.phys_id / 4));

	lsp_qid2cq2 = DLB2_CSR_RD(hw,
				  DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
						       p->id.phys_id / 4));

	switch (p->id.phys_id % 4) {
	case 0:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
		break;

	case 1:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
		break;

	case 2:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
		break;

	case 3:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    atm_qid2cq);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(hw->ver,
					q->id.phys_id, p->id.phys_id / 4),
		    lsp_qid2cq);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(hw->ver,
					 q->id.phys_id, p->id.phys_id / 4),
		    lsp_qid2cq2);

	dlb2_flush_csr(hw);

	p->qid_map[i].qid = q->id.phys_id;
	p->qid_map[i].priority = priority;

	state = DLB2_QUEUE_MAPPED;

	return dlb2_port_slot_state_transition(hw, p, q, i, state);
}
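
/*
 * Register layout note: each CQ has 8 QID mapping slots split across two
 * registers (CQ2QID0 holds slots 0-3, CQ2QID1 holds slots 4-7), with slot
 * i's QID written to position P0-P3 = i % 4. Similarly, the QID2CQIDIX*
 * registers pack four CQs per register, selected by p->id.phys_id / 4 with
 * the per-CQ bit group chosen by p->id.phys_id % 4.
 */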

static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot)
{
	u32 ctrl = 0;
	u32 active;
	u32 enq;

	/* Set the atomic scheduling haswork bit */
	active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
							      queue->id.phys_id));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BITS_SET(ctrl,
		      DLB2_BITS_GET(active,
				    DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
		      DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);

	/* Set the non-atomic scheduling haswork bit */
	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	enq = DLB2_CSR_RD(hw,
			  DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
						       queue->id.phys_id));

	memset(&ctrl, 0, sizeof(ctrl));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BITS_SET(ctrl,
		      DLB2_BITS_GET(enq,
				    DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
		      DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);

	return 0;
}

static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      u8 slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	memset(&ctrl, 0, sizeof(ctrl));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
					      struct dlb2_ldb_queue *queue)
{
	u32 infl_lim = 0;

	DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
		      DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);

	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
		    infl_lim);
}

static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
						struct dlb2_ldb_queue *queue)
{
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
}

static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
						struct dlb2_hw_domain *domain,
						struct dlb2_ldb_port *port,
						struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	enum dlb2_qid_map_state state;
	int slot, ret, i;
	u32 infl_cnt;
	u8 prio;
	RTE_SET_USED(iter);

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: non-zero QID inflight count\n",
			    __func__);
		return -EINVAL;
	}

	/*
	 * Static map the port and set its corresponding has_work bits.
	 */
	state = DLB2_QUEUE_MAP_IN_PROG;
	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
		return -EINVAL;

	prio = port->qid_map[slot].priority;

	/*
	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
	 * the port's qid_map state.
	 */
	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
	if (ret)
		return ret;

	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
	if (ret)
		return ret;

	/*
	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
	 * prevent spurious schedules from causing the queue's inflight
	 * count to increase.
	 */
	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);

	/* Reset the queue's inflight status */
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			state = DLB2_QUEUE_MAPPED;
			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
		}
	}

	dlb2_ldb_queue_set_inflight_limit(hw, queue);

	/* Re-enable CQs mapped to this queue */
	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

	/* If this queue has other mappings pending, clear its inflight limit */
	if (queue->num_pending_additions > 0)
		dlb2_ldb_queue_clear_inflight_limit(hw, queue);

	return 0;
}

/*
 * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
 * @hw: dlb2_hw handle for a particular device.
 * @port: load-balanced port
 * @queue: load-balanced queue
 * @priority: queue servicing priority
 *
 * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
 * at a later point, and <0 if an error occurred.
 */
static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
					 struct dlb2_ldb_port *port,
					 struct dlb2_ldb_queue *queue,
					 u8 priority)
{
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	int domain_id, slot, ret;
	u32 infl_cnt;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, port->domain_id.phys_id);
		return -EINVAL;
	}

	/*
	 * Set the QID inflight limit to 0 to prevent further scheduling of the
	 * queue.
	 */
	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
						  queue->id.phys_id), 0);

	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
		DLB2_HW_ERR(hw,
			    "Internal error: No available unmapped slots\n");
		return -EFAULT;
	}

	port->qid_map[slot].qid = queue->id.phys_id;
	port->qid_map[slot].priority = priority;

	state = DLB2_QUEUE_MAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
	if (ret)
		return ret;

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	/*
	 * Disable the affected CQ, and the CQs already mapped to the QID,
	 * before reading the QID's inflight count a second time. There is an
	 * unlikely race in which the QID may schedule one more QE after we
	 * read an inflight count of 0, and disabling the CQs guarantees that
	 * the race will not occur after a re-read of the inflight count
	 * register.
	 */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		if (port->enabled)
			dlb2_ldb_port_cq_enable(hw, port);

		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}
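
/*
 * Dynamic mapping protocol recap: the routine above (1) zeroes the QID's
 * inflight limit so no new QEs are scheduled, (2) marks the slot
 * MAP_IN_PROG, (3) checks the inflight count, disabling the relevant CQs
 * and re-checking to close a small scheduling race, and (4) either
 * completes the map immediately via dlb2_ldb_port_finish_map_qid_dynamic()
 * or returns 1 after deferring completion to a worker, which runs once
 * software has drained the queue's inflight events.
 */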

static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain,
					struct dlb2_ldb_port *port)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		u32 infl_cnt;
		struct dlb2_ldb_queue *queue;
		int qid;

		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
			continue;

		qid = port->qid_map[i].qid;

		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);

		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: unable to find queue %d\n",
				    __func__, qid);
			continue;
		}

		infl_cnt = DLB2_CSR_RD(hw,
				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));

		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
			continue;

		/*
		 * Disable the affected CQ, and the CQs already mapped to the
		 * QID, before reading the QID's inflight count a second time.
		 * There is an unlikely race in which the QID may schedule one
		 * more QE after we read an inflight count of 0, and disabling
		 * the CQs guarantees that the race will not occur after a
		 * re-read of the inflight count register.
		 */
		if (port->enabled)
			dlb2_ldb_port_cq_disable(hw, port);

		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

		infl_cnt = DLB2_CSR_RD(hw,
				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));

		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);

			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

			continue;
		}

		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
	}
}

static unsigned int
dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_additions == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_map_port(hw, domain, port);
	}

	return domain->num_pending_additions;
}

static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port,
				   struct dlb2_ldb_queue *queue)
{
	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
	u32 lsp_qid2cq2;
	u32 lsp_qid2cq;
	u32 atm_qid2cq;
	u32 cq2priov;
	u32 queue_id;
	u32 port_id;
	int i;

	/* Find the queue's slot */
	mapped = DLB2_QUEUE_MAPPED;
	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: QID %d isn't mapped\n",
			    __func__, __LINE__, queue->id.phys_id);
		return -EFAULT;
	}

	port_id = port->id.phys_id;
	queue_id = queue->id.phys_id;

	/* Read-modify-write the priority and valid bit register */
	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));

	cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);

	atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
							 port_id / 4));

	lsp_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_LSP_QID2CQIDIX(hw->ver,
						     queue_id, port_id / 4));

	lsp_qid2cq2 = DLB2_CSR_RD(hw,
				  DLB2_LSP_QID2CQIDIX2(hw->ver,
						       queue_id, port_id / 4));

	switch (port_id % 4) {
	case 0:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
		break;

	case 1:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
		break;

	case 2:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
		break;

	case 3:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
		break;
	}

	DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);

	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
		    lsp_qid2cq);

	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
		    lsp_qid2cq2);

	dlb2_flush_csr(hw);

	unmapped = DLB2_QUEUE_UNMAPPED;

	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
}

static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_ldb_port *port,
				 struct dlb2_ldb_queue *queue,
				 u8 prio)
{
	if (domain->started)
		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
	else
		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
}

static void
dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
				   struct dlb2_hw_domain *domain,
				   struct dlb2_ldb_port *port,
				   int slot)
{
	enum dlb2_qid_map_state state;
	struct dlb2_ldb_queue *queue;

	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];

	state = port->qid_map[slot].state;

	/* Update the QID2CQIDX and CQ2QID vectors */
	dlb2_ldb_port_unmap_qid(hw, port, queue);

	/*
	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
	 * the has_work bits
	 */
	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);

	/* Reset the {CQ, slot} to its default state */
	dlb2_ldb_port_set_queue_if_status(hw, port, slot);

	/* Re-enable the CQ if it was not manually disabled by the user */
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	/*
	 * If there is a mapping that is pending this slot's removal, perform
	 * the mapping now.
	 */
	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
		struct dlb2_ldb_port_qid_map *map;
		struct dlb2_ldb_queue *map_queue;
		u8 prio;

		map = &port->qid_map[slot];

		map->qid = map->pending_qid;
		map->priority = map->pending_priority;

		map_queue = &hw->rsrcs.ldb_queues[map->qid];
		prio = map->priority;

		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
	}
}

static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain,
					  struct dlb2_ldb_port *port)
{
	u32 infl_cnt;
	int i;
	const int max_iters = 1000;
	const int iter_poll_us = 100;

	if (port->num_pending_removals == 0)
		return false;

	/*
	 * The unmap requires all the CQ's outstanding inflights to be
	 * completed. Poll up to 100ms.
	 */
	for (i = 0; i < max_iters; i++) {
		infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
							port->id.phys_id));

		if (DLB2_BITS_GET(infl_cnt,
				  DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) == 0)
			break;
		rte_delay_us_sleep(iter_poll_us);
	}

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
		return false;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map;

		map = &port->qid_map[i];

		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
			continue;

		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
	}

	return true;
}

static unsigned int
dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_removals == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_unmap_port(hw, domain, port);
	}

	return domain->num_pending_removals;
}

static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			port->enabled = false;

			dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}

static void dlb2_log_reset_domain(struct dlb2_hw *hw,
				  u32 domain_id,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}

static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain,
					 unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	u32 vpp_v = 0;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		unsigned int offs;
		u32 virt_id;

		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;

		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
	}
}

static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain,
					 unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	u32 vpp_v = 0;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			unsigned int offs;
			u32 virt_id;

			if (hw->virt_mode == DLB2_VIRT_SRIOV)
				virt_id = port->id.virt_id;
			else
				virt_id = port->id.phys_id;

			offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;

			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
		}
	}
}

static void
dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	u32 int_en = 0;
	u32 wd_en = 0;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			DLB2_CSR_WR(hw,
				    DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,
							    port->id.phys_id),
				    int_en);

			DLB2_CSR_WR(hw,
				    DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,
							   port->id.phys_id),
				    wd_en);
		}
	}
}

static void
dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	u32 int_en = 0;
	u32 wd_en = 0;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		DLB2_CSR_WR(hw,
			    DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
			    int_en);

		DLB2_CSR_WR(hw,
			    DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
			    wd_en);
	}
}

static void
dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		int idx = domain_offset + queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);

		if (queue->id.vdev_owned) {
			DLB2_CSR_WR(hw,
				    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
				    0);

			idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
				queue->id.virt_id;

			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);

			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
		}
	}
}

static void
dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *queue;
	unsigned long max_ports;
	int domain_offset;
	RTE_SET_USED(iter);

	max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);

	domain_offset = domain->id.phys_id * max_ports;

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
		int idx = domain_offset + queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);

		if (queue->id.vdev_owned) {
			idx = queue->id.vdev_id * max_ports + queue->id.virt_id;

			DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);

			DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
		}
	}
}

static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
					       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	u32 chk_en = 0;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			DLB2_CSR_WR(hw,
				    DLB2_CHP_SN_CHK_ENBL(hw->ver,
							 port->id.phys_id),
				    chk_en);
		}
	}
}

static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
						 struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			int j;

			for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
				if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
					break;
			}

			if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
				DLB2_HW_ERR(hw,
					    "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
					    __func__, port->id.phys_id);
				return -EFAULT;
			}
		}
	}

	return 0;
}
2677 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2678 struct dlb2_hw_domain *domain)
2680 struct dlb2_list_entry *iter;
2681 struct dlb2_dir_pq_pair *port;
2684 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2685 port->enabled = false;
2687 dlb2_dir_port_cq_disable(hw, port);
2692 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2693 struct dlb2_hw_domain *domain)
2695 struct dlb2_list_entry *iter;
2696 struct dlb2_dir_pq_pair *port;
2700 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2702 DLB2_SYS_DIR_PP_V(port->id.phys_id),
2708 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2709 struct dlb2_hw_domain *domain)
2711 struct dlb2_list_entry *iter;
2712 struct dlb2_ldb_port *port;
2717 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2718 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2720 DLB2_SYS_LDB_PP_V(port->id.phys_id),
2726 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2727 struct dlb2_hw_domain *domain)
2729 struct dlb2_list_entry *iter;
2730 struct dlb2_dir_pq_pair *dir_port;
2731 struct dlb2_ldb_port *ldb_port;
2732 struct dlb2_ldb_queue *queue;
2737 * Confirm that all the domain's queue's inflight counts and AQED
2738 * active counts are 0.
2740 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2741 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2743 "[%s()] Internal error: failed to empty ldb queue %d\n",
2744 __func__, queue->id.phys_id);
2749 /* Confirm that all the domain's CQs inflight and token counts are 0. */
2750 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2751 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2752 if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2753 dlb2_ldb_cq_token_count(hw, ldb_port)) {
2755 "[%s()] Internal error: failed to empty ldb port %d\n",
2756 __func__, ldb_port->id.phys_id);
2762 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2763 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2765 "[%s()] Internal error: failed to empty dir queue %d\n",
2766 __func__, dir_port->id.phys_id);
2770 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2772 "[%s()] Internal error: failed to empty dir port %d\n",
2773 __func__, dir_port->id.phys_id);
2781 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2782 struct dlb2_ldb_port *port)
2785 DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2786 DLB2_SYS_LDB_PP2VAS_RST);
2789 DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
2790 DLB2_CHP_LDB_CQ2VAS_RST);
2793 DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2794 DLB2_SYS_LDB_PP2VDEV_RST);
2796 if (port->id.vdev_owned) {
2801 * DLB uses producer port address bits 17:12 to determine the
2802 * producer port ID. In Scalable IOV mode, PP accesses come
2803 * through the PF MMIO window for the physical producer port,
2804 * so for translation purposes the virtual and physical port
2807 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2808 virt_id = port->id.virt_id;
2810 virt_id = port->id.phys_id;
2812 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2815 DLB2_SYS_VF_LDB_VPP2PP(offs),
2816 DLB2_SYS_VF_LDB_VPP2PP_RST);
2819 DLB2_SYS_VF_LDB_VPP_V(offs),
2820 DLB2_SYS_VF_LDB_VPP_V_RST);
2824 DLB2_SYS_LDB_PP_V(port->id.phys_id),
2825 DLB2_SYS_LDB_PP_V_RST);
2828 DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
2829 DLB2_LSP_CQ_LDB_DSBL_RST);
2832 DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
2833 DLB2_CHP_LDB_CQ_DEPTH_RST);
2835 if (hw->ver != DLB2_HW_V2)
2837 DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
2838 DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
2841 DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
2842 DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2845 DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
2846 DLB2_CHP_HIST_LIST_LIM_RST);
2849 DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
2850 DLB2_CHP_HIST_LIST_BASE_RST);
2853 DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
2854 DLB2_CHP_HIST_LIST_POP_PTR_RST);
2857 DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
2858 DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2861 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2862 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2865 DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2866 DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2869 DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
2870 DLB2_CHP_LDB_CQ_INT_ENB_RST);
2873 DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2874 DLB2_SYS_LDB_CQ_ISR_RST);
2877 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2878 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2881 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2882 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2885 DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
2886 DLB2_CHP_LDB_CQ_WPTR_RST);
2889 DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
2890 DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2893 DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2894 DLB2_SYS_LDB_CQ_ADDR_L_RST);
2897 DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2898 DLB2_SYS_LDB_CQ_ADDR_U_RST);
2900 if (hw->ver == DLB2_HW_V2)
2902 DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2903 DLB2_SYS_LDB_CQ_AT_RST);
2906 DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
2907 DLB2_SYS_LDB_CQ_PASID_RST);
2910 DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2911 DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2914 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
2915 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2918 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
2919 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2922 DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
2923 DLB2_LSP_CQ2QID0_RST);
2926 DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
2927 DLB2_LSP_CQ2QID1_RST);
2930 DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
2931 DLB2_LSP_CQ2PRIOV_RST);
2934 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2935 struct dlb2_hw_domain *domain)
2937 struct dlb2_list_entry *iter;
2938 struct dlb2_ldb_port *port;
2942 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2943 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2944 __dlb2_domain_reset_ldb_port_registers(hw, port);
2949 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2950 struct dlb2_dir_pq_pair *port)
2955 DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
2956 DLB2_CHP_DIR_CQ2VAS_RST);
2959 DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
2960 DLB2_LSP_CQ_DIR_DSBL_RST);
2962 DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);
2964 if (hw->ver == DLB2_HW_V2)
2965 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2968 DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
2971 DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
2972 DLB2_CHP_DIR_CQ_DEPTH_RST);
2975 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2976 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2979 DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2980 DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2983 DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2984 DLB2_CHP_DIR_CQ_INT_ENB_RST);
2987 DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2988 DLB2_SYS_DIR_CQ_ISR_RST);
2991 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
2993 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2996 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2997 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
3000 DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
3001 DLB2_CHP_DIR_CQ_WPTR_RST);
3004 DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
3005 DLB2_LSP_CQ_DIR_TKN_CNT_RST);
3008 DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
3009 DLB2_SYS_DIR_CQ_ADDR_L_RST);
3012 DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
3013 DLB2_SYS_DIR_CQ_ADDR_U_RST);
3016 DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
3017 DLB2_SYS_DIR_CQ_AT_RST);
3019 if (hw->ver == DLB2_HW_V2)
3021 DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
3022 DLB2_SYS_DIR_CQ_AT_RST);
3025 DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
3026 DLB2_SYS_DIR_CQ_PASID_RST);
3029 DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
3030 DLB2_SYS_DIR_CQ_FMT_RST);
3033 DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
3034 DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
3037 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3038 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
3041 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3042 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
3045 DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
3046 DLB2_SYS_DIR_PP2VAS_RST);
3049 DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
3050 DLB2_CHP_DIR_CQ2VAS_RST);
3053 DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
3054 DLB2_SYS_DIR_PP2VDEV_RST);
3056 if (port->id.vdev_owned) {
3061 * DLB uses producer port address bits 17:12 to determine the
3062 * producer port ID. In Scalable IOV mode, PP accesses come
3063 * through the PF MMIO window for the physical producer port,
3064 * so for translation purposes the virtual and physical port
3067 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3068 virt_id = port->id.virt_id;
3070 virt_id = port->id.phys_id;
3072 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
3076 DLB2_SYS_VF_DIR_VPP2PP(offs),
3077 DLB2_SYS_VF_DIR_VPP2PP_RST);
3080 DLB2_SYS_VF_DIR_VPP_V(offs),
3081 DLB2_SYS_VF_DIR_VPP_V_RST);
3085 DLB2_SYS_DIR_PP_V(port->id.phys_id),
3086 DLB2_SYS_DIR_PP_V_RST);
3089 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3090 struct dlb2_hw_domain *domain)
3092 struct dlb2_list_entry *iter;
3093 struct dlb2_dir_pq_pair *port;
3096 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3097 __dlb2_domain_reset_dir_port_registers(hw, port);
3100 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
3101 struct dlb2_hw_domain *domain)
3103 struct dlb2_list_entry *iter;
3104 struct dlb2_ldb_queue *queue;
3107 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3108 unsigned int queue_id = queue->id.phys_id;
3112 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
3113 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3116 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
3117 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3120 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
3121 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3124 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
3125 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3128 DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
3129 DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3132 DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
3133 DLB2_LSP_QID_LDB_INFL_LIM_RST);
3136 DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
3137 DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3140 DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
3141 DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3144 DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
3145 DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3148 DLB2_SYS_LDB_QID_ITS(queue_id),
3149 DLB2_SYS_LDB_QID_ITS_RST);
3152 DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
3153 DLB2_CHP_ORD_QID_SN_RST);
3156 DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
3157 DLB2_CHP_ORD_QID_SN_MAP_RST);
3160 DLB2_SYS_LDB_QID_V(queue_id),
3161 DLB2_SYS_LDB_QID_V_RST);
3164 DLB2_SYS_LDB_QID_CFG_V(queue_id),
3165 DLB2_SYS_LDB_QID_CFG_V_RST);
3167 if (queue->sn_cfg_valid) {
3170 offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
3172 offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
3176 offs[queue->sn_group],
3177 DLB2_RO_GRP_0_SLT_SHFT_RST);
3180 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3182 DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
3183 DLB2_LSP_QID2CQIDIX_00_RST);
3186 DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
3187 DLB2_LSP_QID2CQIDIX2_00_RST);
3190 DLB2_ATM_QID2CQIDIX(queue_id, i),
3191 DLB2_ATM_QID2CQIDIX_00_RST);
3196 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3197 struct dlb2_hw_domain *domain)
3199 struct dlb2_list_entry *iter;
3200 struct dlb2_dir_pq_pair *queue;
3203 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3205 DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
3207 DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3210 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
3212 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3215 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
3217 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3220 DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
3222 DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3225 DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3226 DLB2_SYS_DIR_QID_ITS_RST);
3229 DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3230 DLB2_SYS_DIR_QID_V_RST);
3238 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3239 struct dlb2_hw_domain *domain)
3241 dlb2_domain_reset_ldb_port_registers(hw, domain);
3243 dlb2_domain_reset_dir_port_registers(hw, domain);
3245 dlb2_domain_reset_ldb_queue_registers(hw, domain);
3247 dlb2_domain_reset_dir_queue_registers(hw, domain);
3249 if (hw->ver == DLB2_HW_V2) {
3251 DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3252 DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3255 DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3256 DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3259 DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
3260 DLB2_CHP_CFG_VAS_CRD_RST);
3263 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3264 struct dlb2_hw_domain *domain)
3266 struct dlb2_dir_pq_pair *tmp_dir_port;
3267 struct dlb2_ldb_queue *tmp_ldb_queue;
3268 struct dlb2_ldb_port *tmp_ldb_port;
3269 struct dlb2_list_entry *iter1;
3270 struct dlb2_list_entry *iter2;
3271 struct dlb2_function_resources *rsrcs;
3272 struct dlb2_dir_pq_pair *dir_port;
3273 struct dlb2_ldb_queue *ldb_queue;
3274 struct dlb2_ldb_port *ldb_port;
3275 struct dlb2_list_head *list;
3277 RTE_SET_USED(tmp_dir_port);
3278 RTE_SET_USED(tmp_ldb_queue);
3279 RTE_SET_USED(tmp_ldb_port);
3280 RTE_SET_USED(iter1);
3281 RTE_SET_USED(iter2);
3283 rsrcs = domain->parent_func;
3285 /* Move the domain's ldb queues to the function's avail list */
3286 list = &domain->used_ldb_queues;
3287 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3288 if (ldb_queue->sn_cfg_valid) {
3289 struct dlb2_sn_group *grp;
3291 grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3293 dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3294 ldb_queue->sn_cfg_valid = false;
3297 ldb_queue->owned = false;
3298 ldb_queue->num_mappings = 0;
3299 ldb_queue->num_pending_additions = 0;
3301 dlb2_list_del(&domain->used_ldb_queues,
3302 &ldb_queue->domain_list);
3303 dlb2_list_add(&rsrcs->avail_ldb_queues,
3304 &ldb_queue->func_list);
3305 rsrcs->num_avail_ldb_queues++;
3308 list = &domain->avail_ldb_queues;
3309 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3310 ldb_queue->owned = false;
3312 dlb2_list_del(&domain->avail_ldb_queues,
3313 &ldb_queue->domain_list);
3314 dlb2_list_add(&rsrcs->avail_ldb_queues,
3315 &ldb_queue->func_list);
3316 rsrcs->num_avail_ldb_queues++;
3319 /* Move the domain's ldb ports to the function's avail list */
3320 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3321 list = &domain->used_ldb_ports[i];
3322 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3326 ldb_port->owned = false;
3327 ldb_port->configured = false;
3328 ldb_port->num_pending_removals = 0;
3329 ldb_port->num_mappings = 0;
3330 ldb_port->init_tkn_cnt = 0;
3331 ldb_port->cq_depth = 0;
3332 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3333 ldb_port->qid_map[j].state =
3334 DLB2_QUEUE_UNMAPPED;
3336 dlb2_list_del(&domain->used_ldb_ports[i],
3337 &ldb_port->domain_list);
3338 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3339 &ldb_port->func_list);
3340 rsrcs->num_avail_ldb_ports[i]++;
3343 list = &domain->avail_ldb_ports[i];
3344 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3346 ldb_port->owned = false;
3348 dlb2_list_del(&domain->avail_ldb_ports[i],
3349 &ldb_port->domain_list);
3350 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3351 &ldb_port->func_list);
3352 rsrcs->num_avail_ldb_ports[i]++;
3356 /* Move the domain's dir ports to the function's avail list */
3357 list = &domain->used_dir_pq_pairs;
3358 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3359 dir_port->owned = false;
3360 dir_port->port_configured = false;
3361 dir_port->init_tkn_cnt = 0;
3363 dlb2_list_del(&domain->used_dir_pq_pairs,
3364 &dir_port->domain_list);
3366 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3367 &dir_port->func_list);
3368 rsrcs->num_avail_dir_pq_pairs++;
3371 list = &domain->avail_dir_pq_pairs;
3372 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3373 dir_port->owned = false;
3375 dlb2_list_del(&domain->avail_dir_pq_pairs,
3376 &dir_port->domain_list);
3378 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3379 &dir_port->func_list);
3380 rsrcs->num_avail_dir_pq_pairs++;
3383 /* Return hist list entries to the function */
3384 ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3385 domain->hist_list_entry_base,
3386 domain->total_hist_list_entries);
3389 "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
3394 domain->total_hist_list_entries = 0;
3395 domain->avail_hist_list_entries = 0;
3396 domain->hist_list_entry_base = 0;
3397 domain->hist_list_entry_offset = 0;
3399 if (hw->ver == DLB2_HW_V2_5) {
3400 rsrcs->num_avail_entries += domain->num_credits;
3401 domain->num_credits = 0;
3403 rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3404 domain->num_ldb_credits = 0;
3406 rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3407 domain->num_dir_credits = 0;
3409 rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3410 rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3411 domain->num_avail_aqed_entries = 0;
3412 domain->num_used_aqed_entries = 0;
3414 domain->num_pending_removals = 0;
3415 domain->num_pending_additions = 0;
3416 domain->configured = false;
3417 domain->started = false;
3420 * Move the domain out of the used_domains list and back to the
3421 * function's avail_domains list.
3423 dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3424 dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3425 rsrcs->num_avail_domains++;
3430 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3431 struct dlb2_hw_domain *domain,
3432 struct dlb2_ldb_queue *queue)
3434 struct dlb2_ldb_port *port = NULL;
3437 /* If a domain has LDB queues, it must have LDB ports */
3438 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3439 port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
3447 "[%s()] Internal error: No configured LDB ports\n",
3452 /* If necessary, free up a QID slot in this CQ */
3453 if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3454 struct dlb2_ldb_queue *mapped_queue;
3456 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3458 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3463 ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3467 return dlb2_domain_drain_mapped_queues(hw, domain);
3470 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3471 struct dlb2_hw_domain *domain)
3473 struct dlb2_list_entry *iter;
3474 struct dlb2_ldb_queue *queue;
3478 /* If the domain hasn't been started, there's no traffic to drain */
3479 if (!domain->started)
3483 * Pre-condition: the unattached queue must not have any outstanding
3484 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3485 * prior to this in dlb2_domain_drain_mapped_queues().
3487 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3488 if (queue->num_mappings != 0 ||
3489 dlb2_ldb_queue_is_empty(hw, queue))
3492 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3501 * dlb2_reset_domain() - reset a scheduling domain
3502 * @hw: dlb2_hw handle for a particular device.
3503 * @domain_id: domain ID.
3504 * @vdev_req: indicates whether this request came from a vdev.
3505 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3507 * This function resets and frees a DLB 2.0 scheduling domain and its associated
3510 * Pre-condition: the driver must ensure software has stopped sending QEs
3511 * through this domain's producer ports before invoking this function, or
3512 * undefined behavior will result.
3514 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3518 * Returns 0 upon success, -1 otherwise.
3520 * EINVAL - Invalid domain ID, or the domain is not configured.
3521 * EFAULT - Internal error. (Possibly caused if software is the pre-condition
3523 * ETIMEDOUT - Hardware component didn't reset in the expected time.
3525 int dlb2_reset_domain(struct dlb2_hw *hw,
3528 unsigned int vdev_id)
3530 struct dlb2_hw_domain *domain;
3533 dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3535 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3537 if (domain == NULL || !domain->configured)
3542 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3544 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3547 /* Disable CQ interrupts */
3548 dlb2_domain_disable_dir_port_interrupts(hw, domain);
3550 dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3553 * For each queue owned by this domain, disable its write permissions to
3554 * cause any traffic sent to it to be dropped. Well-behaved software
3555 * should not be sending QEs at this point.
3557 dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3559 dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3561 /* Turn off completion tracking on all the domain's PPs. */
3562 dlb2_domain_disable_ldb_seq_checks(hw, domain);
3565 * Disable the LDB CQs and drain them in order to complete the map and
3566 * unmap procedures, which require zero CQ inflights and zero QID
3567 * inflights respectively.
3569 dlb2_domain_disable_ldb_cqs(hw, domain);
3571 dlb2_domain_drain_ldb_cqs(hw, domain, false);
3573 ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3577 ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3581 ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3585 /* Re-enable the CQs in order to drain the mapped queues. */
3586 dlb2_domain_enable_ldb_cqs(hw, domain);
3588 ret = dlb2_domain_drain_mapped_queues(hw, domain);
3592 ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3596 /* Done draining LDB QEs, so disable the CQs. */
3597 dlb2_domain_disable_ldb_cqs(hw, domain);
3599 dlb2_domain_drain_dir_queues(hw, domain);
3601 /* Done draining DIR QEs, so disable the CQs. */
3602 dlb2_domain_disable_dir_cqs(hw, domain);
3605 dlb2_domain_disable_dir_producer_ports(hw, domain);
3607 dlb2_domain_disable_ldb_producer_ports(hw, domain);
3609 ret = dlb2_domain_verify_reset_success(hw, domain);
3613 /* Reset the QID and port state. */
3614 dlb2_domain_reset_registers(hw, domain);
3616 /* Hardware reset complete. Reset the domain's software state */
3617 return dlb2_domain_reset_software_state(hw, domain);
3621 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3623 struct dlb2_create_ldb_queue_args *args,
3625 unsigned int vdev_id)
3627 DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3629 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3630 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
3632 DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3633 args->num_sequence_numbers);
3634 DLB2_HW_DBG(hw, "\tNumber of QID inflights: %d\n",
3635 args->num_qid_inflights);
3636 DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
3637 args->num_atomic_inflights);
3641 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3642 struct dlb2_ldb_queue *queue,
3643 struct dlb2_create_ldb_queue_args *args)
3648 queue->sn_cfg_valid = false;
3650 if (args->num_sequence_numbers == 0)
3653 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3654 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3656 if (group->sequence_numbers_per_queue ==
3657 args->num_sequence_numbers &&
3658 !dlb2_sn_group_full(group)) {
3659 slot = dlb2_sn_group_alloc_slot(group);
3667 "[%s():%d] Internal error: no sequence number slots available\n",
3668 __func__, __LINE__);
3672 queue->sn_cfg_valid = true;
3673 queue->sn_group = i;
3674 queue->sn_slot = slot;
3679 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3681 struct dlb2_create_ldb_queue_args *args,
3682 struct dlb2_cmd_response *resp,
3684 unsigned int vdev_id,
3685 struct dlb2_hw_domain **out_domain,
3686 struct dlb2_ldb_queue **out_queue)
3688 struct dlb2_hw_domain *domain;
3689 struct dlb2_ldb_queue *queue;
3692 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3695 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3699 if (!domain->configured) {
3700 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3704 if (domain->started) {
3705 resp->status = DLB2_ST_DOMAIN_STARTED;
3709 queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3711 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3715 if (args->num_sequence_numbers) {
3716 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3717 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3719 if (group->sequence_numbers_per_queue ==
3720 args->num_sequence_numbers &&
3721 !dlb2_sn_group_full(group))
3725 if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3726 resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3731 if (args->num_qid_inflights < 1 || args->num_qid_inflights > 2048) {
3732 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3736 /* Inflights must be <= number of sequence numbers if ordered */
3737 if (args->num_sequence_numbers != 0 &&
3738 args->num_qid_inflights > args->num_sequence_numbers) {
3739 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3743 if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3744 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3748 if (args->num_atomic_inflights &&
3749 args->lock_id_comp_level != 0 &&
3750 args->lock_id_comp_level != 64 &&
3751 args->lock_id_comp_level != 128 &&
3752 args->lock_id_comp_level != 256 &&
3753 args->lock_id_comp_level != 512 &&
3754 args->lock_id_comp_level != 1024 &&
3755 args->lock_id_comp_level != 2048 &&
3756 args->lock_id_comp_level != 4096 &&
3757 args->lock_id_comp_level != 65536) {
3758 resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3762 *out_domain = domain;
3769 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
3770 struct dlb2_hw_domain *domain,
3771 struct dlb2_ldb_queue *queue,
3772 struct dlb2_create_ldb_queue_args *args)
3775 ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
3779 /* Attach QID inflights */
3780 queue->num_qid_inflights = args->num_qid_inflights;
3782 /* Attach atomic inflights */
3783 queue->aqed_limit = args->num_atomic_inflights;
3785 domain->num_avail_aqed_entries -= args->num_atomic_inflights;
3786 domain->num_used_aqed_entries += args->num_atomic_inflights;
3791 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
3792 struct dlb2_hw_domain *domain,
3793 struct dlb2_ldb_queue *queue,
3794 struct dlb2_create_ldb_queue_args *args,
3796 unsigned int vdev_id)
3798 struct dlb2_sn_group *sn_group;
3803 /* QID write permissions are turned on when the domain is started */
3804 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.phys_id;
3806 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), reg);
3809 * Unordered QIDs get 4K inflights, ordered get as many as the number
3810 * of sequence numbers.
3812 DLB2_BITS_SET(reg, args->num_qid_inflights,
3813 DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
3814 DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
3815 queue->id.phys_id), reg);
3817 alimit = queue->aqed_limit;
3819 if (alimit > DLB2_MAX_NUM_AQED_ENTRIES)
3820 alimit = DLB2_MAX_NUM_AQED_ENTRIES;
3823 DLB2_BITS_SET(reg, alimit, DLB2_LSP_QID_AQED_ACTIVE_LIM_LIMIT);
3825 DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver,
3826 queue->id.phys_id), reg);
3829 switch (args->lock_id_comp_level) {
3831 DLB2_BITS_SET(reg, 1, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3834 DLB2_BITS_SET(reg, 2, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3837 DLB2_BITS_SET(reg, 3, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3840 DLB2_BITS_SET(reg, 4, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3843 DLB2_BITS_SET(reg, 5, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3846 DLB2_BITS_SET(reg, 6, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3849 DLB2_BITS_SET(reg, 7, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3852 /* No compression by default */
3856 DLB2_CSR_WR(hw, DLB2_AQED_QID_HID_WIDTH(queue->id.phys_id), reg);
3859 /* Don't timestamp QEs that pass through this queue */
3860 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_ITS(queue->id.phys_id), reg);
3862 DLB2_BITS_SET(reg, args->depth_threshold,
3863 DLB2_LSP_QID_ATM_DEPTH_THRSH_THRESH);
3865 DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver,
3866 queue->id.phys_id), reg);
3869 DLB2_BITS_SET(reg, args->depth_threshold,
3870 DLB2_LSP_QID_NALDB_DEPTH_THRSH_THRESH);
3872 DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue->id.phys_id),
3876 * This register limits the number of inflight flows a queue can have
3877 * at one time. It has an upper bound of 2048, but can be
3878 * over-subscribed. 512 is chosen so that a single queue does not use
3879 * the entire atomic storage, but can use a substantial portion if
3883 DLB2_BITS_SET(reg, 512, DLB2_AQED_QID_FID_LIM_QID_FID_LIMIT);
3884 DLB2_CSR_WR(hw, DLB2_AQED_QID_FID_LIM(queue->id.phys_id), reg);
3888 sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
3889 DLB2_BITS_SET(reg, sn_group->mode, DLB2_CHP_ORD_QID_SN_MAP_MODE);
3890 DLB2_BITS_SET(reg, queue->sn_slot, DLB2_CHP_ORD_QID_SN_MAP_SLOT);
3891 DLB2_BITS_SET(reg, sn_group->id, DLB2_CHP_ORD_QID_SN_MAP_GRP);
3894 DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue->id.phys_id), reg);
3897 DLB2_BITS_SET(reg, (args->num_sequence_numbers != 0),
3898 DLB2_SYS_LDB_QID_CFG_V_SN_CFG_V);
3899 DLB2_BITS_SET(reg, (args->num_atomic_inflights != 0),
3900 DLB2_SYS_LDB_QID_CFG_V_FID_CFG_V);
3902 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), reg);
3905 offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
3908 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VQID_V_VQID_V);
3909 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), reg);
3912 DLB2_BITS_SET(reg, queue->id.phys_id,
3913 DLB2_SYS_VF_LDB_VQID2QID_QID);
3914 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), reg);
3917 DLB2_BITS_SET(reg, queue->id.virt_id,
3918 DLB2_SYS_LDB_QID2VQID_VQID);
3919 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID2VQID(queue->id.phys_id), reg);
3923 DLB2_BIT_SET(reg, DLB2_SYS_LDB_QID_V_QID_V);
3924 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), reg);
3928 * dlb2_hw_create_ldb_queue() - create a load-balanced queue
3929 * @hw: dlb2_hw handle for a particular device.
3930 * @domain_id: domain ID.
3931 * @args: queue creation arguments.
3932 * @resp: response structure.
3933 * @vdev_req: indicates whether this request came from a vdev.
3934 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3936 * This function creates a load-balanced queue.
3938 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3942 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
3943 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
3944 * contains the queue ID.
3946 * resp->id contains a virtual ID if vdev_req is true.
3949 * EINVAL - A requested resource is unavailable, the domain is not configured,
3950 * the domain has already been started, or the requested queue name is
3952 * EFAULT - Internal error (resp->status not set).
3954 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
3956 struct dlb2_create_ldb_queue_args *args,
3957 struct dlb2_cmd_response *resp,
3959 unsigned int vdev_id)
3961 struct dlb2_hw_domain *domain;
3962 struct dlb2_ldb_queue *queue;
3965 dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
3968 * Verify that hardware resources are available before attempting to
3969 * satisfy the request. This simplifies the error unwinding code.
3971 ret = dlb2_verify_create_ldb_queue_args(hw,
3982 ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
3986 "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
3987 __func__, __LINE__);
3991 dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
3993 queue->num_mappings = 0;
3995 queue->configured = true;
3998 * Configuration succeeded, so move the resource from the 'avail' to
4001 dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
4003 dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
4006 resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
4011 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
4012 struct dlb2_hw_domain *domain,
4013 struct dlb2_ldb_port *port,
4015 unsigned int vdev_id)
4019 DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
4020 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);
4027 * DLB uses producer port address bits 17:12 to determine the
4028 * producer port ID. In Scalable IOV mode, PP accesses come
4029 * through the PF MMIO window for the physical producer port,
4030 * so for translation purposes the virtual and physical port
4033 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4034 virt_id = port->id.virt_id;
4036 virt_id = port->id.phys_id;
4039 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
4040 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4041 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);
4044 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
4045 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);
4048 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
4049 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
4053 DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
4054 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
4057 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4058 struct dlb2_hw_domain *domain,
4059 struct dlb2_ldb_port *port,
4060 uintptr_t cq_dma_base,
4061 struct dlb2_create_ldb_port_args *args,
4063 unsigned int vdev_id)
4069 /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4070 DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
4071 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
4073 reg = cq_dma_base >> 32;
4074 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
4077 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4078 * cache lines out-of-order (but QEs within a cache line are always
4079 * updated in-order).
4082 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
4084 !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4085 DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
4086 DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);
4088 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);
4090 port->cq_depth = args->cq_depth;
4092 if (args->cq_depth <= 8) {
4094 } else if (args->cq_depth == 16) {
4096 } else if (args->cq_depth == 32) {
4098 } else if (args->cq_depth == 64) {
4100 } else if (args->cq_depth == 128) {
4102 } else if (args->cq_depth == 256) {
4104 } else if (args->cq_depth == 512) {
4106 } else if (args->cq_depth == 1024) {
4110 "[%s():%d] Internal error: invalid CQ depth\n",
4111 __func__, __LINE__);
4116 DLB2_BITS_SET(reg, ds,
4117 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4119 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4123 * To support CQs with depth less than 8, program the token count
4124 * register with a non-zero initial value. Operations such as domain
4125 * reset must take this initial value into account when quiescing the
4128 port->init_tkn_cnt = 0;
4130 if (args->cq_depth < 8) {
4132 port->init_tkn_cnt = 8 - args->cq_depth;
4136 DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
4138 DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4142 DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4143 DLB2_LSP_CQ_LDB_TKN_CNT_RST);
4147 DLB2_BITS_SET(reg, ds,
4148 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
4150 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4153 /* Reset the CQ write pointer */
4155 DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
4156 DLB2_CHP_LDB_CQ_WPTR_RST);
4160 port->hist_list_entry_limit - 1,
4161 DLB2_CHP_HIST_LIST_LIM_LIMIT);
4162 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);
4164 DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
4165 DLB2_CHP_HIST_LIST_BASE_BASE);
4167 DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
4171 * The inflight limit sets a cap on the number of QEs for which this CQ
4172 * can owe completions at one time.
4175 DLB2_BITS_SET(reg, args->cq_history_list_size,
4176 DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
4177 DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
4181 DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4182 DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
4183 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
4187 DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4188 DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
4189 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
4193 * Address translation (AT) settings: 0: untranslated, 2: translated
4194 * (see ATS spec regarding Address Type field for more details)
4197 if (hw->ver == DLB2_HW_V2) {
4199 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
4202 if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4204 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4205 DLB2_SYS_LDB_CQ_PASID_PASID);
4206 DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
4209 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);
4212 DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
4213 DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);
4215 /* Disable the port's QID mappings */
4217 DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
4223 dlb2_cq_depth_is_valid(u32 depth)
4225 if (depth != 1 && depth != 2 &&
4226 depth != 4 && depth != 8 &&
4227 depth != 16 && depth != 32 &&
4228 depth != 64 && depth != 128 &&
4229 depth != 256 && depth != 512 &&
4236 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4237 struct dlb2_hw_domain *domain,
4238 struct dlb2_ldb_port *port,
4239 uintptr_t cq_dma_base,
4240 struct dlb2_create_ldb_port_args *args,
4242 unsigned int vdev_id)
4246 port->hist_list_entry_base = domain->hist_list_entry_base +
4247 domain->hist_list_entry_offset;
4248 port->hist_list_entry_limit = port->hist_list_entry_base +
4249 args->cq_history_list_size;
4251 domain->hist_list_entry_offset += args->cq_history_list_size;
4252 domain->avail_hist_list_entries -= args->cq_history_list_size;
4254 ret = dlb2_ldb_port_configure_cq(hw,
4264 dlb2_ldb_port_configure_pp(hw,
4270 dlb2_ldb_port_cq_enable(hw, port);
4272 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4273 port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4274 port->num_mappings = 0;
4276 port->enabled = true;
4278 port->configured = true;
4284 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4286 uintptr_t cq_dma_base,
4287 struct dlb2_create_ldb_port_args *args,
4289 unsigned int vdev_id)
4291 DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4293 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4294 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
4296 DLB2_HW_DBG(hw, "\tCQ depth: %d\n",
4298 DLB2_HW_DBG(hw, "\tCQ hist list size: %d\n",
4299 args->cq_history_list_size);
4300 DLB2_HW_DBG(hw, "\tCQ base address: 0x%lx\n",
4302 DLB2_HW_DBG(hw, "\tCoS ID: %u\n", args->cos_id);
4303 DLB2_HW_DBG(hw, "\tStrict CoS allocation: %u\n",
4308 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4310 uintptr_t cq_dma_base,
4311 struct dlb2_create_ldb_port_args *args,
4312 struct dlb2_cmd_response *resp,
4314 unsigned int vdev_id,
4315 struct dlb2_hw_domain **out_domain,
4316 struct dlb2_ldb_port **out_port,
4319 struct dlb2_hw_domain *domain;
4320 struct dlb2_ldb_port *port;
4323 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4326 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4330 if (!domain->configured) {
4331 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4335 if (domain->started) {
4336 resp->status = DLB2_ST_DOMAIN_STARTED;
4340 if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
4341 resp->status = DLB2_ST_INVALID_COS_ID;
4345 if (args->cos_strict) {
4347 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4350 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4351 id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
4353 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4361 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4365 /* Check cache-line alignment */
4366 if ((cq_dma_base & 0x3F) != 0) {
4367 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4371 if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4372 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4376 /* The history list size must be >= 1 */
4377 if (!args->cq_history_list_size) {
4378 resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4382 if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4383 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4387 *out_domain = domain;
4395 * dlb2_hw_create_ldb_port() - create a load-balanced port
4396 * @hw: dlb2_hw handle for a particular device.
4397 * @domain_id: domain ID.
4398 * @args: port creation arguments.
4399 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4400 * @resp: response structure.
4401 * @vdev_req: indicates whether this request came from a vdev.
4402 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4404 * This function creates a load-balanced port.
4406 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4410 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4411 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4412 * contains the port ID.
4414 * resp->id contains a virtual ID if vdev_req is true.
4417 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4418 * pointer address is not properly aligned, the domain is not
4419 * configured, or the domain has already been started.
4420 * EFAULT - Internal error (resp->status not set).
4422 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4424 struct dlb2_create_ldb_port_args *args,
4425 uintptr_t cq_dma_base,
4426 struct dlb2_cmd_response *resp,
4428 unsigned int vdev_id)
4430 struct dlb2_hw_domain *domain;
4431 struct dlb2_ldb_port *port;
4434 dlb2_log_create_ldb_port_args(hw,
4442 * Verify that hardware resources are available before attempting to
4443 * satisfy the request. This simplifies the error unwinding code.
4445 ret = dlb2_verify_create_ldb_port_args(hw,
4458 ret = dlb2_configure_ldb_port(hw,
4469 * Configuration succeeded, so move the resource from the 'avail' to
4472 dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4474 dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4477 resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4483 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
4485 uintptr_t cq_dma_base,
4486 struct dlb2_create_dir_port_args *args,
4488 unsigned int vdev_id)
4490 DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
4492 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4493 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
4495 DLB2_HW_DBG(hw, "\tCQ depth: %d\n",
4497 DLB2_HW_DBG(hw, "\tCQ base address: 0x%lx\n",
4501 static struct dlb2_dir_pq_pair *
4502 dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
4505 struct dlb2_hw_domain *domain)
4507 struct dlb2_list_entry *iter;
4508 struct dlb2_dir_pq_pair *port;
4511 if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
4514 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
4515 if ((!vdev_req && port->id.phys_id == id) ||
4516 (vdev_req && port->id.virt_id == id))
4524 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
4526 uintptr_t cq_dma_base,
4527 struct dlb2_create_dir_port_args *args,
4528 struct dlb2_cmd_response *resp,
4530 unsigned int vdev_id,
4531 struct dlb2_hw_domain **out_domain,
4532 struct dlb2_dir_pq_pair **out_port)
4534 struct dlb2_hw_domain *domain;
4535 struct dlb2_dir_pq_pair *pq;
4537 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4540 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4544 if (!domain->configured) {
4545 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4549 if (domain->started) {
4550 resp->status = DLB2_ST_DOMAIN_STARTED;
4554 if (args->queue_id != -1) {
4556 * If the user claims the queue is already configured, validate
4557 * the queue ID, its domain, and whether the queue is
4560 pq = dlb2_get_domain_used_dir_pq(hw,
4565 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4566 !pq->queue_configured) {
4567 resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
4572 * If the port's queue is not configured, validate that a free
4573 * port-queue pair is available.
4575 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4578 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
4583 /* Check cache-line alignment */
4584 if ((cq_dma_base & 0x3F) != 0) {
4585 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4589 if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4590 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4594 *out_domain = domain;
4600 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
4601 struct dlb2_hw_domain *domain,
4602 struct dlb2_dir_pq_pair *port,
4604 unsigned int vdev_id)
4608 DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_DIR_PP2VAS_VAS);
4609 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), reg);
4616 * DLB uses producer port address bits 17:12 to determine the
4617 * producer port ID. In Scalable IOV mode, PP accesses come
4618 * through the PF MMIO window for the physical producer port,
4619 * so for translation purposes the virtual and physical port
4622 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4623 virt_id = port->id.virt_id;
4625 virt_id = port->id.phys_id;
4628 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_DIR_VPP2PP_PP);
4629 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
4630 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), reg);
4633 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_PP2VDEV_VDEV);
4634 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VDEV(port->id.phys_id), reg);
4637 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VPP_V_VPP_V);
4638 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), reg);
4642 DLB2_BIT_SET(reg, DLB2_SYS_DIR_PP_V_PP_V);
4643 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP_V(port->id.phys_id), reg);
4646 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
4647 struct dlb2_hw_domain *domain,
4648 struct dlb2_dir_pq_pair *port,
4649 uintptr_t cq_dma_base,
4650 struct dlb2_create_dir_port_args *args,
4652 unsigned int vdev_id)
4657 /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4658 DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_DIR_CQ_ADDR_L_ADDR_L);
4659 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), reg);
4661 reg = cq_dma_base >> 32;
4662 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), reg);
4665 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4666 * cache lines out-of-order (but QEs within a cache line are always
4667 * updated in-order).
4670 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_CQ2VF_PF_RO_VF);
4671 DLB2_BITS_SET(reg, !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4672 DLB2_SYS_DIR_CQ2VF_PF_RO_IS_PF);
4673 DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ2VF_PF_RO_RO);
4675 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), reg);
4677 if (args->cq_depth <= 8) {
4679 } else if (args->cq_depth == 16) {
4681 } else if (args->cq_depth == 32) {
4683 } else if (args->cq_depth == 64) {
4685 } else if (args->cq_depth == 128) {
4687 } else if (args->cq_depth == 256) {
4689 } else if (args->cq_depth == 512) {
4691 } else if (args->cq_depth == 1024) {
4695 "[%s():%d] Internal error: invalid CQ depth\n",
4696 __func__, __LINE__);
4701 DLB2_BITS_SET(reg, ds,
4702 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4704 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4708 * To support CQs with depth less than 8, program the token count
4709 * register with a non-zero initial value. Operations such as domain
4710 * reset must take this initial value into account when quiescing the
4713 port->init_tkn_cnt = 0;
4715 if (args->cq_depth < 8) {
4717 port->init_tkn_cnt = 8 - args->cq_depth;
4719 DLB2_BITS_SET(reg, port->init_tkn_cnt,
4720 DLB2_LSP_CQ_DIR_TKN_CNT_COUNT);
4722 DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4726 DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4727 DLB2_LSP_CQ_DIR_TKN_CNT_RST);
4731 DLB2_BITS_SET(reg, ds,
4732 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_TOKEN_DEPTH_SELECT_V2);
4734 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
4738 /* Reset the CQ write pointer */
4740 DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
4741 DLB2_CHP_DIR_CQ_WPTR_RST);
4743 /* Virtualize the PPID */
4745 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), reg);
4748 * Address translation (AT) settings: 0: untranslated, 2: translated
4749 * (see ATS spec regarding Address Type field for more details)
4751 if (hw->ver == DLB2_HW_V2) {
4753 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), reg);
4756 if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4757 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4758 DLB2_SYS_DIR_CQ_PASID_PASID);
4759 DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ_PASID_FMT2);
4762 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id), reg);
4765 DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_DIR_CQ2VAS_CQ2VAS);
4766 DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id), reg);
4771 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
4772 struct dlb2_hw_domain *domain,
4773 struct dlb2_dir_pq_pair *port,
4774 uintptr_t cq_dma_base,
4775 struct dlb2_create_dir_port_args *args,
4777 unsigned int vdev_id)
4781 ret = dlb2_dir_port_configure_cq(hw,
4792 dlb2_dir_port_configure_pp(hw,
4798 dlb2_dir_port_cq_enable(hw, port);
4800 port->enabled = true;
4802 port->port_configured = true;
4808 * dlb2_hw_create_dir_port() - create a directed port
4809 * @hw: dlb2_hw handle for a particular device.
4810 * @domain_id: domain ID.
4811 * @args: port creation arguments.
4812 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4813 * @resp: response structure.
4814 * @vdev_req: indicates whether this request came from a vdev.
4815 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4817 * This function creates a directed port.
4819 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4823 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4824 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4825 * contains the port ID.
4827 * resp->id contains a virtual ID if vdev_req is true.
4830 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4831 * pointer address is not properly aligned, the domain is not
4832 * configured, or the domain has already been started.
4833 * EFAULT - Internal error (resp->status not set).
4835 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
4837 struct dlb2_create_dir_port_args *args,
4838 uintptr_t cq_dma_base,
4839 struct dlb2_cmd_response *resp,
4841 unsigned int vdev_id)
4843 struct dlb2_dir_pq_pair *port;
4844 struct dlb2_hw_domain *domain;
4847 dlb2_log_create_dir_port_args(hw,
4855 * Verify that hardware resources are available before attempting to
4856 * satisfy the request. This simplifies the error unwinding code.
4858 ret = dlb2_verify_create_dir_port_args(hw,
4870 ret = dlb2_configure_dir_port(hw,
4881 * Configuration succeeded, so move the resource from the 'avail' to
4882 * the 'used' list (if it's not already there).
4884 if (args->queue_id == -1) {
4885 dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
4887 dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
4891 resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4896 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
4897 struct dlb2_hw_domain *domain,
4898 struct dlb2_dir_pq_pair *queue,
4899 struct dlb2_create_dir_queue_args *args,
4901 unsigned int vdev_id)
4906 /* QID write permissions are turned on when the domain is started */
4907 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4910 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), reg);
4912 /* Don't timestamp QEs that pass through this queue */
4913 DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_ITS(queue->id.phys_id), reg);
4916 DLB2_BITS_SET(reg, args->depth_threshold,
4917 DLB2_LSP_QID_DIR_DEPTH_THRSH_THRESH);
4919 DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver, queue->id.phys_id),
4923 offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4927 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VQID_V_VQID_V);
4928 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), reg);
4931 DLB2_BITS_SET(reg, queue->id.phys_id,
4932 DLB2_SYS_VF_DIR_VQID2QID_QID);
4933 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), reg);
4937 DLB2_BIT_SET(reg, DLB2_SYS_DIR_QID_V_QID_V);
4938 DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), reg);
4940 queue->queue_configured = true;
4944 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
4946 struct dlb2_create_dir_queue_args *args,
4948 unsigned int vdev_id)
4950 DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
4952 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4953 DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
4954 DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
static int
dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
				  u32 domain_id,
				  struct dlb2_create_dir_queue_args *args,
				  struct dlb2_cmd_response *resp,
				  bool vdev_req,
				  unsigned int vdev_id,
				  struct dlb2_hw_domain **out_domain,
				  struct dlb2_dir_pq_pair **out_queue)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_dir_pq_pair *pq;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	/*
	 * If the user claims the port is already configured, validate the port
	 * ID, its domain, and whether the port is configured.
	 */
	if (args->port_id != -1) {
		pq = dlb2_get_domain_used_dir_pq(hw,
						 args->port_id,
						 vdev_req,
						 domain);

		if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
		    !pq->port_configured) {
			resp->status = DLB2_ST_INVALID_PORT_ID;
			return -EINVAL;
		}
	} else {
		/*
		 * If the queue's port is not configured, validate that a free
		 * port-queue pair is available.
		 */
		pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
					typeof(*pq));
		if (!pq) {
			resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
			return -EINVAL;
		}
	}

	*out_domain = domain;
	*out_queue = pq;

	return 0;
}
/**
 * dlb2_hw_create_dir_queue() - create a directed queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue creation arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function creates a directed queue.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the queue ID.
 *
 * resp->id contains a virtual ID if vdev_req is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *	    or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_create_dir_queue_args *args,
			     struct dlb2_cmd_response *resp,
			     bool vdev_req,
			     unsigned int vdev_id)
{
	struct dlb2_dir_pq_pair *queue;
	struct dlb2_hw_domain *domain;
	int ret;

	dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_dir_queue_args(hw, domain_id, args, resp,
						vdev_req, vdev_id,
						&domain, &queue);
	if (ret)
		return ret;

	dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);

	/*
	 * Configuration succeeded, so move the resource from the 'avail' to
	 * the 'used' list (if it's not already there).
	 */
	if (args->port_id == -1) {
		dlb2_list_del(&domain->avail_dir_pq_pairs,
			      &queue->domain_list);

		dlb2_list_add(&domain->used_dir_pq_pairs,
			      &queue->domain_list);
	}

	resp->status = 0;

	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;

	return 0;
}
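/*
 * Illustrative usage sketch (not part of the driver; the surrounding ioctl
 * plumbing and the qid variable are assumptions): a caller that already
 * created the paired directed port passes that port's ID; passing
 * port_id == -1 instead asks this function to claim a free port-queue pair
 * from the domain's 'avail' list.
 *
 *	struct dlb2_create_dir_queue_args args = {
 *		.port_id = -1,		// no port created yet; take a free pair
 *		.depth_threshold = 256,
 *	};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	if (dlb2_hw_create_dir_queue(hw, domain_id, &args, &resp,
 *				     false, 0) == 0)
 *		qid = resp.id;		// physical ID for a PF request
 */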
static bool
dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];

		if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
		    map->pending_qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}
static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
					      struct dlb2_ldb_queue *queue,
					      struct dlb2_cmd_response *resp)
{
	enum dlb2_qid_map_state state;
	int i;

	/* Unused slot available? */
	if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
		return 0;

	/*
	 * If the queue is already mapped (from the application's perspective),
	 * this is simply a priority update.
	 */
	state = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, state, queue, &i))
		return 0;

	state = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, state, queue, &i))
		return 0;

	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
		return 0;

	/*
	 * If the slot contains an unmap in progress, it's considered
	 * available.
	 */
	state = DLB2_QUEUE_UNMAP_IN_PROG;
	if (dlb2_port_find_slot(port, state, &i))
		return 0;

	state = DLB2_QUEUE_UNMAPPED;
	if (dlb2_port_find_slot(port, state, &i))
		return 0;

	resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;

	return -EINVAL;
}
static struct dlb2_ldb_queue *
dlb2_get_domain_ldb_queue(u32 id,
			  bool vdev_req,
			  struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		if ((!vdev_req && queue->id.phys_id == id) ||
		    (vdev_req && queue->id.virt_id == id))
			return queue;
	}

	return NULL;
}
static struct dlb2_ldb_port *
dlb2_get_domain_used_ldb_port(u32 id,
			      bool vdev_req,
			      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;

	if (id >= DLB2_MAX_NUM_LDB_PORTS)
		return NULL;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			if ((!vdev_req && port->id.phys_id == id) ||
			    (vdev_req && port->id.virt_id == id))
				return port;
		}

		DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter) {
			if ((!vdev_req && port->id.phys_id == id) ||
			    (vdev_req && port->id.virt_id == id))
				return port;
		}
	}

	return NULL;
}
static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot,
					      struct dlb2_map_qid_args *args)
{
	u32 cq2priov;

	/* Read-modify-write the priority and valid bit register */
	cq2priov = DLB2_CSR_RD(hw,
			       DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id));

	cq2priov |= (1 << (slot + DLB2_LSP_CQ2PRIOV_V_LOC)) &
		    DLB2_LSP_CQ2PRIOV_V;
	cq2priov |= ((args->priority & 0x7) << slot * 3) &
		    DLB2_LSP_CQ2PRIOV_PRIO;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), cq2priov);

	dlb2_flush_csr(hw);

	port->qid_map[slot].priority = args->priority;
}
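/*
 * For reference, CQ2PRIOV packs one valid bit and a 3-bit priority per CQ
 * slot: slot N's priority occupies bits [3N+2:3N], and its valid bit sits at
 * bit (N + DLB2_LSP_CQ2PRIOV_V_LOC). A minimal sketch of the same packing
 * arithmetic (the slot and prio values below are hypothetical):
 *
 *	int slot = 2;
 *	u32 prio = 5;					// must be < 8
 *	u32 v_bit = 1u << (slot + DLB2_LSP_CQ2PRIOV_V_LOC);
 *	u32 prio_bits = (prio & 0x7) << (slot * 3);	// bits [8:6] here
 */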
static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
				    u32 domain_id,
				    struct dlb2_map_qid_args *args,
				    struct dlb2_cmd_response *resp,
				    bool vdev_req,
				    unsigned int vdev_id,
				    struct dlb2_hw_domain **out_domain,
				    struct dlb2_ldb_port **out_port,
				    struct dlb2_ldb_queue **out_queue)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	struct dlb2_ldb_port *port;
	int id;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);

	if (!port || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	if (args->priority >= DLB2_QID_PRIORITIES) {
		resp->status = DLB2_ST_INVALID_PRIORITY;
		return -EINVAL;
	}

	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);

	if (!queue || !queue->configured) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	if (queue->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	if (port->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	*out_domain = domain;
	*out_port = port;
	*out_queue = queue;

	return 0;
}
static void dlb2_log_map_qid(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_map_qid_args *args,
			     bool vdev_req,
			     unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
		    domain_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n",
		    args->port_id);
	DLB2_HW_DBG(hw, "\tQueue ID: %d\n",
		    args->qid);
	DLB2_HW_DBG(hw, "\tPriority: %d\n",
		    args->priority);
}
/**
 * dlb2_hw_map_qid() - map a load-balanced queue to a load-balanced port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: map QID arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function configures the DLB to schedule QEs from the specified queue
 * to the specified port. Each load-balanced port can be mapped to up to 8
 * queues; each load-balanced queue can potentially map to all the
 * load-balanced ports.
 *
 * A successful return does not necessarily mean the mapping was configured. If
 * this function is unable to immediately map the queue to the port, it will
 * add the requested operation to a per-port list of pending map/unmap
 * operations, and (if it's not already running) launch a kernel thread that
 * periodically attempts to process all pending operations. In a sense, this is
 * an asynchronous function.
 *
 * This asynchronicity creates two views of the state of hardware: the actual
 * hardware state and the requested state (as if every request completed
 * immediately). If there are any pending map/unmap operations, the requested
 * state will differ from the actual state. All validation is performed with
 * respect to the pending state; for instance, if there are 8 pending map
 * operations for port X, a request for a 9th will fail because a load-balanced
 * port can only map up to 8 queues.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
 *	    the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 * EBUSY  - The requested port has outstanding detach operations.
 */
int dlb2_hw_map_qid(struct dlb2_hw *hw,
		    u32 domain_id,
		    struct dlb2_map_qid_args *args,
		    struct dlb2_cmd_response *resp,
		    bool vdev_req,
		    unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	enum dlb2_qid_map_state st;
	struct dlb2_ldb_port *port;
	int ret, i;
	u8 prio;

	dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_map_qid_args(hw, domain_id, args, resp,
				       vdev_req, vdev_id,
				       &domain, &port, &queue);
	if (ret)
		return ret;

	prio = args->priority;

	/*
	 * If there are any outstanding detach operations for this port,
	 * attempt to complete them. This may be necessary to free up a QID
	 * slot for this requested mapping.
	 */
	if (port->num_pending_removals) {
		bool bool_ret;

		bool_ret = dlb2_domain_finish_unmap_port(hw, domain, port);
		if (!bool_ret)
			return -EBUSY;
	}

	ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
	if (ret)
		return ret;

	/* Hardware requires disabling the CQ before mapping QIDs. */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	/*
	 * If this is only a priority change, don't perform the full QID->CQ
	 * mapping procedure.
	 */
	st = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		if (prio != port->qid_map[i].priority) {
			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
		}

		st = DLB2_QUEUE_MAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto map_qid_done;
	}

	st = DLB2_QUEUE_UNMAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		if (prio != port->qid_map[i].priority) {
			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
		}

		st = DLB2_QUEUE_MAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto map_qid_done;
	}

	/*
	 * If this is a priority change on an in-progress mapping, don't
	 * perform the full QID->CQ mapping procedure.
	 */
	st = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		port->qid_map[i].priority = prio;

		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");

		goto map_qid_done;
	}

	/*
	 * If this is a priority change on a pending mapping, update the
	 * pending priority.
	 */
	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
		port->qid_map[i].pending_priority = prio;

		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");

		goto map_qid_done;
	}

	/*
	 * If all the CQ's slots are in use, then there's an unmap in progress
	 * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
	 * mapping to pending_map and return. When the removal is completed for
	 * the slot's current occupant, this mapping will be performed.
	 */
	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
		if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
			enum dlb2_qid_map_state new_st;

			port->qid_map[i].pending_qid = queue->id.phys_id;
			port->qid_map[i].pending_priority = prio;

			new_st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

			ret = dlb2_port_slot_state_transition(hw, port, queue,
							      i, new_st);
			if (ret)
				return ret;

			DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");

			goto map_qid_done;
		}
	}

	/*
	 * If the domain has started, a special "dynamic" CQ->queue mapping
	 * procedure is required in order to safely update the CQ<->QID tables.
	 * The "static" procedure cannot be used when traffic is flowing,
	 * because the CQ<->QID tables cannot be updated atomically and the
	 * scheduler won't see the new mapping unless the queue's if_status
	 * changes, which isn't guaranteed.
	 */
	ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);

	/* If ret is less than zero, it's due to an internal error */
	if (ret < 0)
		return ret;

map_qid_done:
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	resp->status = 0;

	return 0;
}
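/*
 * Illustrative caller sketch (assumed names, not part of the driver): because
 * dlb2_hw_map_qid() is asynchronous, a caller that must observe the final
 * state can retry on -EBUSY, which indicates outstanding detach operations
 * on the port that could not be completed immediately.
 *
 *	struct dlb2_map_qid_args args = {
 *		.port_id = port_id,
 *		.qid = queue_id,
 *		.priority = 0,		// highest of the 8 priorities
 *	};
 *	struct dlb2_cmd_response resp = {0};
 *	int ret;
 *
 *	do {
 *		ret = dlb2_hw_map_qid(hw, domain_id, &args, &resp, false, 0);
 *	} while (ret == -EBUSY);
 */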
static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
			       u32 domain_id,
			       struct dlb2_unmap_qid_args *args,
			       bool vdev_req,
			       unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
		    domain_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n",
		    args->port_id);
	DLB2_HW_DBG(hw, "\tQueue ID: %d\n",
		    args->qid);
	if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
		DLB2_HW_DBG(hw, "\tQueue's num mappings: %d\n",
			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
}
static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
				      u32 domain_id,
				      struct dlb2_unmap_qid_args *args,
				      struct dlb2_cmd_response *resp,
				      bool vdev_req,
				      unsigned int vdev_id,
				      struct dlb2_hw_domain **out_domain,
				      struct dlb2_ldb_port **out_port,
				      struct dlb2_ldb_queue **out_queue)
{
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	struct dlb2_ldb_port *port;
	int slot;
	int id;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);

	if (!port || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	if (port->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);

	if (!queue || !queue->configured) {
		DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
			    __func__, args->qid);
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	/*
	 * Verify that the port has the queue mapped. From the application's
	 * perspective a queue is mapped if it is actually mapped, the map is
	 * in progress, or the map is blocked pending an unmap.
	 */
	state = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
		goto done;

	state = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
		goto done;

	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
		goto done;

	resp->status = DLB2_ST_INVALID_QID;
	return -EINVAL;

done:
	*out_domain = domain;
	*out_port = port;
	*out_queue = queue;

	return 0;
}
/**
 * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: unmap QID arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function configures the DLB to stop scheduling QEs from the specified
 * queue to the specified port.
 *
 * A successful return does not necessarily mean the mapping was removed. If
 * this function is unable to immediately unmap the queue from the port, it
 * will add the requested operation to a per-port list of pending map/unmap
 * operations, and (if it's not already running) launch a kernel thread that
 * periodically attempts to process all pending operations. See
 * dlb2_hw_map_qid() for more details.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
 *	    the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
		      u32 domain_id,
		      struct dlb2_unmap_qid_args *args,
		      struct dlb2_cmd_response *resp,
		      bool vdev_req,
		      unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	enum dlb2_qid_map_state st;
	struct dlb2_ldb_port *port;
	bool unmap_complete;
	int i, ret;

	dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_unmap_qid_args(hw, domain_id, args, resp,
					 vdev_req, vdev_id,
					 &domain, &port, &queue);
	if (ret)
		return ret;

	/*
	 * If the queue hasn't been mapped yet, we need to update the slot's
	 * state and re-enable the queue's inflights.
	 */
	st = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		/*
		 * Since the in-progress map was aborted, re-enable the QID's
		 * inflights.
		 */
		if (queue->num_pending_additions == 0)
			dlb2_ldb_queue_set_inflight_limit(hw, queue);

		st = DLB2_QUEUE_UNMAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto unmap_qid_done;
	}

	/*
	 * If the queue mapping is on hold pending an unmap, we simply need to
	 * update the slot's state.
	 */
	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
		st = DLB2_QUEUE_UNMAP_IN_PROG;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto unmap_qid_done;
	}

	st = DLB2_QUEUE_MAPPED;
	if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: no available CQ slots\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * QID->CQ mapping removal is an asynchronous procedure. It requires
	 * stopping the DLB2 from scheduling this CQ, draining all inflights
	 * from the CQ, then unmapping the queue from the CQ. This function
	 * simply marks the port as needing the queue unmapped, and (if
	 * necessary) starts the unmapping worker thread.
	 */
	dlb2_ldb_port_cq_disable(hw, port);

	st = DLB2_QUEUE_UNMAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
	if (ret)
		return ret;

	/*
	 * Attempt to finish the unmapping now, in case the port has no
	 * outstanding inflights. If that's not the case, this will fail and
	 * the unmapping will be completed at a later time.
	 */
	unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);

	/*
	 * If the unmapping couldn't complete immediately, launch the worker
	 * thread (if it isn't already launched) to finish it later.
	 */
	if (!unmap_complete && !os_worker_active(hw))
		os_schedule_work(hw);

unmap_qid_done:
	resp->status = 0;

	return 0;
}
static void
dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
				  struct dlb2_pending_port_unmaps_args *args,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
}
/**
 * dlb2_hw_pending_port_unmaps() - returns the number of unmap operations in
 *	progress.
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: number of unmaps in progress args
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the number of unmaps in progress.
 *
 * Errors:
 * EINVAL - Invalid port ID.
 */
int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
				u32 domain_id,
				struct dlb2_pending_port_unmaps_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_port *port;

	dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
	if (!port || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	resp->id = port->num_pending_removals;

	return 0;
}
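/*
 * Illustrative sketch (assumed caller, not part of the driver): after
 * dlb2_hw_unmap_qid() returns, software that needs the unmap to actually
 * complete can poll this function until the pending-removal count reaches
 * zero.
 *
 *	struct dlb2_pending_port_unmaps_args args = { .port_id = port_id };
 *	struct dlb2_cmd_response resp = {0};
 *
 *	do {
 *		if (dlb2_hw_pending_port_unmaps(hw, domain_id, &args, &resp,
 *						false, 0))
 *			break;		// invalid domain or port ID
 *	} while (resp.id != 0);		// resp.id == unmaps still in progress
 */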
static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
					 u32 domain_id,
					 struct dlb2_cmd_response *resp,
					 bool vdev_req,
					 unsigned int vdev_id,
					 struct dlb2_hw_domain **out_domain)
{
	struct dlb2_hw_domain *domain;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	*out_domain = domain;

	return 0;
}
static void dlb2_log_start_domain(struct dlb2_hw *hw,
				  u32 domain_id,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}
/**
 * dlb2_hw_start_domain() - start a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: start domain arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function starts a scheduling domain, which allows applications to send
 * traffic through it. Once a domain is started, its resources can no longer be
 * configured (besides QID remapping and port enable/disable).
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - the domain is not configured, or the domain is already started.
 */
int
dlb2_hw_start_domain(struct dlb2_hw *hw,
		     u32 domain_id,
		     struct dlb2_start_domain_args *args,
		     struct dlb2_cmd_response *resp,
		     bool vdev_req,
		     unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *dir_queue;
	struct dlb2_ldb_queue *ldb_queue;
	struct dlb2_hw_domain *domain;
	int ret;
	RTE_SET_USED(args);
	RTE_SET_USED(iter);

	dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);

	ret = dlb2_verify_start_domain_args(hw, domain_id, resp,
					    vdev_req, vdev_id, &domain);
	if (ret)
		return ret;

	/*
	 * Enable load-balanced and directed queue write permissions for the
	 * queues this domain owns. Without this, the DLB2 will drop all
	 * incoming traffic to those queues.
	 */
	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
		u32 vasqid_v = 0;
		unsigned int offs;

		DLB2_BIT_SET(vasqid_v, DLB2_SYS_LDB_VASQID_V_VASQID_V);

		offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
			ldb_queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), vasqid_v);
	}

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
		u32 vasqid_v = 0;
		unsigned int offs;

		DLB2_BIT_SET(vasqid_v, DLB2_SYS_DIR_VASQID_V_VASQID_V);

		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
			dir_queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), vasqid_v);
	}

	dlb2_flush_csr(hw);

	domain->started = true;

	resp->status = 0;

	return 0;
}
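/*
 * Illustrative call ordering (helper names are assumptions, not part of the
 * driver): all resource creation must precede the start call, because once
 * the domain is started only QID remapping and port enable/disable remain
 * legal.
 *
 *	struct dlb2_start_domain_args start_args;
 *	struct dlb2_cmd_response resp = {0};
 *
 *	create_domain_resources();	// e.g. dlb2_hw_create_dir_queue() etc.
 *	dlb2_hw_start_domain(hw, domain_id, &start_args, &resp, false, 0);
 *	// From here on, applications may send HCWs to the domain's queues.
 */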
static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
					 u32 domain_id,
					 u32 queue_id,
					 bool vdev_req,
					 unsigned int vf_id)
{
	DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
}
/**
 * dlb2_hw_get_dir_queue_depth() - returns the depth of a directed queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue depth args
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function returns the depth of a directed queue.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the depth.
 *
 * Errors:
 * EINVAL - Invalid domain ID or queue ID.
 */
int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
				u32 domain_id,
				struct dlb2_get_dir_queue_depth_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_dir_pq_pair *queue;
	struct dlb2_hw_domain *domain;
	int id;

	id = domain_id;

	dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
				     vdev_req, vdev_id);

	domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	id = args->queue_id;

	queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
	if (!queue) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	resp->id = dlb2_dir_queue_depth(hw, queue);

	return 0;
}
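/*
 * Illustrative sketch (not part of the driver): queue depth is commonly
 * polled while draining a domain, e.g. waiting for a directed queue to empty
 * before reset. Identifiers other than the function itself are assumptions.
 *
 *	struct dlb2_get_dir_queue_depth_args args = { .queue_id = qid };
 *	struct dlb2_cmd_response resp = {0};
 *
 *	do {
 *		if (dlb2_hw_get_dir_queue_depth(hw, domain_id, &args, &resp,
 *						false, 0))
 *			break;		// invalid domain or queue ID
 *	} while (resp.id != 0);		// resp.id holds the current depth
 */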
static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
					 u32 domain_id,
					 u32 queue_id,
					 bool vdev_req,
					 unsigned int vf_id)
{
	DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
}
/**
 * dlb2_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue depth args
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function returns the depth of a load-balanced queue.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the depth.
 *
 * Errors:
 * EINVAL - Invalid domain ID or queue ID.
 */
int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
				u32 domain_id,
				struct dlb2_get_ldb_queue_depth_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;

	dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
				     vdev_req, vdev_id);

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
	if (!queue) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	resp->id = dlb2_ldb_queue_depth(hw, queue);

	return 0;
}
/**
 * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function attempts to finish any outstanding unmap procedures.
 * This function should be called by the kernel thread responsible for
 * finishing map/unmap procedures.
 *
 * Return:
 * Returns the number of procedures that weren't completed.
 */
unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
{
	int i, num = 0;

	/* Finish queue unmap jobs for any domain that needs it */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		struct dlb2_hw_domain *domain = &hw->domains[i];

		num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
	}

	return num;
}
/**
 * dlb2_finish_map_qid_procedures() - finish any pending map procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function attempts to finish any outstanding map procedures.
 * This function should be called by the kernel thread responsible for
 * finishing map/unmap procedures.
 *
 * Return:
 * Returns the number of procedures that weren't completed.
 */
unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
{
	int i, num = 0;

	/* Finish queue map jobs for any domain that needs it */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		struct dlb2_hw_domain *domain = &hw->domains[i];

		num += dlb2_domain_finish_map_qid_procedures(hw, domain);
	}

	return num;
}
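/*
 * Illustrative worker sketch (the wrapper and reschedule helper are
 * hypothetical; the OS-dependent layer provides the real one): the kernel
 * thread scheduled via os_schedule_work() would typically call both finish
 * functions and re-arm itself while procedures remain outstanding.
 *
 *	static void complete_queue_map_unmap(struct dlb2_hw *hw)
 *	{
 *		unsigned int pending;
 *
 *		pending = dlb2_finish_unmap_qid_procedures(hw);
 *		pending += dlb2_finish_map_qid_procedures(hw);
 *		if (pending)
 *			reschedule_work(hw);	// hypothetical helper
 *	}
 */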
/**
 * dlb2_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports.
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function must be called prior to configuring scheduling domains.
 */
void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
{
	u32 ctrl;

	ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

	DLB2_BIT_SET(ctrl,
		     DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_DIR_CQ_MODE);

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
}
/**
 * dlb2_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
 *	CQs.
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function must be called prior to configuring scheduling domains.
 */
void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
{
	u32 ctrl;

	ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

	DLB2_BIT_SET(ctrl,
		     DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_LDB_CQ_MODE);

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
}
/**
 * dlb2_get_group_sequence_numbers() - return a group's number of SNs per queue
 * @hw: dlb2_hw handle for a particular device.
 * @group_id: sequence number group ID.
 *
 * This function returns the configured number of sequence numbers per queue
 * for the specified group.
 *
 * Return:
 * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
 */
int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, u32 group_id)
{
	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
}
/**
 * dlb2_get_group_sequence_number_occupancy() - return a group's in-use slots
 * @hw: dlb2_hw handle for a particular device.
 * @group_id: sequence number group ID.
 *
 * This function returns the group's number of in-use slots (i.e. load-balanced
 * queues using the specified group).
 *
 * Return:
 * Returns -EINVAL if group_id is invalid, else the group's occupancy.
 */
int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw, u32 group_id)
{
	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
}
static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
						u32 group_id,
						u32 val)
{
	DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
	DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
	DLB2_HW_DBG(hw, "\tValue: %u\n", val);
}
/**
 * dlb2_set_group_sequence_numbers() - assign a group's number of SNs per queue
 * @hw: dlb2_hw handle for a particular device.
 * @group_id: sequence number group ID.
 * @val: requested amount of sequence numbers per queue.
 *
 * This function configures the group's number of sequence numbers per queue.
 * val can be a power-of-two between 64 and 1024, inclusive. This setting can
 * be configured until the first ordered load-balanced queue is configured, at
 * which point the configuration is locked.
 *
 * Return:
 * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
 * ordered queue is configured.
 */
int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
				    u32 group_id,
				    u32 val)
{
	const u32 valid_allocations[] = {64, 128, 256, 512, 1024};
	struct dlb2_sn_group *group;
	u32 sn_mode = 0;
	int mode;

	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	group = &hw->rsrcs.sn_groups[group_id];

	/*
	 * Once the first load-balanced queue using an SN group is configured,
	 * the group cannot be changed.
	 */
	if (group->slot_use_bitmap != 0)
		return -EPERM;

	for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
		if (val == valid_allocations[mode])
			break;

	if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
		return -EINVAL;

	group->mode = mode;
	group->sequence_numbers_per_queue = val;

	DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[0].mode,
		      DLB2_RO_GRP_SN_MODE_SN_MODE_0);
	DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[1].mode,
		      DLB2_RO_GRP_SN_MODE_SN_MODE_1);

	DLB2_CSR_WR(hw, DLB2_RO_GRP_SN_MODE(hw->ver), sn_mode);

	dlb2_log_set_group_sequence_numbers(hw, group_id, val);

	return 0;
}
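/*
 * Illustrative sketch (not part of the driver): a PF-level tool might size a
 * group for 512-entry ordered queues before any ordered queue exists, then
 * read the setting back. The error handling shown is an assumption.
 *
 *	if (dlb2_set_group_sequence_numbers(hw, 0, 512))
 *		return -1;	// -EPERM if group 0 already has an ordered
 *				// queue, -EINVAL for a bad group ID or value
 *
 *	int sns_per_queue = dlb2_get_group_sequence_numbers(hw, 0); // 512
 */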
/**
 * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights
 * @hw: dlb2_hw handle for a particular device.
 * @weight: 8-entry array of arbiter weights.
 *
 * weight[N] programs priority N's weight. In cases where the 8 priorities are
 * reduced to 4 bins, the mapping is:
 * - weight[1] programs bin 0
 * - weight[3] programs bin 1
 * - weight[5] programs bin 2
 * - weight[7] programs bin 3
 */
void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8])
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN0);
	DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN1);
	DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN2);
	DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN3);
	DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN, reg);

	reg = 0;
	DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI0);
	DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI1);
	DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI2);
	DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI3);
	DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0(hw->ver), reg);

	reg = 0;
	DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0);
	DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1);
	DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2);
	DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3);
	DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0(hw->ver), reg);

	reg = 0;
	DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0);
	DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1);
	DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2);
	DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3);
	DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0, reg);

	reg = 0;
	DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI0);
	DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI1);
	DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI2);
	DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI3);
	DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0, reg);

	reg = 0;
	DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI0);
	DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI1);
	DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI2);
	DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI3);
	DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0(hw->ver), reg);

	reg = 0;
	DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN0);
	DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN1);
	DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN2);
	DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN3);
	DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN, reg);

	reg = 0;
	DLB2_BITS_SET(reg, weight[1], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI0);
	DLB2_BITS_SET(reg, weight[3], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI1);
	DLB2_BITS_SET(reg, weight[5], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI2);
	DLB2_BITS_SET(reg, weight[7], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI3);
	DLB2_CSR_WR(hw, DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0, reg);
}
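/*
 * Illustrative sketch (the values are assumptions, not recommendations): only
 * the odd indices matter when the 8 priorities are folded into 4 bins, so a
 * caller weighting bin 0 four times heavier than bin 3 might program:
 *
 *	u8 weight[8] = { 0, 8, 0, 6, 0, 4, 0, 2 };
 *
 *	dlb2_hw_set_qe_arbiter_weights(hw, weight);
 *	dlb2_hw_set_qid_arbiter_weights(hw, weight);
 */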
/**
 * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights
 * @hw: dlb2_hw handle for a particular device.
 * @weight: 8-entry array of arbiter weights.
 *
 * weight[N] programs priority N's weight. In cases where the 8 priorities are
 * reduced to 4 bins, the mapping is:
 * - weight[1] programs bin 0
 * - weight[3] programs bin 1
 * - weight[5] programs bin 2
 * - weight[7] programs bin 3
 */
void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8])
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI0_WEIGHT);
	DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI1_WEIGHT);
	DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI2_WEIGHT);
	DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI3_WEIGHT);
	DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0(hw->ver), reg);

	reg = 0;
	DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI0_WEIGHT);
	DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI1_WEIGHT);
	DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI2_WEIGHT);
	DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI3_WEIGHT);
	DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0(hw->ver), reg);
}
static void dlb2_log_enable_cq_weight(struct dlb2_hw *hw,
				      u32 domain_id,
				      struct dlb2_enable_cq_weight_args *args,
				      bool vdev_req,
				      unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 enable CQ weight arguments:\n");
	DLB2_HW_DBG(hw, "\tvdev_req %d, vdev_id %d\n", vdev_req, vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
	DLB2_HW_DBG(hw, "\tLimit: %d\n", args->limit);
}
static int
dlb2_verify_enable_cq_weight_args(struct dlb2_hw *hw,
				  u32 domain_id,
				  struct dlb2_enable_cq_weight_args *args,
				  struct dlb2_cmd_response *resp,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_port *port;

	if (hw->ver == DLB2_HW_V2) {
		resp->status = DLB2_ST_FEATURE_UNAVAILABLE;
		return -EINVAL;
	}

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
	if (!port || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	if (args->limit == 0 || args->limit > port->cq_depth) {
		resp->status = DLB2_ST_INVALID_CQ_WEIGHT_LIMIT;
		return -EINVAL;
	}

	return 0;
}
int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_enable_cq_weight_args *args,
			     struct dlb2_cmd_response *resp,
			     bool vdev_req,
			     unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_port *port;
	int ret, id;
	u32 reg = 0;

	dlb2_log_enable_cq_weight(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_enable_cq_weight_args(hw, domain_id, args, resp,
						vdev_req, vdev_id);
	if (ret)
		return ret;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (!domain) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: domain not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
	if (!port) {
		DLB2_HW_ERR(hw,
			    "[%s(): %d] Internal error: port not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	DLB2_BIT_SET(reg, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_V);
	DLB2_BITS_SET(reg, args->limit, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_LIMIT);

	DLB2_CSR_WR(hw, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id), reg);

	resp->status = 0;

	return 0;
}
static void dlb2_log_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bw)
{
	DLB2_HW_DBG(hw, "DLB2 set port CoS bandwidth:\n");
	DLB2_HW_DBG(hw, "\tCoS ID: %u\n", cos_id);
	DLB2_HW_DBG(hw, "\tBandwidth: %u\n", bw);
}

#define DLB2_MAX_BW_PCT 100
/**
 * dlb2_hw_set_cos_bandwidth() - set a bandwidth allocation percentage for a
 *	port class-of-service.
 * @hw: dlb2_hw handle for a particular device.
 * @cos_id: class-of-service ID.
 * @bandwidth: class-of-service bandwidth.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise.
 *
 * Errors:
 * EINVAL - Invalid cos ID, bandwidth is greater than 100, or bandwidth would
 *	    cause the total bandwidth across all classes of service to exceed
 *	    100%.
 */
int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth)
{
	unsigned int i;
	u32 reg;
	u8 total;

	if (cos_id >= DLB2_NUM_COS_DOMAINS)
		return -EINVAL;

	if (bandwidth > DLB2_MAX_BW_PCT)
		return -EINVAL;

	total = 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		total += (i == cos_id) ? bandwidth : hw->cos_reservation[i];

	if (total > DLB2_MAX_BW_PCT)
		return -EINVAL;

	reg = DLB2_CSR_RD(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id));

	/*
	 * Normalize the bandwidth to a value in the range 0-255. Integer
	 * division may leave unreserved scheduling slots; these will be
	 * divided among the 4 classes of service.
	 */
	DLB2_BITS_SET(reg, (bandwidth * 256) / 100,
		      DLB2_LSP_CFG_SHDW_RANGE_COS_BW_RANGE);
	DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id), reg);

	reg = 0;
	DLB2_BIT_SET(reg, DLB2_LSP_CFG_SHDW_CTRL_TRANSFER);
	/* Atomically transfer the newly configured service weight */
	DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_CTRL(hw->ver), reg);

	dlb2_log_set_cos_bandwidth(hw, cos_id, bandwidth);

	hw->cos_reservation[cos_id] = bandwidth;
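	/*
	 * Worked example of the normalization above (arithmetic only): a 30%
	 * reservation programs (30 * 256) / 100 = 76 of the 256 scheduling
	 * slots, i.e. 29.7% exactly; the fraction lost to integer division is
	 * redistributed among the classes of service by hardware.
	 */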