/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#define DLB2_USE_NEW_HEADERS /* TEMPORARY FOR MERGE */

#include "dlb2_hw_types_new.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs_new.h"
#include "dlb2_resource_new.h" /* TEMP FOR UPSTREAM PATCHES */

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"

#define DLB2_DOM_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)

/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function only needs to be called for configuration that can occur after the
 * domain has started; prior to starting, applications can't send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
}

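/*
 * Minimal usage sketch of the pattern above (illustrative only; the
 * dlb2_example_* helper is hypothetical and not part of the driver): a
 * post-start configuration write is followed by dlb2_flush_csr() so the
 * write is guaranteed to have completed before any subsequent HCWs.
 */
static inline void dlb2_example_flushed_csr_write(struct dlb2_hw *hw,
						  u32 reg, u32 val)
{
	/* Post the configuration write */
	DLB2_CSR_WR(hw, reg, val);

	/* Read back a CSR to force the write to complete */
	dlb2_flush_csr(hw);
}
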
static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
	int i;

	dlb2_list_init_head(&domain->used_ldb_queues);
	dlb2_list_init_head(&domain->used_dir_pq_pairs);
	dlb2_list_init_head(&domain->avail_ldb_queues);
	dlb2_list_init_head(&domain->avail_dir_pq_pairs);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&domain->used_ldb_ports[i]);
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}

static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
{
	int i;

	dlb2_list_init_head(&rsrc->avail_domains);
	dlb2_list_init_head(&rsrc->used_domains);
	dlb2_list_init_head(&rsrc->avail_ldb_queues);
	dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
}

/**
 * dlb2_resource_free() - free device state memory
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function frees software state pointed to by dlb2_hw. This function
 * should be called when resetting the device or unloading the driver.
 */
void dlb2_resource_free(struct dlb2_hw *hw)
{
	int i;

	if (hw->pf.avail_hist_list_entries)
		dlb2_bitmap_free(hw->pf.avail_hist_list_entries);

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		if (hw->vdev[i].avail_hist_list_entries)
			dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
	}
}

/**
 * dlb2_resource_init() - initialize the device
 * @hw: pointer to struct dlb2_hw.
 * @ver: device version.
 *
 * This function initializes the device's software state (pointed to by the hw
 * argument) and programs global scheduling QoS registers. This function should
 * be called during driver initialization, and the dlb2_hw structure should
 * be zero-initialized before calling the function.
 *
 * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
 * device is reset.
 *
 * Return:
 * Returns 0 upon success, <0 otherwise.
 */
int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
	struct dlb2_list_entry *list;
	unsigned int i;
	int ret;

	/*
	 * For optimal load-balancing, ports that map to one or more QIDs in
	 * common should not be in numerical sequence. The port->QID mapping is
	 * application dependent, but the driver interleaves port IDs as much
	 * as possible to reduce the likelihood of sequential ports mapping to
	 * the same QID(s). This initial allocation of port IDs maximizes the
	 * average distance between an ID and its immediate neighbors (i.e.
	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
	 * 3, etc.).
	 */
	const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
		0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,
		16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
		32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
		48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
	};

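	/*
	 * Worked example of the property above (illustrative): ID 0 sits in
	 * slot 0 and ID 1 in slot 7 of this table, so the numerically
	 * adjacent IDs 0 and 1 are handed out seven allocations apart;
	 * likewise ID 2 (slot 14) is seven allocations from ID 1. No two
	 * consecutive IDs occupy adjacent slots.
	 */
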
	hw->ver = ver;

	dlb2_init_fn_rsrc_lists(&hw->pf);

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
		dlb2_init_fn_rsrc_lists(&hw->vdev[i]);

	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		dlb2_init_domain_rsrc_lists(&hw->domains[i]);
		hw->domains[i].parent_func = &hw->pf;
	}

	/* Give all resources to the PF driver */
	hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
	for (i = 0; i < hw->pf.num_avail_domains; i++) {
		list = &hw->domains[i].func_list;

		dlb2_list_add(&hw->pf.avail_domains, list);
	}

	hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
		list = &hw->rsrcs.ldb_queues[i].func_list;

		dlb2_list_add(&hw->pf.avail_ldb_queues, list);
	}

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		hw->pf.num_avail_ldb_ports[i] =
			DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;

	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
		/* With 64 LDB ports and 4 classes, i >> 4 yields 16 per CoS */
		int cos_id = i >> DLB2_NUM_COS_DOMAINS;
		struct dlb2_ldb_port *port;

		port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

		dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
			      &port->func_list);
	}

	hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
		list = &hw->rsrcs.dir_pq_pairs[i].func_list;

		dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
	}

	if (hw->ver == DLB2_HW_V2) {
		hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
		hw->pf.num_avail_dqed_entries =
			DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
	} else {
		hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
	}

	hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;

	ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
				DLB2_MAX_NUM_HIST_LIST_ENTRIES);
	if (ret)
		goto unwind;

	ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
	if (ret)
		goto unwind;

	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
		ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
					DLB2_MAX_NUM_HIST_LIST_ENTRIES);
		if (ret)
			goto unwind;

		ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
		if (ret)
			goto unwind;
	}

	/* Initialize the hardware resource IDs */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		hw->domains[i].id.phys_id = i;
		hw->domains[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
		hw->rsrcs.ldb_queues[i].id.phys_id = i;
		hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
		hw->rsrcs.ldb_ports[i].id.phys_id = i;
		hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
		hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
		hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
	}

	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
		hw->rsrcs.sn_groups[i].id = i;
		/* Default mode (0) is 64 sequence numbers per queue */
		hw->rsrcs.sn_groups[i].mode = 0;
		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
	}

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;

	return 0;

unwind:
	dlb2_resource_free(hw);

	return ret;
}

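/*
 * Lifecycle sketch (illustrative; the real call sites live in the PF
 * probe/remove paths, and the names here are assumptions):
 * dlb2_resource_init() pairs with dlb2_resource_free(), and the dlb2_hw
 * structure must be zero-initialized first.
 *
 *	struct dlb2_hw *hw = ...;	(zeroed at allocation)
 *
 *	if (dlb2_resource_init(hw, DLB2_HW_V2_5))
 *		goto init_fail;
 *	...
 *	dlb2_resource_free(hw);
 */
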
/**
 * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
 * @hw: dlb2_hw handle for a particular device.
 * @ver: device version.
 *
 * Clearing the PMCSR must be done at initialization to make the device fully
 * operational.
 */
void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
{
	u32 pmcsr_dis;

	pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));

	DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);

	DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
}

/**
 * dlb2_hw_get_num_resources() - query the PCI function's available resources
 * @hw: dlb2_hw handle for a particular device.
 * @arg: pointer to resource counts.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function returns the number of available resources for the PF or for a
 * VF.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, -EINVAL if vdev_req is true and vdev_id is
 * invalid.
 */
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
			      struct dlb2_get_num_resources_args *arg,
			      bool vdev_req,
			      unsigned int vdev_id)
{
	struct dlb2_function_resources *rsrcs;
	struct dlb2_bitmap *map;
	int i;

	if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
		return -EINVAL;

	if (vdev_req)
		rsrcs = &hw->vdev[vdev_id];
	else
		rsrcs = &hw->pf;

	arg->num_sched_domains = rsrcs->num_avail_domains;

	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;

	arg->num_ldb_ports = 0;
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
		arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];

	arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
	arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
	arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
	arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];

	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;

	arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;

	map = rsrcs->avail_hist_list_entries;

	arg->num_hist_list_entries = dlb2_bitmap_count(map);

	arg->max_contiguous_hist_list_entries =
		dlb2_bitmap_longest_set_range(map);

	if (hw->ver == DLB2_HW_V2) {
		arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
		arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
	} else {
		arg->num_credits = rsrcs->num_avail_entries;
	}

	return 0;
}

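/*
 * Usage sketch (illustrative only; the dlb2_example_* helper is
 * hypothetical and not called by the driver): query the PF's free
 * resources and read back the aggregate load-balanced port count.
 */
static inline int dlb2_example_query_pf_ldb_ports(struct dlb2_hw *hw,
						  u32 *num_ldb_ports)
{
	struct dlb2_get_num_resources_args arg = {0};
	int ret;

	/* vdev_req = false, vdev_id unused: query the PF's resources */
	ret = dlb2_hw_get_num_resources(hw, &arg, false, 0);
	if (ret)
		return ret;

	*num_ldb_ports = arg.num_ldb_ports;

	return 0;
}
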
static void dlb2_configure_domain_credits_v2_5(struct dlb2_hw *hw,
					       struct dlb2_hw_domain *domain)
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, domain->num_credits, DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
	DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits_v2(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain)
{
	u32 reg = 0;

	DLB2_BITS_SET(reg, domain->num_ldb_credits,
		      DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
	DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), reg);

	reg = 0;
	DLB2_BITS_SET(reg, domain->num_dir_credits,
		      DLB2_CHP_CFG_DIR_VAS_CRD_COUNT);
	DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), reg);
}

static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain)
{
	if (hw->ver == DLB2_HW_V2)
		dlb2_configure_domain_credits_v2(hw, domain);
	else
		dlb2_configure_domain_credits_v2_5(hw, domain);
}

static int dlb2_attach_credits(struct dlb2_function_resources *rsrcs,
			       struct dlb2_hw_domain *domain,
			       u32 num_credits,
			       struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_entries < num_credits) {
		resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_entries -= num_credits;
	domain->num_credits += num_credits;
	return 0;
}

static struct dlb2_ldb_port *
dlb2_get_next_ldb_port(struct dlb2_hw *hw,
		       struct dlb2_function_resources *rsrcs,
		       u32 domain_id,
		       u32 cos_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	RTE_SET_USED(iter);

	/*
	 * To reduce the odds of consecutive load-balanced ports mapping to the
	 * same queue(s), the driver attempts to allocate ports whose neighbors
	 * are owned by a different domain.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[next].owned ||
		    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
			continue;

		if (!hw->rsrcs.ldb_ports[prev].owned ||
		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
			continue;

		return port;
	}

	/*
	 * Failing that, the driver looks for a port with one neighbor owned by
	 * a different domain and the other unallocated.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
			return port;

		if (!hw->rsrcs.ldb_ports[next].owned &&
		    hw->rsrcs.ldb_ports[prev].owned &&
		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
			return port;
	}

	/*
	 * Failing that, the driver looks for a port with both neighbors
	 * unallocated.
	 */
	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
		u32 next, prev;
		u32 phys_id;

		phys_id = port->id.phys_id;
		next = phys_id + 1;
		prev = phys_id - 1;

		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
			next = 0;
		if (phys_id == 0)
			prev = DLB2_MAX_NUM_LDB_PORTS - 1;

		if (!hw->rsrcs.ldb_ports[prev].owned &&
		    !hw->rsrcs.ldb_ports[next].owned)
			return port;
	}

	/* If all else fails, the driver returns the next available port. */
	return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
				   typeof(*port));
}

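/*
 * The three passes above repeat the same wraparound neighbor computation;
 * it is shown here once for clarity as a hypothetical helper (not used by
 * the driver):
 */
static inline void dlb2_example_ldb_port_neighbors(u32 phys_id,
						   u32 *prev, u32 *next)
{
	/* Port IDs wrap around at the ends of the [0, MAX - 1] range */
	*next = (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1) ? 0 : phys_id + 1;
	*prev = (phys_id == 0) ? DLB2_MAX_NUM_LDB_PORTS - 1 : phys_id - 1;
}
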
static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
				   struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_ports,
				   u32 cos_id,
				   struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb2_ldb_port *port;

		port = dlb2_get_next_ldb_port(hw, rsrcs,
					      domain->id.phys_id, cos_id);
		if (port == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
			      &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb2_list_add(&domain->avail_ldb_ports[cos_id],
			      &port->domain_list);
	}

	rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;

	return 0;
}

static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
				 struct dlb2_function_resources *rsrcs,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_create_sched_domain_args *args,
				 struct dlb2_cmd_response *resp)
{
	unsigned int i, j, k;
	u32 cos_id;
	int ret;

	if (args->cos_strict) {
		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			u32 num = args->num_cos_ldb_ports[i];

			/* Allocate ports from specific classes-of-service */
			ret = __dlb2_attach_ldb_ports(hw, rsrcs, domain,
						      num, i, resp);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Attempt to allocate from a specific class-of-service, but
		 * fall back to the other classes if that fails.
		 */
		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
			for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
				for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
					cos_id = (i + k) %
						 DLB2_NUM_COS_DOMAINS;

					ret = __dlb2_attach_ldb_ports(hw,
								      rsrcs,
								      domain,
								      1,
								      cos_id,
								      resp);
					if (ret == 0)
						break;
				}

				if (ret)
					return ret;
			}
		}
	}

	/* Allocate num_ldb_ports from any class-of-service */
	for (i = 0; i < args->num_ldb_ports; i++) {
		for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
			ret = __dlb2_attach_ldb_ports(hw, rsrcs, domain,
						      1, j, resp);
			if (ret == 0)
				break;
		}

		if (ret)
			return ret;
	}

	return 0;
}

static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
				 struct dlb2_function_resources *rsrcs,
				 struct dlb2_hw_domain *domain,
				 u32 num_ports,
				 struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_ports; i++) {
		struct dlb2_dir_pq_pair *port;

		port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
					   typeof(*port));
		if (port == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);

		port->domain_id = domain->id;
		port->owned = true;

		dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
	}

	rsrcs->num_avail_dir_pq_pairs -= num_ports;

	return 0;
}

static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_credits,
				   struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_qed_entries < num_credits) {
		resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_qed_entries -= num_credits;
	domain->num_ldb_credits += num_credits;
	return 0;
}

static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
				   struct dlb2_hw_domain *domain,
				   u32 num_credits,
				   struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_dqed_entries < num_credits) {
		resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_dqed_entries -= num_credits;
	domain->num_dir_credits += num_credits;
	return 0;
}

static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
					struct dlb2_hw_domain *domain,
					u32 num_atomic_inflights,
					struct dlb2_cmd_response *resp)
{
	if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
	domain->num_avail_aqed_entries += num_atomic_inflights;
	return 0;
}

static int
dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
				     struct dlb2_hw_domain *domain,
				     u32 num_hist_list_entries,
				     struct dlb2_cmd_response *resp)
{
	struct dlb2_bitmap *bitmap;
	int base;

	if (num_hist_list_entries) {
		bitmap = rsrcs->avail_hist_list_entries;

		base = dlb2_bitmap_find_set_bit_range(bitmap,
						      num_hist_list_entries);
		if (base < 0)
			goto error;

		domain->total_hist_list_entries = num_hist_list_entries;
		domain->avail_hist_list_entries = num_hist_list_entries;
		domain->hist_list_entry_base = base;
		domain->hist_list_entry_offset = 0;

		dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
	}
	return 0;

error:
	resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
	return -EINVAL;
}

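/*
 * Worked example (illustrative): starting from a full PF bitmap, a domain
 * requesting 128 entries receives base 0 and bits 0-127 are cleared; a
 * second domain requesting 128 entries then receives base 128, and so on.
 * A request larger than the longest remaining contiguous set range fails
 * with DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE.
 */
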
static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
				  struct dlb2_function_resources *rsrcs,
				  struct dlb2_hw_domain *domain,
				  u32 num_queues,
				  struct dlb2_cmd_response *resp)
{
	unsigned int i;

	if (rsrcs->num_avail_ldb_queues < num_queues) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; i < num_queues; i++) {
		struct dlb2_ldb_queue *queue;

		queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
					    typeof(*queue));
		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: domain validation failed\n",
				    __func__);
			return -EFAULT;
		}

		dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);

		queue->domain_id = domain->id;
		queue->owned = true;

		dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
	}

	rsrcs->num_avail_ldb_queues -= num_queues;

	return 0;
}

static int
dlb2_domain_attach_resources(struct dlb2_hw *hw,
			     struct dlb2_function_resources *rsrcs,
			     struct dlb2_hw_domain *domain,
			     struct dlb2_create_sched_domain_args *args,
			     struct dlb2_cmd_response *resp)
{
	int ret;

	ret = dlb2_attach_ldb_queues(hw, rsrcs, domain,
				     args->num_ldb_queues, resp);
	if (ret)
		return ret;

	ret = dlb2_attach_ldb_ports(hw, rsrcs, domain, args, resp);
	if (ret)
		return ret;

	ret = dlb2_attach_dir_ports(hw, rsrcs, domain,
				    args->num_dir_ports, resp);
	if (ret)
		return ret;

	if (hw->ver == DLB2_HW_V2) {
		ret = dlb2_attach_ldb_credits(rsrcs, domain,
					      args->num_ldb_credits, resp);
		if (ret)
			return ret;

		ret = dlb2_attach_dir_credits(rsrcs, domain,
					      args->num_dir_credits, resp);
		if (ret)
			return ret;
	} else {  /* DLB 2.5 */
		ret = dlb2_attach_credits(rsrcs, domain,
					  args->num_credits, resp);
		if (ret)
			return ret;
	}

	ret = dlb2_attach_domain_hist_list_entries(rsrcs, domain,
						   args->num_hist_list_entries,
						   resp);
	if (ret)
		return ret;

	ret = dlb2_attach_atomic_inflights(rsrcs, domain,
					   args->num_atomic_inflights, resp);
	if (ret)
		return ret;

	dlb2_configure_domain_credits(hw, domain);

	domain->configured = true;

	domain->started = false;

	rsrcs->num_avail_domains--;

	return 0;
}

static int
dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
				  struct dlb2_create_sched_domain_args *args,
				  struct dlb2_cmd_response *resp,
				  struct dlb2_hw *hw,
				  struct dlb2_hw_domain **out_domain)
{
	u32 num_avail_ldb_ports, req_ldb_ports;
	struct dlb2_bitmap *avail_hl_entries;
	unsigned int max_contig_hl_range;
	struct dlb2_hw_domain *domain;
	int i;

	avail_hl_entries = rsrcs->avail_hist_list_entries;

	max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);

	num_avail_ldb_ports = 0;
	req_ldb_ports = 0;
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];

		req_ldb_ports += args->num_cos_ldb_ports[i];
	}

	req_ldb_ports += args->num_ldb_ports;

	if (rsrcs->num_avail_domains < 1) {
		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
		return -EINVAL;
	}

	domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
	if (domain == NULL) {
		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
		return -EFAULT;
	}

	if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
		return -EINVAL;
	}

	if (req_ldb_ports > num_avail_ldb_ports) {
		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
		if (args->num_cos_ldb_ports[i] >
		    rsrcs->num_avail_ldb_ports[i]) {
			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
		resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
		return -EINVAL;
	}

	if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
		return -EINVAL;
	}

	if (hw->ver == DLB2_HW_V2_5) {
		if (rsrcs->num_avail_entries < args->num_credits) {
			resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
	} else {
		if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
			resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
		if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
			resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
			return -EINVAL;
		}
	}

	if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
		return -EINVAL;
	}

	if (max_contig_hl_range < args->num_hist_list_entries) {
		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
		return -EINVAL;
	}

	*out_domain = domain;

	return 0;
}

static void
dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
				  struct dlb2_create_sched_domain_args *args,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tNumber of LDB queues: %d\n",
		    args->num_ldb_queues);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
		    args->num_ldb_ports);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0): %d\n",
		    args->num_cos_ldb_ports[0]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1): %d\n",
		    args->num_cos_ldb_ports[1]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2): %d\n",
		    args->num_cos_ldb_ports[2]);
	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3): %d\n",
		    args->num_cos_ldb_ports[3]);
	DLB2_HW_DBG(hw, "\tStrict CoS allocation: %d\n",
		    args->cos_strict);
	DLB2_HW_DBG(hw, "\tNumber of DIR ports: %d\n",
		    args->num_dir_ports);
	DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
		    args->num_atomic_inflights);
	DLB2_HW_DBG(hw, "\tNumber of hist list entries: %d\n",
		    args->num_hist_list_entries);
	if (hw->ver == DLB2_HW_V2) {
		DLB2_HW_DBG(hw, "\tNumber of LDB credits: %d\n",
			    args->num_ldb_credits);
		DLB2_HW_DBG(hw, "\tNumber of DIR credits: %d\n",
			    args->num_dir_credits);
	} else {
		DLB2_HW_DBG(hw, "\tNumber of credits: %d\n",
			    args->num_credits);
	}
}

/**
 * dlb2_hw_create_sched_domain() - create a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @args: scheduling domain creation arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function creates a scheduling domain containing the resources specified
 * in args. The individual resources (queues, ports, credits) can be configured
 * after creating a scheduling domain.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the domain ID.
 *
 * resp->id contains a virtual ID if vdev_req is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, or the requested domain name
 *	    is already in use.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
				struct dlb2_create_sched_domain_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	int ret;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp, hw, &domain);
	if (ret)
		return ret;

	dlb2_init_domain_rsrc_lists(domain);

	ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
	if (ret) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to attach the domain's resources\n",
			    __func__);

		return ret;
	}

	dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);

	dlb2_list_add(&rsrcs->used_domains, &domain->func_list);

	resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;

	return 0;
}

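/*
 * Usage sketch (illustrative only; the dlb2_example_* helper is
 * hypothetical and the resource counts are arbitrary illustration
 * values - real callers live in the PF/VF ioctl layers): create a
 * domain and read the assigned ID out of the response.
 */
static inline int dlb2_example_create_domain(struct dlb2_hw *hw,
					     u32 *domain_id)
{
	struct dlb2_create_sched_domain_args args = {0};
	struct dlb2_cmd_response resp = {0};
	int ret;

	args.num_ldb_queues = 1;
	args.num_ldb_ports = 2;
	args.num_atomic_inflights = 64;
	args.num_hist_list_entries = 2 * DLB2_MAX_NUM_QIDS_PER_LDB_CQ;

	/* PF-originated request: vdev_req = false, vdev_id unused */
	ret = dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0);
	if (ret)
		return ret;

	*domain_id = resp.id;

	return 0;
}
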
static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_dir_pq_pair *port)
{
	u32 reg = 0;

	DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
				   struct dlb2_dir_pq_pair *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));

	/*
	 * Account for the initial token count, which is used in order to
	 * provide a CQ with depth less than 8.
	 */
	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
	       port->init_tkn_cnt;
}

static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
			      struct dlb2_dir_pq_pair *port)
{
	unsigned int port_id = port->id.phys_id;
	u32 cnt;

	/* Return any outstanding tokens */
	cnt = dlb2_dir_cq_token_count(hw, port);

	if (cnt != 0) {
		struct dlb2_hcw hcw_mem[8], *hcw;
		void __iomem *pp_addr;

		pp_addr = os_map_producer_port(hw, port_id, false);

		/* Point hcw to a 64B-aligned location */
		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

		/*
		 * Program the first HCW for a batch token return and
		 * the rest as NOOPS
		 */
		memset(hcw, 0, 4 * sizeof(*hcw));
		hcw->cq_token = 1;
		hcw->lock_id = cnt - 1;

		dlb2_movdir64b(pp_addr, hcw);

		os_fence_hcw(hw, pp_addr);

		os_unmap_producer_port(hw, pp_addr);
	}
}

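/*
 * Note on the alignment trick used by the CQ drain routines (worked
 * arithmetic, illustrative): each HCW is 16B, so hcw_mem[8] spans 128B.
 * &hcw_mem[4] lies 64B past the array base, and masking with ~0x3F rounds
 * that address down to a 64B boundary, which therefore still falls within
 * the array and leaves four contiguous HCWs (64B) of storage for the
 * MOVDIR64B payload.
 */
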
static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *port)
{
	u32 reg = 0;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
				     struct dlb2_hw_domain *domain,
				     bool toggle_port)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		/*
		 * Can't drain a port if it's not configured, and there's
		 * nothing to drain if its queue is unconfigured.
		 */
		if (!port->port_configured || !port->queue_configured)
			continue;

		if (toggle_port)
			dlb2_dir_port_cq_disable(hw, port);

		dlb2_drain_dir_cq(hw, port);

		if (toggle_port)
			dlb2_dir_port_cq_enable(hw, port);
	}

	return 0;
}

static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
				struct dlb2_dir_pq_pair *queue)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
							   queue->id.phys_id));

	return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
}

static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
				    struct dlb2_dir_pq_pair *queue)
{
	return dlb2_dir_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
		if (!dlb2_dir_queue_is_empty(hw, queue))
			return false;
	}

	return true;
}

static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	int i;

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
		dlb2_domain_drain_dir_cqs(hw, domain, true);

		if (dlb2_domain_dir_queues_empty(hw, domain))
			break;
	}

	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to empty queues\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * Drain the CQs one more time. For the queues to have gone empty,
	 * they must have scheduled one or more QEs.
	 */
	dlb2_domain_drain_dir_cqs(hw, domain, true);

	return 0;
}

static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_ldb_port *port)
{
	u32 reg = 0;

	/*
	 * Don't re-enable the port if a removal is pending. The caller should
	 * mark this port as enabled (if it isn't already), and when the
	 * removal completes the port will be enabled.
	 */
	if (port->num_pending_removals)
		return;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_ldb_port *port)
{
	u32 reg = 0;

	DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);

	dlb2_flush_csr(hw);
}

static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
				      struct dlb2_ldb_port *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));

	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
}

static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port)
{
	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));

	/*
	 * Account for the initial token count, which is used in order to
	 * provide a CQ with depth less than 8.
	 */
	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
	       port->init_tkn_cnt;
}

static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
{
	u32 infl_cnt, tkn_cnt;
	unsigned int i;

	infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
	tkn_cnt = dlb2_ldb_cq_token_count(hw, port);

	if (infl_cnt || tkn_cnt) {
		struct dlb2_hcw hcw_mem[8], *hcw;
		void __iomem *pp_addr;

		pp_addr = os_map_producer_port(hw, port->id.phys_id, true);

		/* Point hcw to a 64B-aligned location */
		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);

		/*
		 * Program the first HCW for a completion and token return and
		 * the other HCWs as NOOPS
		 */
		memset(hcw, 0, 4 * sizeof(*hcw));
		hcw->qe_comp = (infl_cnt > 0);
		hcw->cq_token = (tkn_cnt > 0);
		hcw->lock_id = tkn_cnt - 1;

		/* Return tokens in the first HCW */
		dlb2_movdir64b(pp_addr, hcw);

		hcw->cq_token = 0;

		/* Issue remaining completions (if any) */
		for (i = 1; i < infl_cnt; i++)
			dlb2_movdir64b(pp_addr, hcw);

		os_fence_hcw(hw, pp_addr);

		os_unmap_producer_port(hw, pp_addr);
	}
}

static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain,
				      bool toggle_port)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			if (toggle_port)
				dlb2_ldb_port_cq_disable(hw, port);

			dlb2_drain_ldb_cq(hw, port);

			if (toggle_port)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}

static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
				struct dlb2_ldb_queue *queue)
{
	u32 aqed, ldb, atm;

	aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
							    queue->id.phys_id));
	ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
							   queue->id.phys_id));
	atm = DLB2_CSR_RD(hw,
			  DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));

	return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
	       + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
	       + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
}

static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
				    struct dlb2_ldb_queue *queue)
{
	return dlb2_ldb_queue_depth(hw, queue) == 0;
}

static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
					    struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		if (queue->num_mappings == 0)
			continue;

		if (!dlb2_ldb_queue_is_empty(hw, queue))
			return false;
	}

	return true;
}

static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
					   struct dlb2_hw_domain *domain)
{
	int i;

	/* If the domain hasn't been started, there's no traffic to drain */
	if (!domain->started)
		return 0;

	if (domain->num_pending_removals > 0) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to unmap domain queues\n",
			    __func__);
		return -EFAULT;
	}

	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
		dlb2_domain_drain_ldb_cqs(hw, domain, true);

		if (dlb2_domain_mapped_queues_empty(hw, domain))
			break;
	}

	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: failed to empty queues\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * Drain the CQs one more time. For the queues to have gone empty,
	 * they must have scheduled one or more QEs.
	 */
	dlb2_domain_drain_ldb_cqs(hw, domain, true);

	return 0;
}

static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
				       struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			port->enabled = true;

			dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}

static struct dlb2_ldb_queue *
dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
			   u32 id,
			   bool vdev_req,
			   unsigned int vdev_id)
{
	struct dlb2_list_entry *iter1;
	struct dlb2_list_entry *iter2;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter1);
	RTE_SET_USED(iter2);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	if (!vdev_req)
		return &hw->rsrcs.ldb_queues[id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
			if (queue->id.virt_id == id)
				return queue;
		}
	}

	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
		if (queue->id.virt_id == id)
			return queue;
	}

	return NULL;
}

static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
						      u32 id,
						      bool vdev_req,
						      unsigned int vdev_id)
{
	struct dlb2_list_entry *iteration;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	RTE_SET_USED(iteration);

	if (id >= DLB2_MAX_NUM_DOMAINS)
		return NULL;

	if (!vdev_req)
		return &hw->domains[id];

	rsrcs = &hw->vdev[vdev_id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
		if (domain->id.virt_id == id)
			return domain;
	}

	return NULL;
}

static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot,
					   enum dlb2_qid_map_state new_state)
{
	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
	struct dlb2_hw_domain *domain;
	int domain_id;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, domain_id);
		return -EFAULT;
	}

	switch (curr_state) {
	case DLB2_QUEUE_UNMAPPED:
		switch (new_state) {
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			break;
		case DLB2_QUEUE_MAP_IN_PROG:
			queue->num_pending_additions++;
			domain->num_pending_additions++;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAPPED:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG:
			port->num_pending_removals++;
			domain->num_pending_removals++;
			break;
		case DLB2_QUEUE_MAPPED:
			/* Priority change, nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_MAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
			/* Nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
		switch (new_state) {
		case DLB2_QUEUE_UNMAP_IN_PROG:
			/* Nothing to update */
			break;
		case DLB2_QUEUE_UNMAPPED:
			/*
			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
			 * becomes UNMAPPED before it transitions to
			 * MAP_IN_PROG.
			 */
			queue->num_mappings--;
			port->num_mappings--;
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		default:
			goto error;
		}
		break;
	default:
		goto error;
	}

	port->qid_map[slot].state = new_state;

	DLB2_HW_DBG(hw,
		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return 0;

error:
	DLB2_HW_ERR(hw,
		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return -EFAULT;
}

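/*
 * Summary of the transitions accepted above (anything else is treated as
 * an internal error):
 *
 *	UNMAPPED                  -> MAPPED, MAP_IN_PROG
 *	MAPPED                    -> UNMAPPED, UNMAP_IN_PROG, MAPPED
 *	MAP_IN_PROG               -> UNMAPPED, MAPPED
 *	UNMAP_IN_PROG             -> UNMAPPED, MAPPED,
 *	                             UNMAP_IN_PROG_PENDING_MAP
 *	UNMAP_IN_PROG_PENDING_MAP -> UNMAP_IN_PROG, UNMAPPED
 */
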
static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
				enum dlb2_qid_map_state state,
				int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
				      enum dlb2_qid_map_state state,
				      struct dlb2_ldb_queue *queue,
				      int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state &&
		    port->qid_map[i].qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

/*
 * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
 * their function names imply, and should only be called by the dynamic CQ
 * mapping code.
 */
static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
					      struct dlb2_hw_domain *domain,
					      struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}

static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain,
					     struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}

static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
						struct dlb2_ldb_port *port,
						int slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}

static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
					struct dlb2_ldb_port *p,
					struct dlb2_ldb_queue *q,
					u8 priority)
{
	enum dlb2_qid_map_state state;
	u32 lsp_qid2cq2;
	u32 lsp_qid2cq;
	u32 atm_qid2cq;
	u32 cq2priov;
	u32 cq2qid;
	int i;

	/* Look for a pending or already mapped slot, else an unused slot */
	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/* Read-modify-write the priority and valid bit register */
	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));

	cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
	cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
		    & DLB2_LSP_CQ2PRIOV_PRIO;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);

	/* Read-modify-write the QID map register */
	if (i < 4)
		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
							  p->id.phys_id));
	else
		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
							  p->id.phys_id));

	if (i == 0 || i == 4)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
	if (i == 1 || i == 5)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
	if (i == 2 || i == 6)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
	if (i == 3 || i == 7)
		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);

	if (i < 4)
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
	else
		DLB2_CSR_WR(hw,
			    DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);

	atm_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_ATM_QID2CQIDIX(q->id.phys_id,
						     p->id.phys_id / 4));

	lsp_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
						     p->id.phys_id / 4));

	lsp_qid2cq2 = DLB2_CSR_RD(hw,
				  DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
						       p->id.phys_id / 4));

	switch (p->id.phys_id % 4) {
	case 0:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
		break;

	case 1:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
		break;

	case 2:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
		break;

	case 3:
		DLB2_BIT_SET(atm_qid2cq,
			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
		DLB2_BIT_SET(lsp_qid2cq,
			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
		DLB2_BIT_SET(lsp_qid2cq2,
			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    atm_qid2cq);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(hw->ver,
					q->id.phys_id, p->id.phys_id / 4),
		    lsp_qid2cq);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(hw->ver,
					 q->id.phys_id, p->id.phys_id / 4),
		    lsp_qid2cq2);

	dlb2_flush_csr(hw);

	p->qid_map[i].qid = q->id.phys_id;
	p->qid_map[i].priority = priority;

	state = DLB2_QUEUE_MAPPED;

	return dlb2_port_slot_state_transition(hw, p, q, i, state);
}

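/*
 * Register layout note (derived from the code above, for orientation):
 * each load-balanced CQ has eight QID mapping slots. Slots 0-3 live in
 * the CQ2QID0 register (fields QID_P0..QID_P3) and slots 4-7 in CQ2QID1,
 * so e.g. slot 5 is written to CQ2QID1's QID_P1 field. The QID2CQIDIX*
 * registers are indexed the other way around: one register covers four
 * CQs, the CQ's phys_id % 4 selects the per-CQ field, and the slot index
 * selects the bit within that field.
 */
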
static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot)
{
	u32 ctrl = 0;
	u32 active;
	u32 enq;

	/* Set the atomic scheduling haswork bit */
	active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
							 queue->id.phys_id));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BITS_SET(ctrl,
		      DLB2_BITS_GET(active,
				    DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
		      DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	/* Set the non-atomic scheduling haswork bit */
	enq = DLB2_CSR_RD(hw,
			  DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
						       queue->id.phys_id));

	memset(&ctrl, 0, sizeof(ctrl));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
	DLB2_BITS_SET(ctrl,
		      DLB2_BITS_GET(enq,
				    DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
		      DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);

	return 0;
}

static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      u8 slot)
{
	u32 ctrl = 0;

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	memset(&ctrl, 0, sizeof(ctrl));

	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);

	dlb2_flush_csr(hw);
}

static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
					      struct dlb2_ldb_queue *queue)
{
	u32 infl_lim = 0;

	DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
		      DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);

	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
		    infl_lim);
}

static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
						struct dlb2_ldb_queue *queue)
{
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
}

static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
						struct dlb2_hw_domain *domain,
						struct dlb2_ldb_port *port,
						struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	enum dlb2_qid_map_state state;
	int slot, ret, i;
	u32 infl_cnt;
	u8 prio;
	RTE_SET_USED(iter);

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: non-zero QID inflight count\n",
			    __func__);
		return -EINVAL;
	}

	/*
	 * Static map the port and set its corresponding has_work bits.
	 */
	state = DLB2_QUEUE_MAP_IN_PROG;
	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
		return -EINVAL;

	prio = port->qid_map[slot].priority;

	/*
	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
	 * the port's qid_map state.
	 */
	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
	if (ret)
		return ret;

	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
	if (ret)
		return ret;

	/*
	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
	 * prevent spurious schedules from causing the queue's inflight
	 * count to increase.
	 */
	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);

	/* Reset the queue's inflight status */
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			state = DLB2_QUEUE_MAPPED;
			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
		}
	}

	dlb2_ldb_queue_set_inflight_limit(hw, queue);

	/* Re-enable CQs mapped to this queue */
	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

	/* If this queue has other mappings pending, clear its inflight limit */
	if (queue->num_pending_additions > 0)
		dlb2_ldb_queue_clear_inflight_limit(hw, queue);

	return 0;
}

/*
 * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
 * @hw: dlb2_hw handle for a particular device.
 * @port: load-balanced port
 * @queue: load-balanced queue
 * @priority: queue servicing priority
 *
 * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
 * at a later point, and <0 if an error occurred.
 */
static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
					 struct dlb2_ldb_port *port,
					 struct dlb2_ldb_queue *queue,
					 u8 priority)
{
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	int domain_id, slot, ret;
	u32 infl_cnt;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, port->domain_id.phys_id);
		return -EINVAL;
	}

	/*
	 * Set the QID inflight limit to 0 to prevent further scheduling of the
	 * queue.
	 */
	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
						  queue->id.phys_id), 0);

	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
		DLB2_HW_ERR(hw,
			    "Internal error: No available unmapped slots\n");
		return -EFAULT;
	}

	port->qid_map[slot].qid = queue->id.phys_id;
	port->qid_map[slot].priority = priority;

	state = DLB2_QUEUE_MAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
	if (ret)
		return ret;

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	/*
	 * Disable the affected CQ, and the CQs already mapped to the QID,
	 * before reading the QID's inflight count a second time. There is an
	 * unlikely race in which the QID may schedule one more QE after we
	 * read an inflight count of 0, and disabling the CQs guarantees that
	 * the race will not occur after a re-read of the inflight count
	 * register.
	 */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

	infl_cnt = DLB2_CSR_RD(hw,
			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
							 queue->id.phys_id));

	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
		if (port->enabled)
			dlb2_ldb_port_cq_enable(hw, port);

		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}

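/*
 * Caller sketch (illustrative) for the tri-state return value above: 0
 * means the queue is mapped and schedulable immediately; 1 means the map
 * is deferred and will be completed by the worker scheduled via
 * os_schedule_work(); <0 reports an invalid request or internal error.
 *
 *	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 1)
 *		poll until port->qid_map[slot].state == DLB2_QUEUE_MAPPED
 */
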
static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain,
					struct dlb2_ldb_port *port)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		u32 infl_cnt;
		struct dlb2_ldb_queue *queue;
		int qid;

		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
			continue;

		qid = port->qid_map[i].qid;

		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);

		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: unable to find queue %d\n",
				    __func__, qid);
			continue;
		}

		infl_cnt = DLB2_CSR_RD(hw,
				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));

		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
			continue;

		/*
		 * Disable the affected CQ, and the CQs already mapped to the
		 * QID, before reading the QID's inflight count a second time.
		 * There is an unlikely race in which the QID may schedule one
		 * more QE after we read an inflight count of 0, and disabling
		 * the CQs guarantees that the race will not occur after a
		 * re-read of the inflight count register.
		 */
		if (port->enabled)
			dlb2_ldb_port_cq_disable(hw, port);

		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

		infl_cnt = DLB2_CSR_RD(hw,
				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));

		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);

			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

			continue;
		}

		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
	}
}

static unsigned int
dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_additions == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_map_port(hw, domain, port);
	}

	return domain->num_pending_additions;
}

static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port,
				   struct dlb2_ldb_queue *queue)
{
	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
	u32 lsp_qid2cq2;
	u32 lsp_qid2cq;
	u32 atm_qid2cq;
	u32 cq2priov;
	u32 queue_id;
	u32 port_id;
	int i;

	/* Find the queue's slot */
	mapped = DLB2_QUEUE_MAPPED;
	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: QID %d isn't mapped\n",
			    __func__, __LINE__, queue->id.phys_id);
		return -EFAULT;
	}

	port_id = port->id.phys_id;
	queue_id = queue->id.phys_id;

	/* Read-modify-write the priority and valid bit register */
	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));

	cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);

	atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
							 port_id / 4));

	lsp_qid2cq = DLB2_CSR_RD(hw,
				 DLB2_LSP_QID2CQIDIX(hw->ver,
						     queue_id, port_id / 4));

	lsp_qid2cq2 = DLB2_CSR_RD(hw,
				  DLB2_LSP_QID2CQIDIX2(hw->ver,
						       queue_id, port_id / 4));

	switch (port_id % 4) {
	case 0:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
		break;

	case 1:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
		break;

	case 2:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
		break;

	case 3:
		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
		break;
	}

	DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);

	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
		    lsp_qid2cq);

	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
		    lsp_qid2cq2);

	dlb2_flush_csr(hw);

	unmapped = DLB2_QUEUE_UNMAPPED;

	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
}

static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_ldb_port *port,
				 struct dlb2_ldb_queue *queue,
				 u8 prio)
{
	if (domain->started)
		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
	else
		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
}

static void
dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
				   struct dlb2_hw_domain *domain,
				   struct dlb2_ldb_port *port,
				   int slot)
{
	enum dlb2_qid_map_state state;
	struct dlb2_ldb_queue *queue;

	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];

	state = port->qid_map[slot].state;

	/* Update the QID2CQIDX and CQ2QID vectors */
	dlb2_ldb_port_unmap_qid(hw, port, queue);

	/*
	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
	 * the has_work bits
	 */
	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);

	/* Reset the {CQ, slot} to its default state */
	dlb2_ldb_port_set_queue_if_status(hw, port, slot);

	/* Re-enable the CQ if it was not manually disabled by the user */
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	/*
	 * If there is a mapping that is pending this slot's removal, perform
	 * the mapping now.
	 */
	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
		struct dlb2_ldb_port_qid_map *map;
		struct dlb2_ldb_queue *map_queue;
		u8 prio;

		map = &port->qid_map[slot];

		map->qid = map->pending_qid;
		map->priority = map->pending_priority;

		map_queue = &hw->rsrcs.ldb_queues[map->qid];
		prio = map->priority;

		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
	}
}

static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain,
					  struct dlb2_ldb_port *port)
{
	u32 infl_cnt;
	int i;

	if (port->num_pending_removals == 0)
		return false;

	/*
	 * The unmap requires all the CQ's outstanding inflights to be
	 * completed.
	 */
	infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
							    port->id.phys_id));
	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
		return false;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map;

		map = &port->qid_map[i];

		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
			continue;

		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
	}

	return true;
}

static unsigned int
dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_removals == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_unmap_port(hw, domain, port);
	}

	return domain->num_pending_removals;
}

static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			port->enabled = false;

			dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}

static void dlb2_log_reset_domain(struct dlb2_hw *hw,
				  u32 domain_id,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}

static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
					 struct dlb2_hw_domain *domain,
					 unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	u32 vpp_v = 0;
	RTE_SET_USED(iter);

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
		unsigned int offs;
		u32 virt_id;

		if (hw->virt_mode == DLB2_VIRT_SRIOV)
			virt_id = port->id.virt_id;
		else
			virt_id = port->id.phys_id;

		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;

		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
	}
}

2462 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2463 struct dlb2_hw_domain *domain,
2464 unsigned int vdev_id)
2466 struct dlb2_list_entry *iter;
2467 struct dlb2_ldb_port *port;
2472 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2473 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2477 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2478 virt_id = port->id.virt_id;
2479 else
2480 virt_id = port->id.phys_id;
2482 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2484 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
2490 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2491 struct dlb2_hw_domain *domain)
2493 struct dlb2_list_entry *iter;
2494 struct dlb2_ldb_port *port;
2500 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2501 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2503 DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,
2508 DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,
2516 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2517 struct dlb2_hw_domain *domain)
2519 struct dlb2_list_entry *iter;
2520 struct dlb2_dir_pq_pair *port;
2525 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2527 DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2531 DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
2537 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2538 struct dlb2_hw_domain *domain)
2540 int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2541 struct dlb2_list_entry *iter;
2542 struct dlb2_ldb_queue *queue;
2545 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2546 int idx = domain_offset + queue->id.phys_id;
2548 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);
2550 if (queue->id.vdev_owned) {
2551 DLB2_CSR_WR(hw,
2552 DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
2553 0);
2555 idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2556 queue->id.virt_id;
2558 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);
2560 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
2566 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2567 struct dlb2_hw_domain *domain)
2569 struct dlb2_list_entry *iter;
2570 struct dlb2_dir_pq_pair *queue;
2571 unsigned long max_ports;
2575 max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
2577 domain_offset = domain->id.phys_id * max_ports;
2579 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2580 int idx = domain_offset + queue->id.phys_id;
2582 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);
2584 if (queue->id.vdev_owned) {
2585 idx = queue->id.vdev_id * max_ports + queue->id.virt_id;
2587 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);
2589 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
2594 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2595 struct dlb2_hw_domain *domain)
2597 struct dlb2_list_entry *iter;
2598 struct dlb2_ldb_port *port;
2603 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2604 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2606 DLB2_CHP_SN_CHK_ENBL(hw->ver,
2613 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2614 struct dlb2_hw_domain *domain)
2616 struct dlb2_list_entry *iter;
2617 struct dlb2_ldb_port *port;
2621 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2622 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2625 for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
2626 if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2630 if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2631 DLB2_HW_ERR(hw,
2632 "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2633 __func__, port->id.phys_id);
2634 return -EFAULT;
2635 }
2642 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2643 struct dlb2_hw_domain *domain)
2645 struct dlb2_list_entry *iter;
2646 struct dlb2_dir_pq_pair *port;
2649 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2650 port->enabled = false;
2652 dlb2_dir_port_cq_disable(hw, port);
2657 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2658 struct dlb2_hw_domain *domain)
2660 struct dlb2_list_entry *iter;
2661 struct dlb2_dir_pq_pair *port;
2665 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2667 DLB2_SYS_DIR_PP_V(port->id.phys_id),
2673 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2674 struct dlb2_hw_domain *domain)
2676 struct dlb2_list_entry *iter;
2677 struct dlb2_ldb_port *port;
2682 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2683 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2685 DLB2_SYS_LDB_PP_V(port->id.phys_id),
2691 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2692 struct dlb2_hw_domain *domain)
2694 struct dlb2_list_entry *iter;
2695 struct dlb2_dir_pq_pair *dir_port;
2696 struct dlb2_ldb_port *ldb_port;
2697 struct dlb2_ldb_queue *queue;
2701 /*
2702 * Confirm that all the domain's queues' inflight counts and AQED
2703 * active counts are 0.
2704 */
2705 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2706 if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2708 "[%s()] Internal error: failed to empty ldb queue %d\n",
2709 __func__, queue->id.phys_id);
2714 /* Confirm that all the domain's CQs inflight and token counts are 0. */
2715 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2716 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2717 if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2718 dlb2_ldb_cq_token_count(hw, ldb_port)) {
2720 "[%s()] Internal error: failed to empty ldb port %d\n",
2721 __func__, ldb_port->id.phys_id);
2727 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2728 if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2730 "[%s()] Internal error: failed to empty dir queue %d\n",
2731 __func__, dir_port->id.phys_id);
2735 if (dlb2_dir_cq_token_count(hw, dir_port)) {
2737 "[%s()] Internal error: failed to empty dir port %d\n",
2738 __func__, dir_port->id.phys_id);
2746 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2747 struct dlb2_ldb_port *port)
2750 DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
2751 DLB2_SYS_LDB_PP2VAS_RST);
2754 DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
2755 DLB2_CHP_LDB_CQ2VAS_RST);
2758 DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
2759 DLB2_SYS_LDB_PP2VDEV_RST);
2761 if (port->id.vdev_owned) {
2765 /*
2766 * DLB uses producer port address bits 17:12 to determine the
2767 * producer port ID. In Scalable IOV mode, PP accesses come
2768 * through the PF MMIO window for the physical producer port,
2769 * so for translation purposes the virtual and physical port
2770 * IDs are equal.
2771 */
2772 if (hw->virt_mode == DLB2_VIRT_SRIOV)
2773 virt_id = port->id.virt_id;
2774 else
2775 virt_id = port->id.phys_id;
2777 offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2780 DLB2_SYS_VF_LDB_VPP2PP(offs),
2781 DLB2_SYS_VF_LDB_VPP2PP_RST);
2784 DLB2_SYS_VF_LDB_VPP_V(offs),
2785 DLB2_SYS_VF_LDB_VPP_V_RST);
2789 DLB2_SYS_LDB_PP_V(port->id.phys_id),
2790 DLB2_SYS_LDB_PP_V_RST);
2793 DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
2794 DLB2_LSP_CQ_LDB_DSBL_RST);
2797 DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
2798 DLB2_CHP_LDB_CQ_DEPTH_RST);
2800 if (hw->ver != DLB2_HW_V2)
2802 DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
2803 DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
2806 DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
2807 DLB2_LSP_CQ_LDB_INFL_LIM_RST);
2810 DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
2811 DLB2_CHP_HIST_LIST_LIM_RST);
2814 DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
2815 DLB2_CHP_HIST_LIST_BASE_RST);
2818 DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
2819 DLB2_CHP_HIST_LIST_POP_PTR_RST);
2822 DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
2823 DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
2826 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2827 DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
2830 DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2831 DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
2834 DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
2835 DLB2_CHP_LDB_CQ_INT_ENB_RST);
2838 DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
2839 DLB2_SYS_LDB_CQ_ISR_RST);
2842 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2843 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
2846 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2847 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
2850 DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
2851 DLB2_CHP_LDB_CQ_WPTR_RST);
2854 DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
2855 DLB2_LSP_CQ_LDB_TKN_CNT_RST);
2858 DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
2859 DLB2_SYS_LDB_CQ_ADDR_L_RST);
2862 DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
2863 DLB2_SYS_LDB_CQ_ADDR_U_RST);
2865 if (hw->ver == DLB2_HW_V2)
2867 DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
2868 DLB2_SYS_LDB_CQ_AT_RST);
2871 DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
2872 DLB2_SYS_LDB_CQ_PASID_RST);
2875 DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
2876 DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
2879 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
2880 DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
2883 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
2884 DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
2887 DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
2888 DLB2_LSP_CQ2QID0_RST);
2891 DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
2892 DLB2_LSP_CQ2QID1_RST);
2895 DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
2896 DLB2_LSP_CQ2PRIOV_RST);
2899 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
2900 struct dlb2_hw_domain *domain)
2902 struct dlb2_list_entry *iter;
2903 struct dlb2_ldb_port *port;
2907 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2908 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2909 __dlb2_domain_reset_ldb_port_registers(hw, port);
2914 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
2915 struct dlb2_dir_pq_pair *port)
2920 DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
2921 DLB2_CHP_DIR_CQ2VAS_RST);
2924 DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
2925 DLB2_LSP_CQ_DIR_DSBL_RST);
2927 DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);
2929 if (hw->ver == DLB2_HW_V2)
2930 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
2931 else
2932 DLB2_CSR_WR(hw,
2933 DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
2936 DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
2937 DLB2_CHP_DIR_CQ_DEPTH_RST);
2940 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
2941 DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
2944 DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
2945 DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
2948 DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2949 DLB2_CHP_DIR_CQ_INT_ENB_RST);
2952 DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
2953 DLB2_SYS_DIR_CQ_ISR_RST);
2956 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
2958 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
2961 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
2962 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
2965 DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
2966 DLB2_CHP_DIR_CQ_WPTR_RST);
2969 DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
2970 DLB2_LSP_CQ_DIR_TKN_CNT_RST);
2973 DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
2974 DLB2_SYS_DIR_CQ_ADDR_L_RST);
2977 DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
2978 DLB2_SYS_DIR_CQ_ADDR_U_RST);
2981 DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2982 DLB2_SYS_DIR_CQ_AT_RST);
2984 if (hw->ver == DLB2_HW_V2)
2986 DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
2987 DLB2_SYS_DIR_CQ_AT_RST);
2990 DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
2991 DLB2_SYS_DIR_CQ_PASID_RST);
2994 DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
2995 DLB2_SYS_DIR_CQ_FMT_RST);
2998 DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
2999 DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
3002 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3003 DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
3006 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3007 DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
3010 DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
3011 DLB2_SYS_DIR_PP2VAS_RST);
3014 DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
3015 DLB2_CHP_DIR_CQ2VAS_RST);
3018 DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
3019 DLB2_SYS_DIR_PP2VDEV_RST);
3021 if (port->id.vdev_owned) {
3025 /*
3026 * DLB uses producer port address bits 17:12 to determine the
3027 * producer port ID. In Scalable IOV mode, PP accesses come
3028 * through the PF MMIO window for the physical producer port,
3029 * so for translation purposes the virtual and physical port
3030 * IDs are equal.
3031 */
3032 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3033 virt_id = port->id.virt_id;
3034 else
3035 virt_id = port->id.phys_id;
3037 offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
3041 DLB2_SYS_VF_DIR_VPP2PP(offs),
3042 DLB2_SYS_VF_DIR_VPP2PP_RST);
3045 DLB2_SYS_VF_DIR_VPP_V(offs),
3046 DLB2_SYS_VF_DIR_VPP_V_RST);
3050 DLB2_SYS_DIR_PP_V(port->id.phys_id),
3051 DLB2_SYS_DIR_PP_V_RST);
3054 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3055 struct dlb2_hw_domain *domain)
3057 struct dlb2_list_entry *iter;
3058 struct dlb2_dir_pq_pair *port;
3061 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3062 __dlb2_domain_reset_dir_port_registers(hw, port);
3065 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
3066 struct dlb2_hw_domain *domain)
3068 struct dlb2_list_entry *iter;
3069 struct dlb2_ldb_queue *queue;
3072 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3073 unsigned int queue_id = queue->id.phys_id;
3077 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
3078 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3081 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
3082 DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3085 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
3086 DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3089 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
3090 DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3093 DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
3094 DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3097 DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
3098 DLB2_LSP_QID_LDB_INFL_LIM_RST);
3101 DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
3102 DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3105 DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
3106 DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3109 DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
3110 DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3113 DLB2_SYS_LDB_QID_ITS(queue_id),
3114 DLB2_SYS_LDB_QID_ITS_RST);
3117 DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
3118 DLB2_CHP_ORD_QID_SN_RST);
3121 DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
3122 DLB2_CHP_ORD_QID_SN_MAP_RST);
3125 DLB2_SYS_LDB_QID_V(queue_id),
3126 DLB2_SYS_LDB_QID_V_RST);
3129 DLB2_SYS_LDB_QID_CFG_V(queue_id),
3130 DLB2_SYS_LDB_QID_CFG_V_RST);
3132 if (queue->sn_cfg_valid) {
3135 offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
3137 offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
3141 offs[queue->sn_group],
3142 DLB2_RO_GRP_0_SLT_SHFT_RST);
3145 for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3147 DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
3148 DLB2_LSP_QID2CQIDIX_00_RST);
3151 DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
3152 DLB2_LSP_QID2CQIDIX2_00_RST);
3155 DLB2_ATM_QID2CQIDIX(queue_id, i),
3156 DLB2_ATM_QID2CQIDIX_00_RST);
3161 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3162 struct dlb2_hw_domain *domain)
3164 struct dlb2_list_entry *iter;
3165 struct dlb2_dir_pq_pair *queue;
3168 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3170 DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
3172 DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3175 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
3177 DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3180 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
3182 DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3185 DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
3187 DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3190 DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3191 DLB2_SYS_DIR_QID_ITS_RST);
3194 DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3195 DLB2_SYS_DIR_QID_V_RST);
3203 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3204 struct dlb2_hw_domain *domain)
3206 dlb2_domain_reset_ldb_port_registers(hw, domain);
3208 dlb2_domain_reset_dir_port_registers(hw, domain);
3210 dlb2_domain_reset_ldb_queue_registers(hw, domain);
3212 dlb2_domain_reset_dir_queue_registers(hw, domain);
3214 if (hw->ver == DLB2_HW_V2) {
3216 DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3217 DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3220 DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3221 DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3224 DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
3225 DLB2_CHP_CFG_VAS_CRD_RST);
3228 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3229 struct dlb2_hw_domain *domain)
3231 struct dlb2_dir_pq_pair *tmp_dir_port;
3232 struct dlb2_ldb_queue *tmp_ldb_queue;
3233 struct dlb2_ldb_port *tmp_ldb_port;
3234 struct dlb2_list_entry *iter1;
3235 struct dlb2_list_entry *iter2;
3236 struct dlb2_function_resources *rsrcs;
3237 struct dlb2_dir_pq_pair *dir_port;
3238 struct dlb2_ldb_queue *ldb_queue;
3239 struct dlb2_ldb_port *ldb_port;
3240 struct dlb2_list_head *list;
3242 RTE_SET_USED(tmp_dir_port);
3243 RTE_SET_USED(tmp_ldb_queue);
3244 RTE_SET_USED(tmp_ldb_port);
3245 RTE_SET_USED(iter1);
3246 RTE_SET_USED(iter2);
3248 rsrcs = domain->parent_func;
3250 /* Move the domain's ldb queues to the function's avail list */
3251 list = &domain->used_ldb_queues;
3252 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3253 if (ldb_queue->sn_cfg_valid) {
3254 struct dlb2_sn_group *grp;
3256 grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3258 dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3259 ldb_queue->sn_cfg_valid = false;
3262 ldb_queue->owned = false;
3263 ldb_queue->num_mappings = 0;
3264 ldb_queue->num_pending_additions = 0;
3266 dlb2_list_del(&domain->used_ldb_queues,
3267 &ldb_queue->domain_list);
3268 dlb2_list_add(&rsrcs->avail_ldb_queues,
3269 &ldb_queue->func_list);
3270 rsrcs->num_avail_ldb_queues++;
3273 list = &domain->avail_ldb_queues;
3274 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3275 ldb_queue->owned = false;
3277 dlb2_list_del(&domain->avail_ldb_queues,
3278 &ldb_queue->domain_list);
3279 dlb2_list_add(&rsrcs->avail_ldb_queues,
3280 &ldb_queue->func_list);
3281 rsrcs->num_avail_ldb_queues++;
3284 /* Move the domain's ldb ports to the function's avail list */
3285 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3286 list = &domain->used_ldb_ports[i];
3287 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3291 ldb_port->owned = false;
3292 ldb_port->configured = false;
3293 ldb_port->num_pending_removals = 0;
3294 ldb_port->num_mappings = 0;
3295 ldb_port->init_tkn_cnt = 0;
3296 ldb_port->cq_depth = 0;
3297 for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3298 ldb_port->qid_map[j].state =
3299 DLB2_QUEUE_UNMAPPED;
3301 dlb2_list_del(&domain->used_ldb_ports[i],
3302 &ldb_port->domain_list);
3303 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3304 &ldb_port->func_list);
3305 rsrcs->num_avail_ldb_ports[i]++;
3308 list = &domain->avail_ldb_ports[i];
3309 DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3311 ldb_port->owned = false;
3313 dlb2_list_del(&domain->avail_ldb_ports[i],
3314 &ldb_port->domain_list);
3315 dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3316 &ldb_port->func_list);
3317 rsrcs->num_avail_ldb_ports[i]++;
3321 /* Move the domain's dir ports to the function's avail list */
3322 list = &domain->used_dir_pq_pairs;
3323 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3324 dir_port->owned = false;
3325 dir_port->port_configured = false;
3326 dir_port->init_tkn_cnt = 0;
3328 dlb2_list_del(&domain->used_dir_pq_pairs,
3329 &dir_port->domain_list);
3331 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3332 &dir_port->func_list);
3333 rsrcs->num_avail_dir_pq_pairs++;
3336 list = &domain->avail_dir_pq_pairs;
3337 DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3338 dir_port->owned = false;
3340 dlb2_list_del(&domain->avail_dir_pq_pairs,
3341 &dir_port->domain_list);
3343 dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3344 &dir_port->func_list);
3345 rsrcs->num_avail_dir_pq_pairs++;
3348 /* Return hist list entries to the function */
3349 ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3350 domain->hist_list_entry_base,
3351 domain->total_hist_list_entries);
3354 "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
3359 domain->total_hist_list_entries = 0;
3360 domain->avail_hist_list_entries = 0;
3361 domain->hist_list_entry_base = 0;
3362 domain->hist_list_entry_offset = 0;
3364 if (hw->ver == DLB2_HW_V2_5) {
3365 rsrcs->num_avail_entries += domain->num_credits;
3366 domain->num_credits = 0;
3368 rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3369 domain->num_ldb_credits = 0;
3371 rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3372 domain->num_dir_credits = 0;
3374 rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3375 rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3376 domain->num_avail_aqed_entries = 0;
3377 domain->num_used_aqed_entries = 0;
3379 domain->num_pending_removals = 0;
3380 domain->num_pending_additions = 0;
3381 domain->configured = false;
3382 domain->started = false;
3384 /*
3385 * Move the domain out of the used_domains list and back to the
3386 * function's avail_domains list.
3387 */
3388 dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3389 dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3390 rsrcs->num_avail_domains++;
3392 return 0;
3393 }
3395 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3396 struct dlb2_hw_domain *domain,
3397 struct dlb2_ldb_queue *queue)
3399 struct dlb2_ldb_port *port = NULL;
3402 /* If a domain has LDB queues, it must have LDB ports */
3403 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3404 port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
3405 typeof(*port));
3406 if (port)
3407 break;
3410 if (port == NULL) {
3411 DLB2_HW_ERR(hw,
3412 "[%s()] Internal error: No configured LDB ports\n",
3413 __func__);
3414 return -EFAULT;
3415 }
3417 /* If necessary, free up a QID slot in this CQ */
3418 if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3419 struct dlb2_ldb_queue *mapped_queue;
3421 mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3423 ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3428 ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3432 return dlb2_domain_drain_mapped_queues(hw, domain);
3435 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3436 struct dlb2_hw_domain *domain)
3438 struct dlb2_list_entry *iter;
3439 struct dlb2_ldb_queue *queue;
3443 /* If the domain hasn't been started, there's no traffic to drain */
3444 if (!domain->started)
3445 return 0;
3447 /*
3448 * Pre-condition: the unattached queue must not have any outstanding
3449 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3450 * prior to this in dlb2_domain_drain_mapped_queues().
3451 */
3452 DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3453 if (queue->num_mappings != 0 ||
3454 dlb2_ldb_queue_is_empty(hw, queue))
3455 continue;
3457 ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3458 if (ret)
3459 return ret;
3462 return 0;
3463 }
3465 /**
3466 * dlb2_reset_domain() - reset a scheduling domain
3467 * @hw: dlb2_hw handle for a particular device.
3468 * @domain_id: domain ID.
3469 * @vdev_req: indicates whether this request came from a vdev.
3470 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3472 * This function resets and frees a DLB 2.0 scheduling domain and its
3473 * associated resources.
3475 * Pre-condition: the driver must ensure software has stopped sending QEs
3476 * through this domain's producer ports before invoking this function, or
3477 * undefined behavior will result.
3479 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3480 * device.
3483 * Returns 0 upon success, -1 otherwise.
3485 * EINVAL - Invalid domain ID, or the domain is not configured.
3486 * EFAULT - Internal error. (Possibly caused if software violates the
3487 *          pre-condition above.)
3488 * ETIMEDOUT - Hardware component didn't reset in the expected time.
3489 */
3490 int dlb2_reset_domain(struct dlb2_hw *hw,
3491 u32 domain_id,
3492 bool vdev_req,
3493 unsigned int vdev_id)
3494 {
3495 struct dlb2_hw_domain *domain;
3498 dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3500 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3502 if (domain == NULL || !domain->configured)
3503 return -EINVAL;
3505 /* Disable VPPs */
3506 if (vdev_req) {
3507 dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3509 dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3510 }
3512 /* Disable CQ interrupts */
3513 dlb2_domain_disable_dir_port_interrupts(hw, domain);
3515 dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3517 /*
3518 * For each queue owned by this domain, disable its write permissions to
3519 * cause any traffic sent to it to be dropped. Well-behaved software
3520 * should not be sending QEs at this point.
3521 */
3522 dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3524 dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3526 /* Turn off completion tracking on all the domain's PPs. */
3527 dlb2_domain_disable_ldb_seq_checks(hw, domain);
3529 /*
3530 * Disable the LDB CQs and drain them in order to complete the map and
3531 * unmap procedures, which require zero CQ inflights and zero QID
3532 * inflights respectively.
3533 */
3534 dlb2_domain_disable_ldb_cqs(hw, domain);
3536 dlb2_domain_drain_ldb_cqs(hw, domain, false);
3538 ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3539 if (ret)
3540 return ret;
3542 ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3543 if (ret)
3544 return ret;
3546 ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3547 if (ret)
3548 return ret;
3550 /* Re-enable the CQs in order to drain the mapped queues. */
3551 dlb2_domain_enable_ldb_cqs(hw, domain);
3553 ret = dlb2_domain_drain_mapped_queues(hw, domain);
3554 if (ret)
3555 return ret;
3557 ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3558 if (ret)
3559 return ret;
3561 /* Done draining LDB QEs, so disable the CQs. */
3562 dlb2_domain_disable_ldb_cqs(hw, domain);
3564 dlb2_domain_drain_dir_queues(hw, domain);
3566 /* Done draining DIR QEs, so disable the CQs. */
3567 dlb2_domain_disable_dir_cqs(hw, domain);
3570 dlb2_domain_disable_dir_producer_ports(hw, domain);
3572 dlb2_domain_disable_ldb_producer_ports(hw, domain);
3574 ret = dlb2_domain_verify_reset_success(hw, domain);
3575 if (ret)
3576 return ret;
3578 /* Reset the QID and port state. */
3579 dlb2_domain_reset_registers(hw, domain);
3581 /* Hardware reset complete. Reset the domain's software state */
3582 return dlb2_domain_reset_software_state(hw, domain);
3583 }
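/*
 * Illustrative sketch (not part of the driver): a PF-level teardown path
 * might reset every domain ID in turn. DLB2_MAX_NUM_DOMAINS and the bare
 * loop are assumptions for the example; -EINVAL here simply means the ID
 * is not a configured domain and can be skipped.
 *
 *	u32 id;
 *
 *	for (id = 0; id < DLB2_MAX_NUM_DOMAINS; id++) {
 *		int ret = dlb2_reset_domain(hw, id, false, 0);
 *
 *		if (ret && ret != -EINVAL)
 *			DLB2_HW_ERR(hw, "domain %u reset failed: %d\n",
 *				    id, ret);
 *	}
 */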
3585 static void
3586 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3588 struct dlb2_create_ldb_queue_args *args,
3590 unsigned int vdev_id)
3592 DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3594 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3595 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
3597 DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3598 args->num_sequence_numbers);
3599 DLB2_HW_DBG(hw, "\tNumber of QID inflights: %d\n",
3600 args->num_qid_inflights);
3601 DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
3602 args->num_atomic_inflights);
3606 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3607 struct dlb2_ldb_queue *queue,
3608 struct dlb2_create_ldb_queue_args *args)
3613 queue->sn_cfg_valid = false;
3615 if (args->num_sequence_numbers == 0)
3618 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3619 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3621 if (group->sequence_numbers_per_queue ==
3622 args->num_sequence_numbers &&
3623 !dlb2_sn_group_full(group)) {
3624 slot = dlb2_sn_group_alloc_slot(group);
3632 "[%s():%d] Internal error: no sequence number slots available\n",
3633 __func__, __LINE__);
3637 queue->sn_cfg_valid = true;
3638 queue->sn_group = i;
3639 queue->sn_slot = slot;
3644 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3646 struct dlb2_create_ldb_queue_args *args,
3647 struct dlb2_cmd_response *resp,
3649 unsigned int vdev_id,
3650 struct dlb2_hw_domain **out_domain,
3651 struct dlb2_ldb_queue **out_queue)
3653 struct dlb2_hw_domain *domain;
3654 struct dlb2_ldb_queue *queue;
3657 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3659 if (domain == NULL) {
3660 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3661 return -EINVAL;
3662 }
3664 if (!domain->configured) {
3665 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3666 return -EINVAL;
3667 }
3669 if (domain->started) {
3670 resp->status = DLB2_ST_DOMAIN_STARTED;
3671 return -EINVAL;
3672 }
3674 queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3675 if (queue == NULL) {
3676 resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3677 return -EINVAL;
3678 }
3680 if (args->num_sequence_numbers) {
3681 for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3682 struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3684 if (group->sequence_numbers_per_queue ==
3685 args->num_sequence_numbers &&
3686 !dlb2_sn_group_full(group))
3690 if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3691 resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3692 return -EINVAL;
3693 }
3696 if (args->num_qid_inflights > 4096) {
3697 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3698 return -EINVAL;
3699 }
3701 /* Inflights must be <= number of sequence numbers if ordered */
3702 if (args->num_sequence_numbers != 0 &&
3703 args->num_qid_inflights > args->num_sequence_numbers) {
3704 resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3705 return -EINVAL;
3706 }
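/*
 * Worked example (assumed values): an ordered queue created with
 * num_sequence_numbers == 64 may request at most 64 QID inflights, so
 * num_qid_inflights == 128 would be rejected by the check above with
 * DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION.
 */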
3708 if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3709 resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3710 return -EINVAL;
3711 }
3713 if (args->num_atomic_inflights &&
3714 args->lock_id_comp_level != 0 &&
3715 args->lock_id_comp_level != 64 &&
3716 args->lock_id_comp_level != 128 &&
3717 args->lock_id_comp_level != 256 &&
3718 args->lock_id_comp_level != 512 &&
3719 args->lock_id_comp_level != 1024 &&
3720 args->lock_id_comp_level != 2048 &&
3721 args->lock_id_comp_level != 4096 &&
3722 args->lock_id_comp_level != 65536) {
3723 resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3724 return -EINVAL;
3725 }
3727 *out_domain = domain;
3728 *out_queue = queue;
3730 return 0;
3731 }
3734 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
3735 struct dlb2_hw_domain *domain,
3736 struct dlb2_ldb_queue *queue,
3737 struct dlb2_create_ldb_queue_args *args)
3740 ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
3741 if (ret)
3742 return ret;
3744 /* Attach QID inflights */
3745 queue->num_qid_inflights = args->num_qid_inflights;
3747 /* Attach atomic inflights */
3748 queue->aqed_limit = args->num_atomic_inflights;
3750 domain->num_avail_aqed_entries -= args->num_atomic_inflights;
3751 domain->num_used_aqed_entries += args->num_atomic_inflights;
3753 return 0;
3754 }
3756 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
3757 struct dlb2_hw_domain *domain,
3758 struct dlb2_ldb_queue *queue,
3759 struct dlb2_create_ldb_queue_args *args,
3761 unsigned int vdev_id)
3763 struct dlb2_sn_group *sn_group;
3768 /* QID write permissions are turned on when the domain is started */
3769 offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.phys_id;
3771 DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), reg);
3773 /*
3774 * Unordered QIDs get 4K inflights, ordered get as many as the number
3775 * of sequence numbers.
3776 */
3777 DLB2_BITS_SET(reg, args->num_qid_inflights,
3778 DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
3779 DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
3780 queue->id.phys_id), reg);
3782 alimit = queue->aqed_limit;
3784 if (alimit > DLB2_MAX_NUM_AQED_ENTRIES)
3785 alimit = DLB2_MAX_NUM_AQED_ENTRIES;
3788 DLB2_BITS_SET(reg, alimit, DLB2_LSP_QID_AQED_ACTIVE_LIM_LIMIT);
3790 DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver,
3791 queue->id.phys_id), reg);
3794 switch (args->lock_id_comp_level) {
3795 case 64:
3796 DLB2_BITS_SET(reg, 1, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3797 break;
3798 case 128:
3799 DLB2_BITS_SET(reg, 2, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3800 break;
3801 case 256:
3802 DLB2_BITS_SET(reg, 3, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3803 break;
3804 case 512:
3805 DLB2_BITS_SET(reg, 4, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3806 break;
3807 case 1024:
3808 DLB2_BITS_SET(reg, 5, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3809 break;
3810 case 2048:
3811 DLB2_BITS_SET(reg, 6, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3812 break;
3813 case 4096:
3814 DLB2_BITS_SET(reg, 7, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
3815 break;
3816 default:
3817 /* No compression by default */
3818 break;
3819 }
3821 DLB2_CSR_WR(hw, DLB2_AQED_QID_HID_WIDTH(queue->id.phys_id), reg);
3824 /* Don't timestamp QEs that pass through this queue */
3825 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_ITS(queue->id.phys_id), reg);
3827 DLB2_BITS_SET(reg, args->depth_threshold,
3828 DLB2_LSP_QID_ATM_DEPTH_THRSH_THRESH);
3830 DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver,
3831 queue->id.phys_id), reg);
3834 DLB2_BITS_SET(reg, args->depth_threshold,
3835 DLB2_LSP_QID_NALDB_DEPTH_THRSH_THRESH);
3837 DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue->id.phys_id),
3840 /*
3841 * This register limits the number of inflight flows a queue can have
3842 * at one time. It has an upper bound of 2048, but can be
3843 * over-subscribed. 512 is chosen so that a single queue does not use
3844 * the entire atomic storage, but can use a substantial portion if
3845 * needed.
3846 */
3848 DLB2_BITS_SET(reg, 512, DLB2_AQED_QID_FID_LIM_QID_FID_LIMIT);
3849 DLB2_CSR_WR(hw, DLB2_AQED_QID_FID_LIM(queue->id.phys_id), reg);
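/*
 * Worked note (assumed arithmetic): with the 512-flow cap above, eight
 * queues could nominally demand 8 * 512 = 4096 flow IDs against the
 * 2048-entry upper bound named in the comment, i.e. the atomic storage is
 * deliberately over-subscribed on the assumption that queues rarely peak
 * at the same time.
 */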
3853 sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
3854 DLB2_BITS_SET(reg, sn_group->mode, DLB2_CHP_ORD_QID_SN_MAP_MODE);
3855 DLB2_BITS_SET(reg, queue->sn_slot, DLB2_CHP_ORD_QID_SN_MAP_SLOT);
3856 DLB2_BITS_SET(reg, sn_group->id, DLB2_CHP_ORD_QID_SN_MAP_GRP);
3859 DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue->id.phys_id), reg);
3862 DLB2_BITS_SET(reg, (args->num_sequence_numbers != 0),
3863 DLB2_SYS_LDB_QID_CFG_V_SN_CFG_V);
3864 DLB2_BITS_SET(reg, (args->num_atomic_inflights != 0),
3865 DLB2_SYS_LDB_QID_CFG_V_FID_CFG_V);
3867 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), reg);
3870 offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
3873 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VQID_V_VQID_V);
3874 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), reg);
3877 DLB2_BITS_SET(reg, queue->id.phys_id,
3878 DLB2_SYS_VF_LDB_VQID2QID_QID);
3879 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), reg);
3882 DLB2_BITS_SET(reg, queue->id.virt_id,
3883 DLB2_SYS_LDB_QID2VQID_VQID);
3884 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID2VQID(queue->id.phys_id), reg);
3888 DLB2_BIT_SET(reg, DLB2_SYS_LDB_QID_V_QID_V);
3889 DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), reg);
3892 /**
3893 * dlb2_hw_create_ldb_queue() - create a load-balanced queue
3894 * @hw: dlb2_hw handle for a particular device.
3895 * @domain_id: domain ID.
3896 * @args: queue creation arguments.
3897 * @resp: response structure.
3898 * @vdev_req: indicates whether this request came from a vdev.
3899 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3901 * This function creates a load-balanced queue.
3903 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3904 * device.
3907 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
3908 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
3909 * contains the queue ID.
3911 * resp->id contains a virtual ID if vdev_req is true.
3914 * EINVAL - A requested resource is unavailable, the domain is not configured,
3915 * the domain has already been started, or the requested queue name is
3916 * already in use.
3917 * EFAULT - Internal error (resp->status not set).
3918 */
3919 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
3921 struct dlb2_create_ldb_queue_args *args,
3922 struct dlb2_cmd_response *resp,
3924 unsigned int vdev_id)
3926 struct dlb2_hw_domain *domain;
3927 struct dlb2_ldb_queue *queue;
3930 dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
3932 /*
3933 * Verify that hardware resources are available before attempting to
3934 * satisfy the request. This simplifies the error unwinding code.
3935 */
3936 ret = dlb2_verify_create_ldb_queue_args(hw,
3937 domain_id,
3938 args,
3939 resp,
3940 vdev_req,
3941 vdev_id,
3942 &domain,
3943 &queue);
3944 if (ret)
3945 return ret;
3947 ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
3949 if (ret) {
3950 DLB2_HW_ERR(hw,
3951 "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
3952 __func__, __LINE__);
3953 return ret;
3954 }
3956 dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
3958 queue->num_mappings = 0;
3960 queue->configured = true;
3962 /*
3963 * Configuration succeeded, so move the resource from the 'avail' to
3964 * the 'used' list.
3965 */
3966 dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
3968 dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
3971 resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
3973 return 0;
3974 }
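/*
 * Illustrative sketch (not part of the driver): creating an ordered
 * load-balanced queue from PF context (vdev_req == false). All argument
 * values below are assumptions for the example; note that an ordered
 * queue's num_qid_inflights must not exceed num_sequence_numbers.
 *
 *	struct dlb2_create_ldb_queue_args qargs = {
 *		.num_sequence_numbers = 64,
 *		.num_qid_inflights = 64,
 *		.num_atomic_inflights = 0,
 *		.lock_id_comp_level = 0,
 *		.depth_threshold = 256,
 *	};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	if (dlb2_hw_create_ldb_queue(hw, domain_id, &qargs, &resp,
 *				     false, 0))
 *		DLB2_HW_ERR(hw, "ldb queue: status %u\n", resp.status);
 *	else
 *		ldb_qid = resp.id;
 */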
3976 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
3977 struct dlb2_hw_domain *domain,
3978 struct dlb2_ldb_port *port,
3980 unsigned int vdev_id)
3984 DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
3985 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);
3991 /*
3992 * DLB uses producer port address bits 17:12 to determine the
3993 * producer port ID. In Scalable IOV mode, PP accesses come
3994 * through the PF MMIO window for the physical producer port,
3995 * so for translation purposes the virtual and physical port
3996 * IDs are equal.
3997 */
3998 if (hw->virt_mode == DLB2_VIRT_SRIOV)
3999 virt_id = port->id.virt_id;
4000 else
4001 virt_id = port->id.phys_id;
4004 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
4005 offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4006 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);
4009 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
4010 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);
4013 DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
4014 DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
4018 DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
4019 DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
4022 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4023 struct dlb2_hw_domain *domain,
4024 struct dlb2_ldb_port *port,
4025 uintptr_t cq_dma_base,
4026 struct dlb2_create_ldb_port_args *args,
4028 unsigned int vdev_id)
4034 /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4035 DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
4036 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
4038 reg = cq_dma_base >> 32;
4039 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
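/*
 * Worked example (assumed address): for cq_dma_base == 0x1234567890c0
 * (64B aligned, so bits 5:0 are zero), ADDR_L above is written with
 * 0x1234567890c0 >> 6 and ADDR_U with 0x1234567890c0 >> 32 == 0x1234.
 */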
4041 /*
4042 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4043 * cache lines out-of-order (but QEs within a cache line are always
4044 * updated in-order).
4045 */
4047 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
4049 !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4050 DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
4051 DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);
4053 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);
4055 port->cq_depth = args->cq_depth;
4057 if (args->cq_depth <= 8) {
4058 ds = 1;
4059 } else if (args->cq_depth == 16) {
4060 ds = 2;
4061 } else if (args->cq_depth == 32) {
4062 ds = 3;
4063 } else if (args->cq_depth == 64) {
4064 ds = 4;
4065 } else if (args->cq_depth == 128) {
4066 ds = 5;
4067 } else if (args->cq_depth == 256) {
4068 ds = 6;
4069 } else if (args->cq_depth == 512) {
4070 ds = 7;
4071 } else if (args->cq_depth == 1024) {
4072 ds = 8;
4073 } else {
4074 DLB2_HW_ERR(hw,
4075 "[%s():%d] Internal error: invalid CQ depth\n",
4076 __func__, __LINE__);
4077 return -EFAULT;
4078 }
4081 DLB2_BITS_SET(reg, ds,
4082 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4084 DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4087 /*
4088 * To support CQs with depth less than 8, program the token count
4089 * register with a non-zero initial value. Operations such as domain
4090 * reset must take this initial value into account when quiescing the
4091 * CQ.
4092 */
4093 port->init_tkn_cnt = 0;
4095 if (args->cq_depth < 8) {
4096 reg = 0;
4097 port->init_tkn_cnt = 8 - args->cq_depth;
4099 DLB2_BITS_SET(reg, port->init_tkn_cnt,
4101 DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
4102 DLB2_CSR_WR(hw,
4103 DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4104 reg);
4105 } else {
4106 DLB2_CSR_WR(hw,
4107 DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4108 DLB2_LSP_CQ_LDB_TKN_CNT_RST);
4109 }
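/*
 * Worked example (assumed depth): for args->cq_depth == 4, the depth
 * select above is programmed as if the CQ held 8 entries and
 * init_tkn_cnt becomes 8 - 4 = 4, so the hardware starts out believing
 * four tokens are already outstanding and never exposes more than four
 * QEs to the 4-entry CQ.
 */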
4112 DLB2_BITS_SET(reg, ds,
4113 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
4115 DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4118 /* Reset the CQ write pointer */
4120 DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
4121 DLB2_CHP_LDB_CQ_WPTR_RST);
4125 port->hist_list_entry_limit - 1,
4126 DLB2_CHP_HIST_LIST_LIM_LIMIT);
4127 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);
4129 DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
4130 DLB2_CHP_HIST_LIST_BASE_BASE);
4132 DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
4135 /*
4136 * The inflight limit sets a cap on the number of QEs for which this CQ
4137 * can owe completions at one time.
4138 */
4140 DLB2_BITS_SET(reg, args->cq_history_list_size,
4141 DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
4142 DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
4146 DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4147 DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
4148 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
4152 DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4153 DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
4154 DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
4157 /*
4158 * Address translation (AT) settings: 0: untranslated, 2: translated
4159 * (see ATS spec regarding Address Type field for more details)
4160 */
4162 if (hw->ver == DLB2_HW_V2) {
4164 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
4167 if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4169 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4170 DLB2_SYS_LDB_CQ_PASID_PASID);
4171 DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
4174 DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);
4177 DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
4178 DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);
4180 /* Disable the port's QID mappings */
4182 DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
4188 dlb2_cq_depth_is_valid(u32 depth)
4190 if (depth != 1 && depth != 2 &&
4191 depth != 4 && depth != 8 &&
4192 depth != 16 && depth != 32 &&
4193 depth != 64 && depth != 128 &&
4194 depth != 256 && depth != 512 &&
4195 depth != 1024)
4196 return false;
4198 return true;
4199 }
4201 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4202 struct dlb2_hw_domain *domain,
4203 struct dlb2_ldb_port *port,
4204 uintptr_t cq_dma_base,
4205 struct dlb2_create_ldb_port_args *args,
4207 unsigned int vdev_id)
4211 port->hist_list_entry_base = domain->hist_list_entry_base +
4212 domain->hist_list_entry_offset;
4213 port->hist_list_entry_limit = port->hist_list_entry_base +
4214 args->cq_history_list_size;
4216 domain->hist_list_entry_offset += args->cq_history_list_size;
4217 domain->avail_hist_list_entries -= args->cq_history_list_size;
4219 ret = dlb2_ldb_port_configure_cq(hw,
4229 dlb2_ldb_port_configure_pp(hw,
4235 dlb2_ldb_port_cq_enable(hw, port);
4237 for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4238 port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4239 port->num_mappings = 0;
4241 port->enabled = true;
4243 port->configured = true;
4249 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4251 uintptr_t cq_dma_base,
4252 struct dlb2_create_ldb_port_args *args,
4254 unsigned int vdev_id)
4256 DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4258 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4259 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
4261 DLB2_HW_DBG(hw, "\tCQ depth: %d\n",
4263 DLB2_HW_DBG(hw, "\tCQ hist list size: %d\n",
4264 args->cq_history_list_size);
4265 DLB2_HW_DBG(hw, "\tCQ base address: 0x%lx\n",
4267 DLB2_HW_DBG(hw, "\tCoS ID: %u\n", args->cos_id);
4268 DLB2_HW_DBG(hw, "\tStrict CoS allocation: %u\n",
4273 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4275 uintptr_t cq_dma_base,
4276 struct dlb2_create_ldb_port_args *args,
4277 struct dlb2_cmd_response *resp,
4279 unsigned int vdev_id,
4280 struct dlb2_hw_domain **out_domain,
4281 struct dlb2_ldb_port **out_port,
4282 int *out_cos_id)
4283 {
4284 struct dlb2_hw_domain *domain;
4285 struct dlb2_ldb_port *port;
4288 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4290 if (domain == NULL) {
4291 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4292 return -EINVAL;
4293 }
4295 if (!domain->configured) {
4296 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4297 return -EINVAL;
4298 }
4300 if (domain->started) {
4301 resp->status = DLB2_ST_DOMAIN_STARTED;
4302 return -EINVAL;
4303 }
4305 if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
4306 resp->status = DLB2_ST_INVALID_COS_ID;
4307 return -EINVAL;
4308 }
4310 if (args->cos_strict) {
4311 id = args->cos_id;
4312 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4313 typeof(*port));
4314 } else {
4315 for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4316 id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
4318 port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4319 typeof(*port));
4320 if (port)
4321 break;
4322 }
4323 }
4325 if (port == NULL) {
4326 resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4327 return -EINVAL;
4328 }
4330 /* Check cache-line alignment */
4331 if ((cq_dma_base & 0x3F) != 0) {
4332 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4333 return -EINVAL;
4334 }
4336 if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4337 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4338 return -EINVAL;
4339 }
4341 /* The history list size must be >= 1 */
4342 if (!args->cq_history_list_size) {
4343 resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4344 return -EINVAL;
4345 }
4347 if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4348 resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4349 return -EINVAL;
4350 }
4352 *out_domain = domain;
4353 *out_port = port;
4354 *out_cos_id = id;
4356 return 0;
4357 }
4359 /**
4360 * dlb2_hw_create_ldb_port() - create a load-balanced port
4361 * @hw: dlb2_hw handle for a particular device.
4362 * @domain_id: domain ID.
4363 * @args: port creation arguments.
4364 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4365 * @resp: response structure.
4366 * @vdev_req: indicates whether this request came from a vdev.
4367 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4369 * This function creates a load-balanced port.
4371 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4372 * device.
4375 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4376 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4377 * contains the port ID.
4379 * resp->id contains a virtual ID if vdev_req is true.
4382 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4383 * pointer address is not properly aligned, the domain is not
4384 * configured, or the domain has already been started.
4385 * EFAULT - Internal error (resp->status not set).
4386 */
4387 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4389 struct dlb2_create_ldb_port_args *args,
4390 uintptr_t cq_dma_base,
4391 struct dlb2_cmd_response *resp,
4393 unsigned int vdev_id)
4395 struct dlb2_hw_domain *domain;
4396 struct dlb2_ldb_port *port;
4399 dlb2_log_create_ldb_port_args(hw,
4406 /*
4407 * Verify that hardware resources are available before attempting to
4408 * satisfy the request. This simplifies the error unwinding code.
4409 */
4410 ret = dlb2_verify_create_ldb_port_args(hw,
4411 domain_id,
4412 cq_dma_base,
4413 args,
4414 resp,
4415 vdev_req,
4416 vdev_id,
4417 &domain,
4418 &port,
4419 &cos_id);
4420 if (ret)
4421 return ret;
4423 ret = dlb2_configure_ldb_port(hw,
4424 domain,
4425 port,
4426 cq_dma_base,
4427 args,
4428 vdev_req,
4429 vdev_id);
4430 if (ret)
4431 return ret;
4433 /*
4434 * Configuration succeeded, so move the resource from the 'avail' to
4435 * the 'used' list.
4436 */
4437 dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4439 dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4442 resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4444 return 0;
4445 }
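/*
 * Illustrative sketch (not part of the driver): creating a load-balanced
 * port on any available class of service. 'cq_base' must be a 64B-aligned
 * PA/IOVA of the CQ memory; all values below are assumptions for the
 * example.
 *
 *	struct dlb2_create_ldb_port_args pargs = {
 *		.cq_depth = 64,
 *		.cq_history_list_size = 64,
 *		.cos_id = 0,
 *		.cos_strict = 0,
 *	};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	if (dlb2_hw_create_ldb_port(hw, domain_id, &pargs, cq_base,
 *				    &resp, false, 0))
 *		DLB2_HW_ERR(hw, "ldb port: status %u\n", resp.status);
 */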
4448 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
4450 uintptr_t cq_dma_base,
4451 struct dlb2_create_dir_port_args *args,
4453 unsigned int vdev_id)
4455 DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
4457 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4458 DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
4460 DLB2_HW_DBG(hw, "\tCQ depth: %d\n",
4462 DLB2_HW_DBG(hw, "\tCQ base address: 0x%lx\n",
4466 static struct dlb2_dir_pq_pair *
4467 dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
4470 struct dlb2_hw_domain *domain)
4472 struct dlb2_list_entry *iter;
4473 struct dlb2_dir_pq_pair *port;
4476 if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
4477 return NULL;
4479 DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
4480 if ((!vdev_req && port->id.phys_id == id) ||
4481 (vdev_req && port->id.virt_id == id))
4482 return port;
4483 }
4485 return NULL;
4486 }
4489 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
4491 uintptr_t cq_dma_base,
4492 struct dlb2_create_dir_port_args *args,
4493 struct dlb2_cmd_response *resp,
4495 unsigned int vdev_id,
4496 struct dlb2_hw_domain **out_domain,
4497 struct dlb2_dir_pq_pair **out_port)
4499 struct dlb2_hw_domain *domain;
4500 struct dlb2_dir_pq_pair *pq;
4502 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4504 if (domain == NULL) {
4505 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4506 return -EINVAL;
4507 }
4509 if (!domain->configured) {
4510 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4511 return -EINVAL;
4512 }
4514 if (domain->started) {
4515 resp->status = DLB2_ST_DOMAIN_STARTED;
4516 return -EINVAL;
4517 }
4519 if (args->queue_id != -1) {
4520 /*
4521 * If the user claims the queue is already configured, validate
4522 * the queue ID, its domain, and whether the queue is
4523 * configured.
4524 */
4525 pq = dlb2_get_domain_used_dir_pq(hw,
4530 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4531 !pq->queue_configured) {
4532 resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
4533 return -EINVAL;
4534 }
4535 } else {
4536 /*
4537 * If the port's queue is not configured, validate that a free
4538 * port-queue pair is available.
4539 */
4540 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4541 typeof(*pq));
4542 if (pq == NULL) {
4543 resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
4544 return -EINVAL;
4545 }
4546 }
4548 /* Check cache-line alignment */
4549 if ((cq_dma_base & 0x3F) != 0) {
4550 resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4551 return -EINVAL;
4552 }
4554 if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4555 resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4556 return -EINVAL;
4557 }
4559 *out_domain = domain;
4560 *out_port = pq;
4562 return 0;
4563 }
4565 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
4566 struct dlb2_hw_domain *domain,
4567 struct dlb2_dir_pq_pair *port,
4569 unsigned int vdev_id)
4573 DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_DIR_PP2VAS_VAS);
4574 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), reg);
4580 /*
4581 * DLB uses producer port address bits 17:12 to determine the
4582 * producer port ID. In Scalable IOV mode, PP accesses come
4583 * through the PF MMIO window for the physical producer port,
4584 * so for translation purposes the virtual and physical port
4585 * IDs are equal.
4586 */
4587 if (hw->virt_mode == DLB2_VIRT_SRIOV)
4588 virt_id = port->id.virt_id;
4589 else
4590 virt_id = port->id.phys_id;
4593 DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_DIR_VPP2PP_PP);
4594 offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
4595 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), reg);
4598 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_PP2VDEV_VDEV);
4599 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VDEV(port->id.phys_id), reg);
4602 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VPP_V_VPP_V);
4603 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), reg);
4607 DLB2_BIT_SET(reg, DLB2_SYS_DIR_PP_V_PP_V);
4608 DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP_V(port->id.phys_id), reg);
4611 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
4612 struct dlb2_hw_domain *domain,
4613 struct dlb2_dir_pq_pair *port,
4614 uintptr_t cq_dma_base,
4615 struct dlb2_create_dir_port_args *args,
4617 unsigned int vdev_id)
4622 /* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4623 DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_DIR_CQ_ADDR_L_ADDR_L);
4624 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), reg);
4626 reg = cq_dma_base >> 32;
4627 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), reg);
4629 /*
4630 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4631 * cache lines out-of-order (but QEs within a cache line are always
4632 * updated in-order).
4633 */
4635 DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_CQ2VF_PF_RO_VF);
4636 DLB2_BITS_SET(reg, !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4637 DLB2_SYS_DIR_CQ2VF_PF_RO_IS_PF);
4638 DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ2VF_PF_RO_RO);
4640 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), reg);
4642 if (args->cq_depth <= 8) {
4643 ds = 1;
4644 } else if (args->cq_depth == 16) {
4645 ds = 2;
4646 } else if (args->cq_depth == 32) {
4647 ds = 3;
4648 } else if (args->cq_depth == 64) {
4649 ds = 4;
4650 } else if (args->cq_depth == 128) {
4651 ds = 5;
4652 } else if (args->cq_depth == 256) {
4653 ds = 6;
4654 } else if (args->cq_depth == 512) {
4655 ds = 7;
4656 } else if (args->cq_depth == 1024) {
4657 ds = 8;
4658 } else {
4659 DLB2_HW_ERR(hw,
4660 "[%s():%d] Internal error: invalid CQ depth\n",
4661 __func__, __LINE__);
4662 return -EFAULT;
4663 }
4666 DLB2_BITS_SET(reg, ds,
4667 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4669 DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4672 /*
4673 * To support CQs with depth less than 8, program the token count
4674 * register with a non-zero initial value. Operations such as domain
4675 * reset must take this initial value into account when quiescing the
4676 * CQ.
4677 */
4678 port->init_tkn_cnt = 0;
4680 if (args->cq_depth < 8) {
4681 reg = 0;
4682 port->init_tkn_cnt = 8 - args->cq_depth;
4684 DLB2_BITS_SET(reg, port->init_tkn_cnt,
4685 DLB2_LSP_CQ_DIR_TKN_CNT_COUNT);
4686 DLB2_CSR_WR(hw,
4687 DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4688 reg);
4689 } else {
4690 DLB2_CSR_WR(hw,
4691 DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4692 DLB2_LSP_CQ_DIR_TKN_CNT_RST);
4693 }
4696 DLB2_BITS_SET(reg, ds,
4697 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_TOKEN_DEPTH_SELECT_V2);
4699 DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
4703 /* Reset the CQ write pointer */
4705 DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
4706 DLB2_CHP_DIR_CQ_WPTR_RST);
4708 /* Virtualize the PPID */
4710 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), reg);
4712 /*
4713 * Address translation (AT) settings: 0: untranslated, 2: translated
4714 * (see ATS spec regarding Address Type field for more details)
4715 */
4716 if (hw->ver == DLB2_HW_V2) {
4718 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), reg);
4721 if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4722 DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4723 DLB2_SYS_DIR_CQ_PASID_PASID);
4724 DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ_PASID_FMT2);
4727 DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id), reg);
4730 DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_DIR_CQ2VAS_CQ2VAS);
4731 DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id), reg);
4736 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
4737 struct dlb2_hw_domain *domain,
4738 struct dlb2_dir_pq_pair *port,
4739 uintptr_t cq_dma_base,
4740 struct dlb2_create_dir_port_args *args,
4742 unsigned int vdev_id)
4746 ret = dlb2_dir_port_configure_cq(hw,
4757 dlb2_dir_port_configure_pp(hw,
4763 dlb2_dir_port_cq_enable(hw, port);
4765 port->enabled = true;
4767 port->port_configured = true;
4772 /**
4773 * dlb2_hw_create_dir_port() - create a directed port
4774 * @hw: dlb2_hw handle for a particular device.
4775 * @domain_id: domain ID.
4776 * @args: port creation arguments.
4777 * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4778 * @resp: response structure.
4779 * @vdev_req: indicates whether this request came from a vdev.
4780 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4782 * This function creates a directed port.
4784 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4785 * device.
4788 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4789 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4790 * contains the port ID.
4792 * resp->id contains a virtual ID if vdev_req is true.
4795 * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4796 * pointer address is not properly aligned, the domain is not
4797 * configured, or the domain has already been started.
4798 * EFAULT - Internal error (resp->status not set).
4799 */
4800 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
4802 struct dlb2_create_dir_port_args *args,
4803 uintptr_t cq_dma_base,
4804 struct dlb2_cmd_response *resp,
4806 unsigned int vdev_id)
4808 struct dlb2_dir_pq_pair *port;
4809 struct dlb2_hw_domain *domain;
4812 dlb2_log_create_dir_port_args(hw,
4819 /*
4820 * Verify that hardware resources are available before attempting to
4821 * satisfy the request. This simplifies the error unwinding code.
4822 */
4823 ret = dlb2_verify_create_dir_port_args(hw,
4824 domain_id,
4825 cq_dma_base,
4826 args,
4827 resp,
4828 vdev_req,
4829 vdev_id,
4830 &domain,
4831 &port);
4832 if (ret)
4833 return ret;
4835 ret = dlb2_configure_dir_port(hw,
4836 domain,
4837 port,
4838 cq_dma_base,
4839 args,
4840 vdev_req,
4841 vdev_id);
4842 if (ret)
4843 return ret;
4845 /*
4846 * Configuration succeeded, so move the resource from the 'avail' to
4847 * the 'used' list (if it's not already there).
4848 */
4849 if (args->queue_id == -1) {
4850 dlb2_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
4852 dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
4856 resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4858 return 0;
4859 }
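/*
 * Illustrative sketch (not part of the driver): creating a directed port
 * and letting the hardware pick a free port-queue pair (queue_id == -1).
 * Values below are assumptions for the example.
 *
 *	struct dlb2_create_dir_port_args pargs = {
 *		.queue_id = -1,
 *		.cq_depth = 8,
 *	};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	if (dlb2_hw_create_dir_port(hw, domain_id, &pargs, cq_base,
 *				    &resp, false, 0))
 *		DLB2_HW_ERR(hw, "dir port: status %u\n", resp.status);
 *	else
 *		dir_port_id = resp.id;
 */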
4861 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
4862 struct dlb2_hw_domain *domain,
4863 struct dlb2_dir_pq_pair *queue,
4864 struct dlb2_create_dir_queue_args *args,
4866 unsigned int vdev_id)
4871 /* QID write permissions are turned on when the domain is started */
4872 offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4875 DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), reg);
4877 /* Don't timestamp QEs that pass through this queue */
4878 DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_ITS(queue->id.phys_id), reg);
4881 DLB2_BITS_SET(reg, args->depth_threshold,
4882 DLB2_LSP_QID_DIR_DEPTH_THRSH_THRESH);
4884 DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver, queue->id.phys_id),
4888 offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
4892 DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VQID_V_VQID_V);
4893 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), reg);
4896 DLB2_BITS_SET(reg, queue->id.phys_id,
4897 DLB2_SYS_VF_DIR_VQID2QID_QID);
4898 DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), reg);
4902 DLB2_BIT_SET(reg, DLB2_SYS_DIR_QID_V_QID_V);
4903 DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), reg);
4905 queue->queue_configured = true;
4909 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
4911 struct dlb2_create_dir_queue_args *args,
4913 unsigned int vdev_id)
4915 DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
4917 DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4918 DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
4919 DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
4923 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
4925 struct dlb2_create_dir_queue_args *args,
4926 struct dlb2_cmd_response *resp,
4928 unsigned int vdev_id,
4929 struct dlb2_hw_domain **out_domain,
4930 struct dlb2_dir_pq_pair **out_queue)
4932 struct dlb2_hw_domain *domain;
4933 struct dlb2_dir_pq_pair *pq;
4935 domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4937 if (domain == NULL) {
4938 resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4939 return -EINVAL;
4940 }
4942 if (!domain->configured) {
4943 resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4944 return -EINVAL;
4945 }
4947 if (domain->started) {
4948 resp->status = DLB2_ST_DOMAIN_STARTED;
4949 return -EINVAL;
4950 }
4952 /*
4953 * If the user claims the port is already configured, validate the port
4954 * ID, its domain, and whether the port is configured.
4955 */
4956 if (args->port_id != -1) {
4957 pq = dlb2_get_domain_used_dir_pq(hw,
4962 if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4963 !pq->port_configured) {
4964 resp->status = DLB2_ST_INVALID_PORT_ID;
4969 * If the queue's port is not configured, validate that a free
4970 * port-queue pair is available.
4972 pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4975 resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
4980 *out_domain = domain;
/**
 * dlb2_hw_create_dir_queue() - create a directed queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue creation arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function creates a directed queue.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the queue ID.
 *
 * resp->id contains a virtual ID if vdev_req is true.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, the domain is not configured,
 *          or the domain has already been started.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_create_dir_queue_args *args,
			     struct dlb2_cmd_response *resp,
			     bool vdev_req,
			     unsigned int vdev_id)
{
	struct dlb2_dir_pq_pair *queue;
	struct dlb2_hw_domain *domain;
	int ret;

	dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_create_dir_queue_args(hw, domain_id, args, resp,
						vdev_req, vdev_id,
						&domain, &queue);
	if (ret)
		return ret;

	dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);

	/*
	 * Configuration succeeded, so move the resource from the 'avail' to
	 * the 'used' list (if it's not already there).
	 */
	if (args->port_id == -1) {
		dlb2_list_del(&domain->avail_dir_pq_pairs,
			      &queue->domain_list);

		dlb2_list_add(&domain->used_dir_pq_pairs,
			      &queue->domain_list);
	}

	resp->status = 0;
	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;

	return 0;
}

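/*
 * Illustrative sketch, not part of the driver: creating the directed queue
 * for a port that was created with queue_id == -1. Passing that port's ID
 * here pairs the new queue with it; the depth_threshold value is a
 * hypothetical choice.
 */
static inline int dlb2_example_create_dir_queue(struct dlb2_hw *hw,
						u32 domain_id, int port_id)
{
	struct dlb2_create_dir_queue_args args = {0};
	struct dlb2_cmd_response resp = {0};
	int ret;

	args.port_id = port_id;		/* -1 would instead claim a free pair */
	args.depth_threshold = 256;	/* hypothetical threshold */

	ret = dlb2_hw_create_dir_queue(hw, domain_id, &args, &resp, false, 0);

	return ret ? ret : (int)resp.id;
}
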
static bool
dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];

		if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
		    map->pending_qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}

static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
					      struct dlb2_ldb_queue *queue,
					      struct dlb2_cmd_response *resp)
{
	enum dlb2_qid_map_state state;
	int i;

	/* Unused slot available? */
	if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
		return 0;

	/*
	 * If the queue is already mapped (from the application's perspective),
	 * this is simply a priority update.
	 */
	state = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, state, queue, &i))
		return 0;

	state = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, state, queue, &i))
		return 0;

	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
		return 0;

	/*
	 * If the slot contains an unmap in progress, it's considered
	 * available.
	 */
	state = DLB2_QUEUE_UNMAP_IN_PROG;
	if (dlb2_port_find_slot(port, state, &i))
		return 0;

	state = DLB2_QUEUE_UNMAPPED;
	if (dlb2_port_find_slot(port, state, &i))
		return 0;

	resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;

	return -EINVAL;
}

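/*
 * Sketch of the availability rule enforced above: with all slots occupied, a
 * request can still proceed if some slot is free or being vacated; slots
 * whose state already refers to the requested queue turn the request into a
 * priority update instead. Illustrative only, not part of the driver.
 */
static inline bool dlb2_example_slot_is_claimable(enum dlb2_qid_map_state st)
{
	return st == DLB2_QUEUE_UNMAPPED || st == DLB2_QUEUE_UNMAP_IN_PROG;
}
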
static struct dlb2_ldb_queue *
dlb2_get_domain_ldb_queue(u32 id,
			  bool vdev_req,
			  struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
		if ((!vdev_req && queue->id.phys_id == id) ||
		    (vdev_req && queue->id.virt_id == id))
			return queue;
	}

	return NULL;
}

static struct dlb2_ldb_port *
dlb2_get_domain_used_ldb_port(u32 id,
			      bool vdev_req,
			      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (id >= DLB2_MAX_NUM_LDB_PORTS)
		return NULL;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			if ((!vdev_req && port->id.phys_id == id) ||
			    (vdev_req && port->id.virt_id == id))
				return port;
		}

		DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter) {
			if ((!vdev_req && port->id.phys_id == id) ||
			    (vdev_req && port->id.virt_id == id))
				return port;
		}
	}

	return NULL;
}

static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot,
					      struct dlb2_map_qid_args *args)
{
	u32 cq2priov;

	/* Read-modify-write the priority and valid bit register */
	cq2priov = DLB2_CSR_RD(hw,
			       DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id));

	cq2priov |= (1 << (slot + DLB2_LSP_CQ2PRIOV_V_LOC)) &
		    DLB2_LSP_CQ2PRIOV_V;
	cq2priov |= ((args->priority & 0x7) << slot * 3) &
		    DLB2_LSP_CQ2PRIOV_PRIO;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), cq2priov);

	dlb2_flush_csr(hw);

	port->qid_map[slot].priority = args->priority;
}

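/*
 * Worked example (sketch) of the CQ2PRIOV packing above: the register holds
 * eight 3-bit slot priorities in its low bits (slot's priority at bit offset
 * slot * 3) plus one valid bit per slot starting at bit
 * DLB2_LSP_CQ2PRIOV_V_LOC. For slot 2 and priority 5, the bits OR'd in are
 * (1 << (2 + DLB2_LSP_CQ2PRIOV_V_LOC)) and (5 << 6). The helper below is
 * illustrative only, not part of the driver.
 */
static inline u32 dlb2_example_cq2priov_bits(int slot, u32 priority)
{
	u32 bits = 0;

	bits |= (1 << (slot + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
	bits |= ((priority & 0x7) << (slot * 3)) & DLB2_LSP_CQ2PRIOV_PRIO;

	return bits;
}
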
static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
				    u32 domain_id,
				    struct dlb2_map_qid_args *args,
				    struct dlb2_cmd_response *resp,
				    bool vdev_req,
				    unsigned int vdev_id,
				    struct dlb2_hw_domain **out_domain,
				    struct dlb2_ldb_port **out_port,
				    struct dlb2_ldb_queue **out_queue)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	struct dlb2_ldb_port *port;
	int id;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
	if (!port || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	if (args->priority >= DLB2_QID_PRIORITIES) {
		resp->status = DLB2_ST_INVALID_PRIORITY;
		return -EINVAL;
	}

	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
	if (!queue || !queue->configured) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	if (queue->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	if (port->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	*out_domain = domain;
	*out_port = port;
	*out_queue = queue;

	return 0;
}

static void dlb2_log_map_qid(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_map_qid_args *args,
			     bool vdev_req,
			     unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
	DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
	DLB2_HW_DBG(hw, "\tQueue ID:  %d\n", args->qid);
	DLB2_HW_DBG(hw, "\tPriority:  %d\n", args->priority);
}

/**
 * dlb2_hw_map_qid() - map a load-balanced queue to a load-balanced port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: map QID arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function configures the DLB to schedule QEs from the specified queue
 * to the specified port. Each load-balanced port can be mapped to up to 8
 * queues; each load-balanced queue can potentially map to all the
 * load-balanced ports.
 *
 * A successful return does not necessarily mean the mapping was configured. If
 * this function is unable to immediately map the queue to the port, it will
 * add the requested operation to a per-port list of pending map/unmap
 * operations, and (if it's not already running) launch a kernel thread that
 * periodically attempts to process all pending operations. In a sense, this is
 * an asynchronous function.
 *
 * This asynchronicity creates two views of the state of hardware: the actual
 * hardware state and the requested state (as if every request completed
 * immediately). If there are any pending map/unmap operations, the requested
 * state will differ from the actual state. All validation is performed with
 * respect to the pending state; for instance, if there are 8 pending map
 * operations for port X, a request for a 9th will fail because a load-balanced
 * port can only map up to 8 queues.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
 *          the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_map_qid(struct dlb2_hw *hw,
		    u32 domain_id,
		    struct dlb2_map_qid_args *args,
		    struct dlb2_cmd_response *resp,
		    bool vdev_req,
		    unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	enum dlb2_qid_map_state st;
	struct dlb2_ldb_port *port;
	int ret, i;
	u8 prio;

	dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_map_qid_args(hw, domain_id, args, resp,
				       vdev_req, vdev_id,
				       &domain, &port, &queue);
	if (ret)
		return ret;

	prio = args->priority;

	/*
	 * If there are any outstanding detach operations for this port,
	 * attempt to complete them. This may be necessary to free up a QID
	 * slot for this requested mapping.
	 */
	if (port->num_pending_removals)
		dlb2_domain_finish_unmap_port(hw, domain, port);

	ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
	if (ret)
		return ret;

	/* Hardware requires disabling the CQ before mapping QIDs. */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	/*
	 * If this is only a priority change, don't perform the full QID->CQ
	 * mapping procedure.
	 */
	st = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		if (prio != port->qid_map[i].priority) {
			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
		}

		st = DLB2_QUEUE_MAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto map_qid_done;
	}

	st = DLB2_QUEUE_UNMAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		if (prio != port->qid_map[i].priority) {
			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
		}

		st = DLB2_QUEUE_MAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto map_qid_done;
	}

	/*
	 * If this is a priority change on an in-progress mapping, don't
	 * perform the full QID->CQ mapping procedure.
	 */
	st = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		port->qid_map[i].priority = prio;

		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");

		goto map_qid_done;
	}

	/*
	 * If this is a priority change on a pending mapping, update the
	 * pending priority.
	 */
	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
		port->qid_map[i].pending_priority = prio;

		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");

		goto map_qid_done;
	}

	/*
	 * If all the CQ's slots are in use, then there's an unmap in progress
	 * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
	 * mapping to pending_map and return. When the removal is completed for
	 * the slot's current occupant, this mapping will be performed.
	 */
	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
		if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
			enum dlb2_qid_map_state new_st;

			port->qid_map[i].pending_qid = queue->id.phys_id;
			port->qid_map[i].pending_priority = prio;

			new_st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

			ret = dlb2_port_slot_state_transition(hw, port, queue,
							      i, new_st);
			if (ret)
				return ret;

			DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");

			goto map_qid_done;
		}
	}

	/*
	 * If the domain has started, a special "dynamic" CQ->queue mapping
	 * procedure is required in order to safely update the CQ<->QID tables.
	 * The "static" procedure cannot be used when traffic is flowing,
	 * because the CQ<->QID tables cannot be updated atomically and the
	 * scheduler won't see the new mapping unless the queue's if_status
	 * changes, which isn't guaranteed.
	 */
	ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);

	/* If ret is less than zero, it's due to an internal error */
	if (ret)
		return ret;

map_qid_done:
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	resp->status = 0;

	return 0;
}

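/*
 * Illustrative sketch, not part of the driver: mapping a load-balanced queue
 * to a load-balanced port. The priority value is hypothetical (0 is the
 * highest of the eight priorities). Note that a zero return only means the
 * request was accepted; the hardware mapping may complete asynchronously.
 */
static inline int dlb2_example_map_qid(struct dlb2_hw *hw, u32 domain_id,
				       u32 port_id, u32 qid)
{
	struct dlb2_map_qid_args args = {0};
	struct dlb2_cmd_response resp = {0};

	args.port_id = port_id;
	args.qid = qid;
	args.priority = 0;

	return dlb2_hw_map_qid(hw, domain_id, &args, &resp, false, 0);
}
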
static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
			       u32 domain_id,
			       struct dlb2_unmap_qid_args *args,
			       bool vdev_req,
			       unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
	DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
	DLB2_HW_DBG(hw, "\tQueue ID:  %d\n", args->qid);
	if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
		DLB2_HW_DBG(hw, "\tQueue's num mappings: %d\n",
			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
}

static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
				      u32 domain_id,
				      struct dlb2_unmap_qid_args *args,
				      struct dlb2_cmd_response *resp,
				      bool vdev_req,
				      unsigned int vdev_id,
				      struct dlb2_hw_domain **out_domain,
				      struct dlb2_ldb_port **out_port,
				      struct dlb2_ldb_queue **out_queue)
{
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	struct dlb2_ldb_port *port;
	int slot;
	int id;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	id = args->port_id;

	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
	if (!port || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	if (port->domain_id.phys_id != domain->id.phys_id) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
	if (!queue || !queue->configured) {
		DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
			    __func__, args->qid);
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	/*
	 * Verify that the port has the queue mapped. From the application's
	 * perspective a queue is mapped if it is actually mapped, the map is
	 * in progress, or the map is blocked pending an unmap.
	 */
	state = DLB2_QUEUE_MAPPED;
	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
		goto done;

	state = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
		goto done;

	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
		goto done;

	resp->status = DLB2_ST_INVALID_QID;
	return -EINVAL;

done:
	*out_domain = domain;
	*out_port = port;
	*out_queue = queue;

	return 0;
}

/**
 * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: unmap QID arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function configures the DLB to stop scheduling QEs from the specified
 * queue to the specified port.
 *
 * A successful return does not necessarily mean the mapping was removed. If
 * this function is unable to immediately unmap the queue from the port, it
 * will add the requested operation to a per-port list of pending map/unmap
 * operations, and (if it's not already running) launch a kernel thread that
 * periodically attempts to process all pending operations. See
 * dlb2_hw_map_qid() for more details.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
 *          the domain is not configured.
 * EFAULT - Internal error (resp->status not set).
 */
int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
		      u32 domain_id,
		      struct dlb2_unmap_qid_args *args,
		      struct dlb2_cmd_response *resp,
		      bool vdev_req,
		      unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	enum dlb2_qid_map_state st;
	struct dlb2_ldb_port *port;
	bool unmap_complete;
	int i, ret;

	dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);

	/*
	 * Verify that hardware resources are available before attempting to
	 * satisfy the request. This simplifies the error unwinding code.
	 */
	ret = dlb2_verify_unmap_qid_args(hw, domain_id, args, resp,
					 vdev_req, vdev_id,
					 &domain, &port, &queue);
	if (ret)
		return ret;

	/*
	 * If the queue hasn't been mapped yet, we need to update the slot's
	 * state and re-enable the queue's inflights.
	 */
	st = DLB2_QUEUE_MAP_IN_PROG;
	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
		/*
		 * Since the in-progress map was aborted, re-enable the QID's
		 * inflights.
		 */
		if (queue->num_pending_additions == 0)
			dlb2_ldb_queue_set_inflight_limit(hw, queue);

		st = DLB2_QUEUE_UNMAPPED;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto unmap_qid_done;
	}

	/*
	 * If the queue mapping is on hold pending an unmap, we simply need to
	 * update the slot's state.
	 */
	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
		st = DLB2_QUEUE_UNMAP_IN_PROG;
		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
		if (ret)
			return ret;

		goto unmap_qid_done;
	}

	st = DLB2_QUEUE_MAPPED;
	if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: no available CQ slots\n",
			    __func__);
		return -EFAULT;
	}

	/*
	 * QID->CQ mapping removal is an asynchronous procedure. It requires
	 * stopping the DLB2 from scheduling this CQ, draining all inflights
	 * from the CQ, then unmapping the queue from the CQ. This function
	 * simply marks the port as needing the queue unmapped, and (if
	 * necessary) starts the unmapping worker thread.
	 */
	dlb2_ldb_port_cq_disable(hw, port);

	st = DLB2_QUEUE_UNMAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
	if (ret)
		return ret;

	/*
	 * Attempt to finish the unmapping now, in case the port has no
	 * outstanding inflights. If that's not the case, this will fail and
	 * the unmapping will be completed at a later time.
	 */
	unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);

	/*
	 * If the unmapping couldn't complete immediately, launch the worker
	 * thread (if it isn't already launched) to finish it later.
	 */
	if (!unmap_complete && !os_worker_active(hw))
		os_schedule_work(hw);

unmap_qid_done:
	resp->status = 0;

	return 0;
}

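/*
 * Illustrative sketch, not part of the driver: requesting an unmap. As with
 * mapping, a zero return means the request was accepted; if the port still
 * has inflight QEs, the unmap finishes later on the worker thread and can be
 * tracked with dlb2_hw_pending_port_unmaps() below.
 */
static inline int dlb2_example_unmap_qid(struct dlb2_hw *hw, u32 domain_id,
					 u32 port_id, u32 qid)
{
	struct dlb2_unmap_qid_args args = {0};
	struct dlb2_cmd_response resp = {0};

	args.port_id = port_id;
	args.qid = qid;

	return dlb2_hw_unmap_qid(hw, domain_id, &args, &resp, false, 0);
}
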
static void
dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
				  struct dlb2_pending_port_unmaps_args *args,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 unmaps in progress arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
}

/**
 * dlb2_hw_pending_port_unmaps() - returns the number of unmap operations in
 *	progress.
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: number of unmaps in progress args.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error. If successful, resp->id
 * contains the number of unmaps in progress.
 *
 * Errors:
 * EINVAL - Invalid port ID.
 */
int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
				u32 domain_id,
				struct dlb2_pending_port_unmaps_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_port *port;

	dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
	if (!port || !port->configured) {
		resp->status = DLB2_ST_INVALID_PORT_ID;
		return -EINVAL;
	}

	resp->id = port->num_pending_removals;

	return 0;
}

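/*
 * Illustrative sketch, not part of the driver: busy-polling until a port's
 * pending unmaps drain after dlb2_hw_unmap_qid(). A real caller would bound
 * the loop or sleep between polls rather than spin.
 */
static inline int dlb2_example_wait_for_unmaps(struct dlb2_hw *hw,
					       u32 domain_id, u32 port_id)
{
	struct dlb2_pending_port_unmaps_args args = {0};
	struct dlb2_cmd_response resp = {0};
	int ret;

	do {
		args.port_id = port_id;

		ret = dlb2_hw_pending_port_unmaps(hw, domain_id, &args,
						  &resp, false, 0);
		if (ret)
			return ret;
	} while (resp.id != 0);

	return 0;
}
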
static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
					 u32 domain_id,
					 struct dlb2_cmd_response *resp,
					 bool vdev_req,
					 unsigned int vdev_id,
					 struct dlb2_hw_domain **out_domain)
{
	struct dlb2_hw_domain *domain;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (!domain) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	*out_domain = domain;

	return 0;
}

static void dlb2_log_start_domain(struct dlb2_hw *hw,
				  u32 domain_id,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}

/**
 * dlb2_hw_start_domain() - start a scheduling domain
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: start domain arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function starts a scheduling domain, which allows applications to send
 * traffic through it. Once a domain is started, its resources can no longer be
 * configured (besides QID remapping and port enable/disable).
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - the domain is not configured, or the domain is already started.
 */
int
dlb2_hw_start_domain(struct dlb2_hw *hw,
		     u32 domain_id,
		     struct dlb2_start_domain_args *args,
		     struct dlb2_cmd_response *resp,
		     bool vdev_req,
		     unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *dir_queue;
	struct dlb2_ldb_queue *ldb_queue;
	struct dlb2_hw_domain *domain;
	int ret;
	RTE_SET_USED(args);
	RTE_SET_USED(iter);

	dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);

	ret = dlb2_verify_start_domain_args(hw, domain_id, resp,
					    vdev_req, vdev_id, &domain);
	if (ret)
		return ret;

	/*
	 * Enable load-balanced and directed queue write permissions for the
	 * queues this domain owns. Without this, the DLB2 will drop all
	 * incoming traffic to those queues.
	 */
	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
		u32 vasqid_v = 0;
		unsigned int offs;

		DLB2_BIT_SET(vasqid_v, DLB2_SYS_LDB_VASQID_V_VASQID_V);

		offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
			ldb_queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), vasqid_v);
	}

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
		u32 vasqid_v = 0;
		unsigned int offs;

		DLB2_BIT_SET(vasqid_v, DLB2_SYS_DIR_VASQID_V_VASQID_V);

		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
			dir_queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), vasqid_v);
	}

	dlb2_flush_csr(hw);

	domain->started = true;

	resp->status = 0;

	return 0;
}

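/*
 * Illustrative sketch, not part of the driver: starting a fully configured
 * domain. Once this succeeds, queue write permissions are enabled and no
 * further resource creation is allowed in the domain (only QID remapping and
 * port enable/disable).
 */
static inline int dlb2_example_start_domain(struct dlb2_hw *hw, u32 domain_id)
{
	struct dlb2_start_domain_args args = {0};
	struct dlb2_cmd_response resp = {0};

	return dlb2_hw_start_domain(hw, domain_id, &args, &resp, false, 0);
}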