/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb2_user.h"

#include "dlb2_hw_types.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"
#include "dlb2_resource.h"

#include "../../dlb2_priv.h"
#include "../../dlb2_inline_fns.h"
#define DLB2_DOM_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, domain_list)

#define DLB2_FUNC_LIST_HEAD(head, type) \
	DLB2_LIST_HEAD((head), type, func_list)

#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)

#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
	DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)

#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)

#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
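/*
 * "Sparse" CQ mode spaces completion queue entries out, one QE per cache
 * line, as the cfg_64bytes_qe_*_cq_mode field names suggest. The two helpers
 * below enable that mode device-wide for directed and load-balanced CQs,
 * respectively.
 */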
void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
{
	union dlb2_chp_cfg_chp_csr_ctrl r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

	r0.field.cfg_64bytes_qe_dir_cq_mode = 1;

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}
void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
{
	union dlb2_chp_cfg_chp_csr_ctrl r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

	r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;

	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}
/*
 * The PF driver cannot assume that a register write will affect subsequent HCW
 * writes. To ensure a write completes, the driver must read back a CSR. This
 * function need only be called for configuration that can occur after the
 * domain has started; prior to starting, applications can't send HCWs.
 */
static inline void dlb2_flush_csr(struct dlb2_hw *hw)
{
	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
}
static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
				struct dlb2_dir_pq_pair *queue)
{
	union dlb2_lsp_qid_dir_enqueue_cnt r0;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));

	return r0.field.count;
}
static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
				    struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_dsbl reg;

	/*
	 * Don't re-enable the port if a removal is pending. The caller should
	 * mark this port as enabled (if it isn't already), and when the
	 * removal completes the port will be enabled.
	 */
	if (port->num_pending_removals)
		return;

	reg.field.disabled = 0;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}
static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
				     struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_dsbl reg;

	reg.field.disabled = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);

	dlb2_flush_csr(hw);
}
static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
				struct dlb2_ldb_queue *queue)
{
	union dlb2_lsp_qid_aqed_active_cnt r0;
	union dlb2_lsp_qid_atm_active r1;
	union dlb2_lsp_qid_ldb_enqueue_cnt r2;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
	r1.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));

	return r0.field.count + r1.field.count + r2.field.count;
}
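/*
 * ID-to-object lookup helpers. For requests originating from a virtual
 * device (vdev_req), the ID is virtual and must be translated by scanning
 * the function's domain and resource lists; PF requests index the physical
 * resource arrays directly.
 */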
static struct dlb2_ldb_queue *
dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
			   u32 id,
			   bool vdev_req,
			   unsigned int vdev_id)
{
	struct dlb2_list_entry *iter1;
	struct dlb2_list_entry *iter2;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter1);
	RTE_SET_USED(iter2);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;

	if (!vdev_req)
		return &hw->rsrcs.ldb_queues[id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
			if (queue->id.virt_id == id)
				return queue;
	}

	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
		if (queue->id.virt_id == id)
			return queue;

	return NULL;
}
static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
						      u32 id,
						      bool vdev_req,
						      unsigned int vdev_id)
{
	struct dlb2_list_entry *iteration;
	struct dlb2_function_resources *rsrcs;
	struct dlb2_hw_domain *domain;
	RTE_SET_USED(iteration);

	if (id >= DLB2_MAX_NUM_DOMAINS)
		return NULL;

	if (!vdev_req)
		return &hw->domains[id];

	rsrcs = &hw->vdev[vdev_id];

	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
		if (domain->id.virt_id == id)
			return domain;

	return NULL;
}
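/*
 * Software bookkeeping for QID->CQ slot state changes. Each {port, slot}
 * pair moves through the dlb2_qid_map_state machine below, and the
 * per-queue, per-port, and per-domain mapping counters are adjusted to
 * match the transition. Invalid transitions land on the error label.
 */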
static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot,
					   enum dlb2_qid_map_state new_state)
{
	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
	struct dlb2_hw_domain *domain;
	int domain_id;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, domain_id);
		return -EINVAL;
	}

	switch (curr_state) {
	case DLB2_QUEUE_UNMAPPED:
		switch (new_state) {
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			break;
		case DLB2_QUEUE_MAP_IN_PROG:
			queue->num_pending_additions++;
			domain->num_pending_additions++;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAPPED:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG:
			port->num_pending_removals++;
			domain->num_pending_removals++;
			break;
		case DLB2_QUEUE_MAPPED:
			/* Priority change, nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_MAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		case DLB2_QUEUE_MAPPED:
			queue->num_mappings++;
			port->num_mappings++;
			queue->num_pending_additions--;
			domain->num_pending_additions--;
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG:
		switch (new_state) {
		case DLB2_QUEUE_UNMAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			queue->num_mappings--;
			port->num_mappings--;
			break;
		case DLB2_QUEUE_MAPPED:
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
			/* Nothing to update */
			break;
		default:
			goto error;
		}
		break;
	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
		switch (new_state) {
		case DLB2_QUEUE_UNMAP_IN_PROG:
			/* Nothing to update */
			break;
		case DLB2_QUEUE_UNMAPPED:
			/*
			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
			 * becomes UNMAPPED before it transitions to
			 * MAP_IN_PROG.
			 */
			queue->num_mappings--;
			port->num_mappings--;
			port->num_pending_removals--;
			domain->num_pending_removals--;
			break;
		default:
			goto error;
		}
		break;
	default:
		goto error;
	}

	port->qid_map[slot].state = new_state;

	DLB2_HW_DBG(hw,
		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return 0;

error:
	DLB2_HW_ERR(hw,
		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
		    __func__, queue->id.phys_id, port->id.phys_id,
		    curr_state, new_state);
	return -EFAULT;
}
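/*
 * Find the first slot in the port's qid_map whose state equals 'state'
 * (and, for the _queue variant, whose QID matches 'queue'). The slot index
 * is written to *slot; the return value indicates whether one was found.
 */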
static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
				enum dlb2_qid_map_state state,
				int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}
static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
				      enum dlb2_qid_map_state state,
				      struct dlb2_ldb_queue *queue,
				      int *slot)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		if (port->qid_map[i].state == state &&
		    port->qid_map[i].qid == queue->id.phys_id)
			break;
	}

	*slot = i;

	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
}
/*
 * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
 * their function names imply, and should only be called by the dynamic CQ
 * mapping code.
 */
static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
					      struct dlb2_hw_domain *domain,
					      struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_disable(hw, port);
		}
	}
}
static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
					     struct dlb2_hw_domain *domain,
					     struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int slot, i;
	RTE_SET_USED(iter);

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;

			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);
		}
	}
}
static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
						struct dlb2_ldb_port *port,
						int slot)
{
	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };

	r0.field.cq = port->id.phys_id;
	r0.field.qidix = slot;
	r0.field.value = 0;
	r0.field.inflight_ok_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);

	dlb2_flush_csr(hw);
}
static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      int slot)
{
	union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };

	r0.field.cq = port->id.phys_id;
	r0.field.qidix = slot;
	r0.field.value = 1;
	r0.field.inflight_ok_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);

	dlb2_flush_csr(hw);
}
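/*
 * Program a static QID->CQ mapping: claim a slot in the port's qid_map,
 * then update the priority/valid (CQ2PRIOV), CQ-to-QID (CQ2QID), and
 * QID-to-CQ-index (QID2CQIDIX) registers for that slot. This path is used
 * directly only before the domain is started; afterwards the dynamic
 * mapping path below drives it.
 */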
static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
					struct dlb2_ldb_port *p,
					struct dlb2_ldb_queue *q,
					u8 priority)
{
	union dlb2_lsp_cq2priov r0;
	union dlb2_lsp_cq2qid0 r1;
	union dlb2_atm_qid2cqidix_00 r2;
	union dlb2_lsp_qid2cqidix_00 r3;
	union dlb2_lsp_qid2cqidix2_00 r4;
	enum dlb2_qid_map_state state;
	int i;

	/* Look for a pending or already-mapped slot, else an unused slot */
	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));

	r0.field.v |= 1 << i;
	r0.field.prio |= (priority & 0x7) << i * 3;

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);

	/* Read-modify-write the QID map register */
	if (i < 4)
		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
	else
		r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));

	if (i == 0 || i == 4)
		r1.field.qid_p0 = q->id.phys_id;
	if (i == 1 || i == 5)
		r1.field.qid_p1 = q->id.phys_id;
	if (i == 2 || i == 6)
		r1.field.qid_p2 = q->id.phys_id;
	if (i == 3 || i == 7)
		r1.field.qid_p3 = q->id.phys_id;

	if (i < 4)
		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
	else
		DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_ATM_QID2CQIDIX(q->id.phys_id,
						 p->id.phys_id / 4));

	r3.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX(q->id.phys_id,
						 p->id.phys_id / 4));

	r4.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
						  p->id.phys_id / 4));

	switch (p->id.phys_id % 4) {
	case 0:
		r2.field.cq_p0 |= 1 << i;
		r3.field.cq_p0 |= 1 << i;
		r4.field.cq_p0 |= 1 << i;
		break;

	case 1:
		r2.field.cq_p1 |= 1 << i;
		r3.field.cq_p1 |= 1 << i;
		r4.field.cq_p1 |= 1 << i;
		break;

	case 2:
		r2.field.cq_p2 |= 1 << i;
		r3.field.cq_p2 |= 1 << i;
		r4.field.cq_p2 |= 1 << i;
		break;

	case 3:
		r2.field.cq_p3 |= 1 << i;
		r3.field.cq_p3 |= 1 << i;
		r4.field.cq_p3 |= 1 << i;
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    r2.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
		    r3.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
		    r4.val);

	dlb2_flush_csr(hw);

	p->qid_map[i].qid = q->id.phys_id;
	p->qid_map[i].priority = priority;

	state = DLB2_QUEUE_MAPPED;

	return dlb2_port_slot_state_transition(hw, p, q, i, state);
}
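/*
 * Tell the scheduler whether this {CQ, slot} currently has work, based on
 * the queue's atomic (AQED active) and non-atomic (enqueue) counts read
 * back from the hardware.
 */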
static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
					   struct dlb2_ldb_port *port,
					   struct dlb2_ldb_queue *queue,
					   int slot)
{
	union dlb2_lsp_qid_aqed_active_cnt r0;
	union dlb2_lsp_qid_ldb_enqueue_cnt r1;
	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };

	/* Set the atomic scheduling haswork bit */
	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 1;
	r2.field.rlist_haswork_v = r0.field.count > 0;

	/* Set the non-atomic scheduling haswork bit */
	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	r1.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));

	memset(&r2, 0, sizeof(r2));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 1;
	r2.field.nalb_haswork_v = (r1.field.count > 0);

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	dlb2_flush_csr(hw);

	return 0;
}
static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
					      struct dlb2_ldb_port *port,
					      u8 slot)
{
	union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 0;
	r2.field.rlist_haswork_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	memset(&r2, 0, sizeof(r2));

	r2.field.cq = port->id.phys_id;
	r2.field.qidix = slot;
	r2.field.value = 0;
	r2.field.nalb_haswork_v = 1;

	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);

	dlb2_flush_csr(hw);
}
static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
					      struct dlb2_ldb_queue *queue)
{
	union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };

	r0.field.limit = queue->num_qid_inflights;

	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
}
static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
						struct dlb2_ldb_queue *queue)
{
	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
}
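/*
 * Second half of a dynamic map, entered once the queue's inflight count
 * has drained to zero: convert the MAP_IN_PROG slot to a static mapping,
 * restore the has-work and inflight-OK status, and re-enable the CQs that
 * were disabled while the map was in progress.
 */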
static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
						struct dlb2_hw_domain *domain,
						struct dlb2_ldb_port *port,
						struct dlb2_ldb_queue *queue)
{
	struct dlb2_list_entry *iter;
	union dlb2_lsp_qid_ldb_infl_cnt r0;
	enum dlb2_qid_map_state state;
	int slot, ret, i;
	u8 prio;
	RTE_SET_USED(iter);

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: non-zero QID inflight count\n",
			    __func__);
		return -EINVAL;
	}

	/*
	 * Statically map the port and set its corresponding has_work bits.
	 */
	state = DLB2_QUEUE_MAP_IN_PROG;
	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
		return -EINVAL;

	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	prio = port->qid_map[slot].priority;

	/*
	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
	 * the port's qid_map state.
	 */
	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
	if (ret)
		return ret;

	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
	if (ret)
		return ret;

	/*
	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
	 * prevent spurious schedules from causing the queue's inflight
	 * counter to increase.
	 */
	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);

	/* Reset the queue's inflight status */
	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
			state = DLB2_QUEUE_MAPPED;
			if (!dlb2_port_find_slot_queue(port, state,
						       queue, &slot))
				continue;

			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
		}
	}

	dlb2_ldb_queue_set_inflight_limit(hw, queue);

	/* Re-enable CQs mapped to this queue */
	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

	/* If this queue has other mappings pending, clear its inflight limit */
	if (queue->num_pending_additions > 0)
		dlb2_ldb_queue_clear_inflight_limit(hw, queue);

	return 0;
}
/**
 * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
 * @hw: dlb2_hw handle for a particular device.
 * @port: load-balanced port
 * @queue: load-balanced queue
 * @priority: queue servicing priority
 *
 * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
 * at a later point, and <0 if an error occurred.
 */
static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
					 struct dlb2_ldb_port *port,
					 struct dlb2_ldb_queue *queue,
					 u8 priority)
{
	union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
	enum dlb2_qid_map_state state;
	struct dlb2_hw_domain *domain;
	int domain_id, slot, ret;

	domain_id = port->domain_id.phys_id;

	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s()] Internal error: unable to find domain %d\n",
			    __func__, port->domain_id.phys_id);
		return -EINVAL;
	}

	/*
	 * Set the QID inflight limit to 0 to prevent further scheduling of
	 * the queue.
	 */
	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);

	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
		DLB2_HW_ERR(hw,
			    "Internal error: No available unmapped slots\n");
		return -EFAULT;
	}

	if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	port->qid_map[slot].qid = queue->id.phys_id;
	port->qid_map[slot].priority = priority;

	state = DLB2_QUEUE_MAP_IN_PROG;
	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
	if (ret)
		return ret;

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	/*
	 * Disable the affected CQ, and the CQs already mapped to the QID,
	 * before reading the QID's inflight count a second time. There is an
	 * unlikely race in which the QID may schedule one more QE after we
	 * read an inflight count of 0, and disabling the CQs guarantees that
	 * the race will not occur after a re-read of the inflight count
	 * register.
	 */
	if (port->enabled)
		dlb2_ldb_port_cq_disable(hw, port);

	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

	r0.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));

	if (r0.field.count) {
		if (port->enabled)
			dlb2_ldb_port_cq_enable(hw, port);

		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

		/*
		 * The queue is owed completions so it's not safe to map it
		 * yet. Schedule a kernel thread to complete the mapping later,
		 * once software has completed all the queue's inflight events.
		 */
		if (!os_worker_active(hw))
			os_schedule_work(hw);

		return 1;
	}

	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}
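/*
 * Attempt to complete any MAP_IN_PROG slots on this port whose queue
 * inflight count has reached zero, using the same disable-CQs-and-re-read
 * check as dlb2_ldb_port_map_qid_dynamic().
 */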
static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain,
					struct dlb2_ldb_port *port)
{
	int i;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		union dlb2_lsp_qid_ldb_infl_cnt r0;
		struct dlb2_ldb_queue *queue;
		int qid;

		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
			continue;

		qid = port->qid_map[i].qid;

		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);

		if (queue == NULL) {
			DLB2_HW_ERR(hw,
				    "[%s()] Internal error: unable to find queue %d\n",
				    __func__, qid);
			continue;
		}

		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));

		if (r0.field.count)
			continue;

		/*
		 * Disable the affected CQ, and the CQs already mapped to the
		 * QID, before reading the QID's inflight count a second time.
		 * There is an unlikely race in which the QID may schedule one
		 * more QE after we read an inflight count of 0, and disabling
		 * the CQs guarantees that the race will not occur after a
		 * re-read of the inflight count register.
		 */
		if (port->enabled)
			dlb2_ldb_port_cq_disable(hw, port);

		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);

		r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));

		if (r0.field.count) {
			if (port->enabled)
				dlb2_ldb_port_cq_enable(hw, port);

			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);

			continue;
		}

		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
	}
}
static unsigned int
dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
				      struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_additions == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_map_port(hw, domain, port);
	}

	return domain->num_pending_additions;
}
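/*
 * Tear down one QID->CQ mapping: clear the slot's valid bit in CQ2PRIOV
 * and its CQ bit in each QID2CQIDIX register, then record the slot as
 * unmapped in the software state machine.
 */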
static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
				   struct dlb2_ldb_port *port,
				   struct dlb2_ldb_queue *queue)
{
	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
	union dlb2_lsp_cq2priov r0;
	union dlb2_atm_qid2cqidix_00 r1;
	union dlb2_lsp_qid2cqidix_00 r2;
	union dlb2_lsp_qid2cqidix2_00 r3;
	u32 queue_id;
	u32 port_id;
	int i;

	/* Find the queue's slot */
	mapped = DLB2_QUEUE_MAPPED;
	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;

	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: QID %d isn't mapped\n",
			    __func__, __LINE__, queue->id.phys_id);
		return -EFAULT;
	}

	if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: port slot tracking failed\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	port_id = port->id.phys_id;
	queue_id = queue->id.phys_id;

	/* Read-modify-write the priority and valid bit register */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));

	r0.field.v &= ~(1 << i);

	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);

	r1.val = DLB2_CSR_RD(hw,
			     DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));

	r2.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));

	r3.val = DLB2_CSR_RD(hw,
			     DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));

	switch (port_id % 4) {
	case 0:
		r1.field.cq_p0 &= ~(1 << i);
		r2.field.cq_p0 &= ~(1 << i);
		r3.field.cq_p0 &= ~(1 << i);
		break;

	case 1:
		r1.field.cq_p1 &= ~(1 << i);
		r2.field.cq_p1 &= ~(1 << i);
		r3.field.cq_p1 &= ~(1 << i);
		break;

	case 2:
		r1.field.cq_p2 &= ~(1 << i);
		r2.field.cq_p2 &= ~(1 << i);
		r3.field.cq_p2 &= ~(1 << i);
		break;

	case 3:
		r1.field.cq_p3 &= ~(1 << i);
		r2.field.cq_p3 &= ~(1 << i);
		r3.field.cq_p3 &= ~(1 << i);
		break;
	}

	DLB2_CSR_WR(hw,
		    DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
		    r1.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
		    r2.val);

	DLB2_CSR_WR(hw,
		    DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
		    r3.val);

	dlb2_flush_csr(hw);

	unmapped = DLB2_QUEUE_UNMAPPED;

	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
}
static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
				 struct dlb2_hw_domain *domain,
				 struct dlb2_ldb_port *port,
				 struct dlb2_ldb_queue *queue,
				 u8 prio)
{
	if (domain->started)
		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
	else
		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
}
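/*
 * Complete a deferred unmap for one {port, slot}: update the mapping
 * registers, clear the slot's has-work bits, restore the default
 * inflight-OK status, and, if a map request was queued behind this unmap
 * (UNMAP_IN_PROG_PENDING_MAP), start that map immediately.
 */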
static void
dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
				   struct dlb2_hw_domain *domain,
				   struct dlb2_ldb_port *port,
				   int slot)
{
	enum dlb2_qid_map_state state;
	struct dlb2_ldb_queue *queue;

	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];

	state = port->qid_map[slot].state;

	/* Update the QID2CQIDX and CQ2QID vectors */
	dlb2_ldb_port_unmap_qid(hw, port, queue);

	/*
	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
	 * the has_work bits.
	 */
	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);

	/* Reset the {CQ, slot} to its default state */
	dlb2_ldb_port_set_queue_if_status(hw, port, slot);

	/* Re-enable the CQ if it wasn't manually disabled by the user */
	if (port->enabled)
		dlb2_ldb_port_cq_enable(hw, port);

	/*
	 * If there is a mapping that is pending this slot's removal, perform
	 * the mapping now.
	 */
	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
		struct dlb2_ldb_port_qid_map *map;
		struct dlb2_ldb_queue *map_queue;
		u8 prio;

		map = &port->qid_map[slot];

		map->qid = map->pending_qid;
		map->priority = map->pending_priority;

		map_queue = &hw->rsrcs.ldb_queues[map->qid];
		prio = map->priority;

		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
	}
}
static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
					  struct dlb2_hw_domain *domain,
					  struct dlb2_ldb_port *port)
{
	union dlb2_lsp_cq_ldb_infl_cnt r0;
	int i;

	if (port->num_pending_removals == 0)
		return false;

	/*
	 * The unmap requires all the CQ's outstanding inflights to be
	 * completed.
	 */
	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
	if (r0.field.count > 0)
		return false;

	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
		struct dlb2_ldb_port_qid_map *map;

		map = &port->qid_map[i];

		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
			continue;

		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
	}

	return true;
}
static unsigned int
dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
					struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_port *port;
	int i;
	RTE_SET_USED(iter);

	if (!domain->configured || domain->num_pending_removals == 0)
		return 0;

	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
			dlb2_domain_finish_unmap_port(hw, domain, port);
	}

	return domain->num_pending_removals;
}
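/**
 * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * Attempts to finish any outstanding unmap procedures across all domains.
 *
 * Return: the number of unmap procedures that remain outstanding.
 */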
unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
{
	int i, num = 0;

	/* Finish queue unmap jobs for any domain that needs it */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		struct dlb2_hw_domain *domain = &hw->domains[i];

		num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
	}

	return num;
}
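/**
 * dlb2_finish_map_qid_procedures() - finish any pending map procedures
 * @hw: dlb2_hw handle for a particular device.
 *
 * Attempts to finish any outstanding map procedures across all domains.
 *
 * Return: the number of map procedures that remain outstanding.
 */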
unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
{
	int i, num = 0;

	/* Finish queue map jobs for any domain that needs it */
	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
		struct dlb2_hw_domain *domain = &hw->domains[i];

		num += dlb2_domain_finish_map_qid_procedures(hw, domain);
	}

	return num;
}
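/*
 * Sequence-number (SN) groups are a shared resource used by ordered
 * queues. The getters below report a group's per-queue SN allocation and
 * how many of its slots are in use.
 */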
int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, unsigned int group_id)
{
	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
}
int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw,
					     unsigned int group_id)
{
	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
}
static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
						unsigned int group_id,
						unsigned long val)
{
	DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
	DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
	DLB2_HW_DBG(hw, "\tValue: %lu\n", val);
}
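/**
 * dlb2_set_group_sequence_numbers() - set a group's SNs-per-queue value
 * @hw: dlb2_hw handle for a particular device.
 * @group_id: sequence number group ID.
 * @val: requested number of sequence numbers per queue.
 *
 * Only the allocations listed in valid_allocations[] are accepted, and a
 * group's allocation can be changed only while no queue is using the group.
 *
 * Return: 0 on success, < 0 otherwise.
 */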
int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
				    unsigned int group_id,
				    unsigned long val)
{
	u32 valid_allocations[] = {64, 128, 256, 512, 1024};
	union dlb2_ro_pipe_grp_sn_mode r0 = { {0} };
	struct dlb2_sn_group *group;
	int mode;

	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
		return -EINVAL;

	group = &hw->rsrcs.sn_groups[group_id];

	/*
	 * Once the first load-balanced queue using an SN group is configured,
	 * the group cannot be changed.
	 */
	if (group->slot_use_bitmap != 0)
		return -EPERM;

	for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
		if (val == valid_allocations[mode])
			break;

	if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
		return -EINVAL;

	group->mode = mode;
	group->sequence_numbers_per_queue = val;

	r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
	r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;

	DLB2_CSR_WR(hw, DLB2_RO_PIPE_GRP_SN_MODE, r0.val);

	dlb2_log_set_group_sequence_numbers(hw, group_id, val);

	return 0;
}
static struct dlb2_dir_pq_pair *
dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
			    u32 id,
			    bool vdev_req,
			    struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *port;
	RTE_SET_USED(iter);

	if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
		return NULL;

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
		if ((!vdev_req && port->id.phys_id == id) ||
		    (vdev_req && port->id.virt_id == id))
			return port;

	return NULL;
}
static struct dlb2_ldb_queue *
dlb2_get_domain_ldb_queue(u32 id,
			  bool vdev_req,
			  struct dlb2_hw_domain *domain)
{
	struct dlb2_list_entry *iter;
	struct dlb2_ldb_queue *queue;
	RTE_SET_USED(iter);

	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
		return NULL;

	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
		if ((!vdev_req && queue->id.phys_id == id) ||
		    (vdev_req && queue->id.virt_id == id))
			return queue;

	return NULL;
}
static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
					 u32 domain_id,
					 struct dlb2_cmd_response *resp,
					 bool vdev_req,
					 unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);

	if (domain == NULL) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	if (!domain->configured) {
		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
		return -EINVAL;
	}

	if (domain->started) {
		resp->status = DLB2_ST_DOMAIN_STARTED;
		return -EINVAL;
	}

	return 0;
}
static void dlb2_log_start_domain(struct dlb2_hw *hw,
				  u32 domain_id,
				  bool vdev_req,
				  unsigned int vdev_id)
{
	DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}
/**
 * dlb2_hw_start_domain() - Lock the domain configuration
 * @hw: Contains the current state of the DLB2 hardware.
 * @domain_id: Domain ID
 * @arg: User-provided arguments (unused, here for ioctl callback template).
 * @resp: Response to user.
 * @vdev_req: Request came from a virtual device.
 * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */
int
dlb2_hw_start_domain(struct dlb2_hw *hw,
		     u32 domain_id,
		     struct dlb2_start_domain_args *arg,
		     struct dlb2_cmd_response *resp,
		     bool vdev_req,
		     unsigned int vdev_id)
{
	struct dlb2_list_entry *iter;
	struct dlb2_dir_pq_pair *dir_queue;
	struct dlb2_ldb_queue *ldb_queue;
	struct dlb2_hw_domain *domain;
	int ret;
	RTE_SET_USED(arg);
	RTE_SET_USED(iter);

	dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);

	ret = dlb2_verify_start_domain_args(hw, domain_id, resp,
					    vdev_req, vdev_id);
	if (ret)
		return ret;

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (domain == NULL) {
		DLB2_HW_ERR(hw,
			    "[%s():%d] Internal error: domain not found\n",
			    __func__, __LINE__);
		return -EFAULT;
	}

	/*
	 * Enable load-balanced and directed queue write permissions for the
	 * queues this domain owns. Without this, the DLB2 will drop all
	 * incoming traffic to those queues.
	 */
	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
		union dlb2_sys_ldb_vasqid_v r0 = { {0} };
		unsigned int offs;

		r0.field.vasqid_v = 1;

		offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
			ldb_queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
	}

	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
		union dlb2_sys_dir_vasqid_v r0 = { {0} };
		unsigned int offs;

		r0.field.vasqid_v = 1;

		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
			dir_queue->id.phys_id;

		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
	}

	dlb2_flush_csr(hw);

	domain->started = true;

	resp->status = 0;

	return 0;
}
static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
					 u32 domain_id,
					 u32 queue_id,
					 bool vdev_req,
					 unsigned int vf_id)
{
	DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
}
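/**
 * dlb2_hw_get_dir_queue_depth() - return the depth of a directed queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue depth arguments.
 * @resp: response structure; on success, resp->id holds the queue depth.
 * @vdev_req: request came from a virtual device.
 * @vdev_id: if vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */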
int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
				u32 domain_id,
				struct dlb2_get_dir_queue_depth_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_dir_pq_pair *queue;
	struct dlb2_hw_domain *domain;
	int id;

	id = domain_id;

	dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
				     vdev_req, vdev_id);

	domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
	if (domain == NULL) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	id = args->queue_id;

	queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
	if (queue == NULL) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	resp->id = dlb2_dir_queue_depth(hw, queue);

	return 0;
}
static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
					 u32 domain_id,
					 u32 queue_id,
					 bool vdev_req,
					 unsigned int vf_id)
{
	DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
	if (vdev_req)
		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
}
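/**
 * dlb2_hw_get_ldb_queue_depth() - return the depth of a load-balanced queue
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: queue depth arguments.
 * @resp: response structure; on success, resp->id holds the queue depth.
 * @vdev_req: request came from a virtual device.
 * @vdev_id: if vdev_req is true, this contains the virtual device's ID.
 *
 * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
 * satisfy a request, resp->status will be set accordingly.
 */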
int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
				u32 domain_id,
				struct dlb2_get_ldb_queue_depth_args *args,
				struct dlb2_cmd_response *resp,
				bool vdev_req,
				unsigned int vdev_id)
{
	struct dlb2_hw_domain *domain;
	struct dlb2_ldb_queue *queue;

	dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
				     vdev_req, vdev_id);

	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
	if (domain == NULL) {
		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
		return -EINVAL;
	}

	queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
	if (queue == NULL) {
		resp->status = DLB2_ST_INVALID_QID;
		return -EINVAL;
	}

	resp->id = dlb2_ldb_queue_depth(hw, queue);

	return 0;
}