1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Broadcom
8 #include <rte_bitmap.h>
11 #include "bcmfs_logs.h"
12 #include "bcmfs_dev_msg.h"
13 #include "bcmfs_device.h"
14 #include "bcmfs_hw_defs.h"
15 #include "bcmfs_rm_common.h"
/* Ring version magic: ASCII "v002" expected in the RING_VER register */
#define RING_VER_MAGIC 0x76303032

/* Per-Ring register offsets */
#define RING_VER 0x000
#define RING_BD_START_ADDRESS_LSB 0x004
#define RING_BD_READ_PTR 0x008
#define RING_BD_WRITE_PTR 0x00c
#define RING_BD_READ_PTR_DDR_LS 0x010
#define RING_BD_READ_PTR_DDR_MS 0x014
#define RING_CMPL_START_ADDR_LSB 0x018
#define RING_CMPL_WRITE_PTR 0x01c
#define RING_NUM_REQ_RECV_LS 0x020
#define RING_NUM_REQ_RECV_MS 0x024
#define RING_NUM_REQ_TRANS_LS 0x028
#define RING_NUM_REQ_TRANS_MS 0x02c
#define RING_NUM_REQ_OUTSTAND 0x030
#define RING_CONTROL 0x034
#define RING_FLUSH_DONE 0x038
#define RING_MSI_ADDR_LS 0x03c
#define RING_MSI_ADDR_MS 0x040
#define RING_MSI_CONTROL 0x048
#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
#define RING_MSI_DATA_VALUE 0x064
#define RING_BD_START_ADDRESS_MSB 0x078
#define RING_CMPL_START_ADDR_MSB 0x07c
/* NOTE: doorbell offset (0x074) sits below the MSB address registers */
#define RING_DOORBELL_BD_WRITE_COUNT 0x074

/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT 28
#define BD_LAST_UPDATE_HW_MASK 0x1
/* BD physical address is stored right-shifted by the BD alignment order */
#define BD_START_ADDR_VALUE(pa) \
((uint32_t)((((uint64_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val) \
((uint64_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa) \
((uint32_t)((((uint64_t)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL 12
#define CONTROL_FLUSH_SHIFT 5
#define CONTROL_ACTIVE_SHIFT 4
#define CONTROL_RATE_ADAPT_MASK 0xf
#define CONTROL_RATE_DYNAMIC 0x0
#define CONTROL_RATE_FAST 0x8
#define CONTROL_RATE_MEDIUM 0x9
#define CONTROL_RATE_SLOW 0xa
#define CONTROL_RATE_IDLE 0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK 0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT 16
#define MSI_TIMER_VAL_MASK 0xffff
#define MSI_ENABLE_SHIFT 15
#define MSI_ENABLE_MASK 0x1
#define MSI_COUNT_SHIFT 0
#define MSI_COUNT_MASK 0x3ff

/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
#define BD_READ_PTR_DDR_ENABLE_MASK 0x1

/* General descriptor format: type in bits [63:60], payload below */
#define DESC_TYPE_SHIFT 60
#define DESC_TYPE_MASK 0xf
#define DESC_PAYLOAD_SHIFT 0
#define DESC_PAYLOAD_MASK 0x0fffffffffffffff

/* Null descriptor format */
#define NULL_TOGGLE_SHIFT 59
#define NULL_TOGGLE_MASK 0x1

/* Header descriptor format */
#define HEADER_TOGGLE_SHIFT 59
#define HEADER_TOGGLE_MASK 0x1
#define HEADER_ENDPKT_SHIFT 57
#define HEADER_ENDPKT_MASK 0x1
#define HEADER_STARTPKT_SHIFT 56
#define HEADER_STARTPKT_MASK 0x1
#define HEADER_BDCOUNT_SHIFT 36
#define HEADER_BDCOUNT_MASK 0x1f
/* At most 31 non-HEADER descriptors may follow one HEADER descriptor */
#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT 16
#define HEADER_FLAGS_MASK 0xffff
#define HEADER_OPAQUE_SHIFT 0
#define HEADER_OPAQUE_MASK 0xffff

/* Source (SRC) descriptor format */
#define SRC_LENGTH_SHIFT 44
#define SRC_LENGTH_MASK 0xffff
#define SRC_ADDR_SHIFT 0
#define SRC_ADDR_MASK 0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_LENGTH_SHIFT 44
#define DST_LENGTH_MASK 0xffff
#define DST_ADDR_SHIFT 0
#define DST_ADDR_MASK 0x00000fffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TOGGLE_SHIFT 59
#define NPTR_TOGGLE_MASK 0x1
#define NPTR_ADDR_SHIFT 0
#define NPTR_ADDR_MASK 0x00000fffffffffff

/* Mega source (MSRC) descriptor format: length is in 16-byte units */
#define MSRC_LENGTH_SHIFT 44
#define MSRC_LENGTH_MASK 0xffff
#define MSRC_ADDR_SHIFT 0
#define MSRC_ADDR_MASK 0x00000fffffffffff

/* Mega destination (MDST) descriptor format: length is in 16-byte units */
#define MDST_LENGTH_SHIFT 44
#define MDST_LENGTH_MASK 0xffff
#define MDST_ADDR_SHIFT 0
#define MDST_ADDR_MASK 0x00000fffffffffff
149 bcmfs5_is_next_table_desc(void *desc_ptr)
151 uint64_t desc = rm_read_desc(desc_ptr);
152 uint32_t type = FS_DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
154 return (type == NPTR_TYPE) ? true : false;
158 bcmfs5_next_table_desc(uint64_t next_addr)
160 return (rm_build_desc(NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
161 rm_build_desc(next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK));
165 bcmfs5_null_desc(void)
167 return rm_build_desc(NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
171 bcmfs5_header_desc(uint32_t startpkt, uint32_t endpkt,
172 uint32_t bdcount, uint32_t flags,
175 return (rm_build_desc(HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
176 rm_build_desc(startpkt, HEADER_STARTPKT_SHIFT,
177 HEADER_STARTPKT_MASK) |
178 rm_build_desc(endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK) |
179 rm_build_desc(bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK) |
180 rm_build_desc(flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK) |
181 rm_build_desc(opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK));
185 bcmfs5_enqueue_desc(uint32_t nhpos, uint32_t nhcnt,
186 uint32_t reqid, uint64_t desc,
187 void **desc_ptr, void *start_desc,
191 uint32_t nhavail, _startpkt, _endpkt, _bdcount;
195 * Each request or packet start with a HEADER descriptor followed
196 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
197 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
198 * following a HEADER descriptor is represented by BDCOUNT field
199 * of HEADER descriptor. The max value of BDCOUNT field is 31 which
200 * means we can only have 31 non-HEADER descriptors following one
203 * In general use, number of non-HEADER descriptors can easily go
204 * beyond 31. To tackle this situation, we have packet (or request)
205 * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
207 * To use packet extension, the first HEADER descriptor of request
208 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
209 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
210 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1.
213 if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
214 /* Prepare the header descriptor */
215 nhavail = (nhcnt - nhpos);
216 _startpkt = (nhpos == 0) ? 0x1 : 0x0;
217 _endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
218 _bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
219 nhavail : HEADER_BDCOUNT_MAX;
220 if (nhavail <= HEADER_BDCOUNT_MAX)
223 _bdcount = HEADER_BDCOUNT_MAX;
224 d = bcmfs5_header_desc(_startpkt, _endpkt,
225 _bdcount, 0x0, reqid);
227 /* Write header descriptor */
228 rm_write_desc(*desc_ptr, d);
230 /* Point to next descriptor */
231 *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
232 if (*desc_ptr == end_desc)
233 *desc_ptr = start_desc;
235 /* Skip next pointer descriptors */
236 while (bcmfs5_is_next_table_desc(*desc_ptr)) {
238 *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
239 if (*desc_ptr == end_desc)
240 *desc_ptr = start_desc;
244 /* Write desired descriptor */
245 rm_write_desc(*desc_ptr, desc);
247 /* Point to next descriptor */
248 *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
249 if (*desc_ptr == end_desc)
250 *desc_ptr = start_desc;
252 /* Skip next pointer descriptors */
253 while (bcmfs5_is_next_table_desc(*desc_ptr)) {
255 *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
256 if (*desc_ptr == end_desc)
257 *desc_ptr = start_desc;
264 bcmfs5_src_desc(uint64_t addr, unsigned int len)
266 return (rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
267 rm_build_desc(len, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK) |
268 rm_build_desc(addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK));
272 bcmfs5_msrc_desc(uint64_t addr, unsigned int len_div_16)
274 return (rm_build_desc(MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
275 rm_build_desc(len_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK) |
276 rm_build_desc(addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK));
280 bcmfs5_dst_desc(uint64_t addr, unsigned int len)
282 return (rm_build_desc(DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
283 rm_build_desc(len, DST_LENGTH_SHIFT, DST_LENGTH_MASK) |
284 rm_build_desc(addr, DST_ADDR_SHIFT, DST_ADDR_MASK));
288 bcmfs5_mdst_desc(uint64_t addr, unsigned int len_div_16)
290 return (rm_build_desc(MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
291 rm_build_desc(len_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK) |
292 rm_build_desc(addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK));
296 bcmfs5_sanity_check(struct bcmfs_qp_message *msg)
303 for (i = 0; i < msg->srcs_count; i++) {
304 if (msg->srcs_len[i] & 0xf) {
305 if (msg->srcs_len[i] > SRC_LENGTH_MASK)
308 if (msg->srcs_len[i] > (MSRC_LENGTH_MASK * 16))
312 for (i = 0; i < msg->dsts_count; i++) {
313 if (msg->dsts_len[i] & 0xf) {
314 if (msg->dsts_len[i] > DST_LENGTH_MASK)
317 if (msg->dsts_len[i] > (MDST_LENGTH_MASK * 16))
326 bcmfs5_enqueue_msg(struct bcmfs_queue *txq,
327 struct bcmfs_qp_message *msg,
328 uint32_t reqid, void *desc_ptr,
329 void *start_desc, void *end_desc)
332 unsigned int src, dst;
335 uint32_t nhcnt = msg->srcs_count + msg->dsts_count;
337 if (desc_ptr == NULL || start_desc == NULL || end_desc == NULL)
340 if (desc_ptr < start_desc || end_desc <= desc_ptr)
343 for (src = 0; src < msg->srcs_count; src++) {
344 if (msg->srcs_len[src] & 0xf)
345 d = bcmfs5_src_desc(msg->srcs_addr[src],
348 d = bcmfs5_msrc_desc(msg->srcs_addr[src],
349 msg->srcs_len[src] / 16);
351 nxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,
352 d, &desc_ptr, start_desc,
355 txq->descs_inflight++;
359 for (dst = 0; dst < msg->dsts_count; dst++) {
360 if (msg->dsts_len[dst] & 0xf)
361 d = bcmfs5_dst_desc(msg->dsts_addr[dst],
364 d = bcmfs5_mdst_desc(msg->dsts_addr[dst],
365 msg->dsts_len[dst] / 16);
367 nxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,
368 d, &desc_ptr, start_desc,
371 txq->descs_inflight++;
375 txq->descs_inflight += nhcnt + 1;
381 bcmfs5_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)
388 uint8_t exit_cleanup = false;
389 struct bcmfs_queue *txq = &qp->tx_q;
390 struct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;
392 /* Do sanity check on message */
393 if (!bcmfs5_sanity_check(msg)) {
394 BCMFS_DP_LOG(ERR, "Invalid msg on queue %d", qp->qpair_id);
398 /* Scan from the beginning */
399 __rte_bitmap_scan_init(qp->ctx_bmp);
400 /* Scan bitmap to get the free pool */
401 ret = rte_bitmap_scan(qp->ctx_bmp, &pos, &slab);
403 BCMFS_DP_LOG(ERR, "BD memory exhausted");
407 reqid = pos + __builtin_ctzll(slab);
408 rte_bitmap_clear(qp->ctx_bmp, reqid);
409 qp->ctx_pool[reqid] = (unsigned long)msg;
411 /* Write descriptors to ring */
412 next = bcmfs5_enqueue_msg(txq, msg, reqid,
413 (uint8_t *)txq->base_addr + txq->tx_write_ptr,
415 (uint8_t *)txq->base_addr + txq->queue_size);
417 BCMFS_DP_LOG(ERR, "Enqueue for desc failed on queue %d",
424 /* Save ring BD write offset */
425 txq->tx_write_ptr = (uint32_t)((uint8_t *)next -
426 (uint8_t *)txq->base_addr);
428 qp->nb_pending_requests++;
433 /* Cleanup if we failed */
435 rte_bitmap_set(qp->ctx_bmp, reqid);
440 static void bcmfs5_write_doorbell(struct bcmfs_qp *qp)
442 struct bcmfs_queue *txq = &qp->tx_q;
444 /* sync in bfeore ringing the door-bell */
447 FS_MMIO_WRITE32(txq->descs_inflight,
448 (uint8_t *)qp->ioreg + RING_DOORBELL_BD_WRITE_COUNT);
450 /* reset the count */
451 txq->descs_inflight = 0;
455 bcmfs5_dequeue_qp(struct bcmfs_qp *qp, void **ops, uint16_t budget)
461 unsigned long context = 0;
462 struct bcmfs_queue *hwq = &qp->cmpl_q;
463 uint32_t cmpl_read_offset, cmpl_write_offset;
466 * Check whether budget is valid, else set the budget to maximum
467 * so that all the available completions will be processed.
469 if (budget > qp->nb_pending_requests)
470 budget = qp->nb_pending_requests;
473 * Get current completion read and write offset
475 * Note: We should read completion write pointer at least once
476 * after we get a MSI interrupt because HW maintains internal
477 * MSI status which will allow next MSI interrupt only after
478 * completion write pointer is read.
480 cmpl_write_offset = FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);
481 cmpl_write_offset *= FS_RING_DESC_SIZE;
482 cmpl_read_offset = hwq->cmpl_read_ptr;
484 /* read the ring cmpl write ptr before cmpl read offset */
487 /* For each completed request notify mailbox clients */
489 while ((cmpl_read_offset != cmpl_write_offset) && (budget > 0)) {
490 /* Dequeue next completion descriptor */
491 desc = *((uint64_t *)((uint8_t *)hwq->base_addr +
494 /* Next read offset */
495 cmpl_read_offset += FS_RING_DESC_SIZE;
496 if (cmpl_read_offset == FS_RING_CMPL_SIZE)
497 cmpl_read_offset = 0;
499 /* Decode error from completion descriptor */
500 err = rm_cmpl_desc_to_error(desc);
502 BCMFS_DP_LOG(ERR, "error desc rcvd");
504 /* Determine request id from completion descriptor */
505 reqid = rm_cmpl_desc_to_reqid(desc);
507 /* Retrieve context */
508 context = qp->ctx_pool[reqid];
510 BCMFS_DP_LOG(ERR, "HW error detected");
512 /* Release reqid for recycling */
513 qp->ctx_pool[reqid] = 0;
514 rte_bitmap_set(qp->ctx_bmp, reqid);
516 *ops = (void *)context;
518 /* Increment number of completions processed */
524 hwq->cmpl_read_ptr = cmpl_read_offset;
526 qp->nb_pending_requests -= count;
532 bcmfs5_start_qp(struct bcmfs_qp *qp)
535 uint64_t d, next_addr, msi;
537 uint32_t bd_high, bd_low, cmpl_high, cmpl_low;
538 struct bcmfs_queue *tx_queue = &qp->tx_q;
539 struct bcmfs_queue *cmpl_queue = &qp->cmpl_q;
541 /* Disable/deactivate ring */
542 FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
544 /* Configure next table pointer entries in BD memory */
545 for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {
546 next_addr = off + FS_RING_DESC_SIZE;
547 if (next_addr == tx_queue->queue_size)
549 next_addr += (uint64_t)tx_queue->base_phys_addr;
550 if (FS_RING_BD_ALIGN_CHECK(next_addr))
551 d = bcmfs5_next_table_desc(next_addr);
553 d = bcmfs5_null_desc();
554 rm_write_desc((uint8_t *)tx_queue->base_addr + off, d);
558 * If user interrupt the test in between the run(Ctrl+C), then all
559 * subsequent test run will fail because sw cmpl_read_offset and hw
560 * cmpl_write_offset will be pointing at different completion BD. To
561 * handle this we should flush all the rings in the startup instead
562 * of shutdown function.
563 * Ring flush will reset hw cmpl_write_offset.
566 /* Set ring flush state */
568 FS_MMIO_WRITE32(BIT(CONTROL_FLUSH_SHIFT),
569 (uint8_t *)qp->ioreg + RING_CONTROL);
572 * If previous test is stopped in between the run, then
573 * sw has to read cmpl_write_offset else DME/AE will be not
574 * come out of flush state.
576 FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);
578 if (FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
584 BCMFS_DP_LOG(ERR, "Ring flush timeout hw-queue %d",
588 /* Clear ring flush state */
590 FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
592 if (!(FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
598 BCMFS_DP_LOG(ERR, "Ring clear flush timeout hw-queue %d",
602 /* Program BD start address */
603 bd_low = lower_32_bits(tx_queue->base_phys_addr);
604 bd_high = upper_32_bits(tx_queue->base_phys_addr);
605 FS_MMIO_WRITE32(bd_low, (uint8_t *)qp->ioreg +
606 RING_BD_START_ADDRESS_LSB);
607 FS_MMIO_WRITE32(bd_high, (uint8_t *)qp->ioreg +
608 RING_BD_START_ADDRESS_MSB);
610 tx_queue->tx_write_ptr = 0;
612 for (off = 0; off < FS_RING_CMPL_SIZE; off += FS_RING_DESC_SIZE)
613 rm_write_desc((uint8_t *)cmpl_queue->base_addr + off, 0x0);
615 /* Completion read pointer will be same as HW write pointer */
616 cmpl_queue->cmpl_read_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +
617 RING_CMPL_WRITE_PTR);
618 /* Program completion start address */
619 cmpl_low = lower_32_bits(cmpl_queue->base_phys_addr);
620 cmpl_high = upper_32_bits(cmpl_queue->base_phys_addr);
621 FS_MMIO_WRITE32(cmpl_low, (uint8_t *)qp->ioreg +
622 RING_CMPL_START_ADDR_LSB);
623 FS_MMIO_WRITE32(cmpl_high, (uint8_t *)qp->ioreg +
624 RING_CMPL_START_ADDR_MSB);
626 cmpl_queue->cmpl_read_ptr *= FS_RING_DESC_SIZE;
628 /* Read ring Tx, Rx, and Outstanding counts to clear */
629 FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_LS);
630 FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_MS);
631 FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_LS);
632 FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_MS);
633 FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_OUTSTAND);
635 /* Configure per-Ring MSI registers with dummy location */
636 msi = cmpl_queue->base_phys_addr + (1024 * FS_RING_DESC_SIZE);
637 FS_MMIO_WRITE32((msi & 0xFFFFFFFF),
638 (uint8_t *)qp->ioreg + RING_MSI_ADDR_LS);
639 FS_MMIO_WRITE32(((msi >> 32) & 0xFFFFFFFF),
640 (uint8_t *)qp->ioreg + RING_MSI_ADDR_MS);
641 FS_MMIO_WRITE32(qp->qpair_id, (uint8_t *)qp->ioreg +
642 RING_MSI_DATA_VALUE);
644 /* Configure RING_MSI_CONTROL */
646 val |= (MSI_TIMER_VAL_MASK << MSI_TIMER_VAL_SHIFT);
647 val |= BIT(MSI_ENABLE_SHIFT);
648 val |= (0x1 & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
649 FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_MSI_CONTROL);
651 /* Enable/activate ring */
652 val = BIT(CONTROL_ACTIVE_SHIFT);
653 FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CONTROL);
659 bcmfs5_shutdown_qp(struct bcmfs_qp *qp)
661 /* Disable/deactivate ring */
662 FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
665 struct bcmfs_hw_queue_pair_ops bcmfs5_qp_ops = {
667 .enq_one_req = bcmfs5_enqueue_single_request_qp,
668 .ring_db = bcmfs5_write_doorbell,
669 .dequeue = bcmfs5_dequeue_qp,
670 .startq = bcmfs5_start_qp,
671 .stopq = bcmfs5_shutdown_qp,
674 RTE_INIT(bcmfs5_register_qp_ops)
676 bcmfs_hw_queue_pair_register_ops(&bcmfs5_qp_ops);