drivers/crypto/bcmfs/hw/bcmfs5_rm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_bitmap.h>

#include "bcmfs_qp.h"
#include "bcmfs_logs.h"
#include "bcmfs_dev_msg.h"
#include "bcmfs_device.h"
#include "bcmfs_hw_defs.h"
#include "bcmfs_rm_common.h"

/* Ring version */
#define RING_VER_MAGIC                                  0x76303032

/* Per-Ring register offsets */
#define RING_VER                                        0x000
#define RING_BD_START_ADDRESS_LSB                       0x004
#define RING_BD_READ_PTR                                0x008
#define RING_BD_WRITE_PTR                               0x00c
#define RING_BD_READ_PTR_DDR_LS                         0x010
#define RING_BD_READ_PTR_DDR_MS                         0x014
#define RING_CMPL_START_ADDR_LSB                        0x018
#define RING_CMPL_WRITE_PTR                             0x01c
#define RING_NUM_REQ_RECV_LS                            0x020
#define RING_NUM_REQ_RECV_MS                            0x024
#define RING_NUM_REQ_TRANS_LS                           0x028
#define RING_NUM_REQ_TRANS_MS                           0x02c
#define RING_NUM_REQ_OUTSTAND                           0x030
#define RING_CONTROL                                    0x034
#define RING_FLUSH_DONE                                 0x038
#define RING_MSI_ADDR_LS                                0x03c
#define RING_MSI_ADDR_MS                                0x040
#define RING_MSI_CONTROL                                0x048
#define RING_BD_READ_PTR_DDR_CONTROL                    0x04c
#define RING_MSI_DATA_VALUE                             0x064
#define RING_DOORBELL_BD_WRITE_COUNT                    0x074
#define RING_BD_START_ADDRESS_MSB                       0x078
#define RING_CMPL_START_ADDR_MSB                        0x07c

/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT                         28
#define BD_LAST_UPDATE_HW_MASK                          0x1
#define BD_START_ADDR_VALUE(pa)                         \
        ((uint32_t)((((uint64_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val)                       \
        ((uint64_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa)                       \
        ((uint32_t)((((uint64_t)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL                    12
#define CONTROL_FLUSH_SHIFT                             5
#define CONTROL_ACTIVE_SHIFT                            4
#define CONTROL_RATE_ADAPT_MASK                         0xf
#define CONTROL_RATE_DYNAMIC                            0x0
#define CONTROL_RATE_FAST                               0x8
#define CONTROL_RATE_MEDIUM                             0x9
#define CONTROL_RATE_SLOW                               0xa
#define CONTROL_RATE_IDLE                               0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK                                 0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT                             16
#define MSI_TIMER_VAL_MASK                              0xffff
#define MSI_ENABLE_SHIFT                                15
#define MSI_ENABLE_MASK                                 0x1
#define MSI_COUNT_SHIFT                                 0
#define MSI_COUNT_MASK                                  0x3ff

/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT                 16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK                  0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT                    15
#define BD_READ_PTR_DDR_ENABLE_MASK                     0x1

/* General descriptor format */
#define DESC_TYPE_SHIFT                                 60
#define DESC_TYPE_MASK                                  0xf
#define DESC_PAYLOAD_SHIFT                              0
#define DESC_PAYLOAD_MASK                               0x0fffffffffffffff
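
/*
 * Worked example (a sketch, assuming rm_build_desc(val, shift, mask) from
 * bcmfs_rm_common packs fields as ((val & mask) << shift)): a SRC
 * descriptor (fields defined below) for a 100-byte (0x64) buffer at
 * PA 0x1000 is
 *
 *   (SRC_TYPE & 0xf) << 60 | (0x64 & 0xffff) << 44 | 0x1000
 *     = 0x2064000000001000
 *
 * i.e. type in bits [63:60], length in bits [59:44], address in [43:0].
 */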

/* Null descriptor format */
#define NULL_TYPE                                       0
#define NULL_TOGGLE_SHIFT                               59
#define NULL_TOGGLE_MASK                                0x1

/* Header descriptor format */
#define HEADER_TYPE                                     1
#define HEADER_TOGGLE_SHIFT                             59
#define HEADER_TOGGLE_MASK                              0x1
#define HEADER_ENDPKT_SHIFT                             57
#define HEADER_ENDPKT_MASK                              0x1
#define HEADER_STARTPKT_SHIFT                           56
#define HEADER_STARTPKT_MASK                            0x1
#define HEADER_BDCOUNT_SHIFT                            36
#define HEADER_BDCOUNT_MASK                             0x1f
#define HEADER_BDCOUNT_MAX                              HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT                              16
#define HEADER_FLAGS_MASK                               0xffff
#define HEADER_OPAQUE_SHIFT                             0
#define HEADER_OPAQUE_MASK                              0xffff

/* Source (SRC) descriptor format */
#define SRC_TYPE                                        2
#define SRC_LENGTH_SHIFT                                44
#define SRC_LENGTH_MASK                                 0xffff
#define SRC_ADDR_SHIFT                                  0
#define SRC_ADDR_MASK                                   0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_TYPE                                        3
#define DST_LENGTH_SHIFT                                44
#define DST_LENGTH_MASK                                 0xffff
#define DST_ADDR_SHIFT                                  0
#define DST_ADDR_MASK                                   0x00000fffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE                                       5
#define NPTR_TOGGLE_SHIFT                               59
#define NPTR_TOGGLE_MASK                                0x1
#define NPTR_ADDR_SHIFT                                 0
#define NPTR_ADDR_MASK                                  0x00000fffffffffff

/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE                                       6
#define MSRC_LENGTH_SHIFT                               44
#define MSRC_LENGTH_MASK                                0xffff
#define MSRC_ADDR_SHIFT                                 0
#define MSRC_ADDR_MASK                                  0x00000fffffffffff

/* Mega destination (MDST) descriptor format */
#define MDST_TYPE                                       7
#define MDST_LENGTH_SHIFT                               44
#define MDST_LENGTH_MASK                                0xffff
#define MDST_ADDR_SHIFT                                 0
#define MDST_ADDR_MASK                                  0x00000fffffffffff

static bool
bcmfs5_is_next_table_desc(void *desc_ptr)
{
        uint64_t desc = rm_read_desc(desc_ptr);
        uint32_t type = FS_DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);

        return type == NPTR_TYPE;
}

static uint64_t
bcmfs5_next_table_desc(uint64_t next_addr)
{
        return (rm_build_desc(NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK));
}

static uint64_t
bcmfs5_null_desc(void)
{
        return rm_build_desc(NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
}

static uint64_t
bcmfs5_header_desc(uint32_t startpkt, uint32_t endpkt,
                   uint32_t bdcount, uint32_t flags,
                   uint32_t opaque)
{
        return (rm_build_desc(HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(startpkt, HEADER_STARTPKT_SHIFT,
                              HEADER_STARTPKT_MASK) |
                rm_build_desc(endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK) |
                rm_build_desc(bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK) |
                rm_build_desc(flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK) |
                rm_build_desc(opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK));
}

static int
bcmfs5_enqueue_desc(uint32_t nhpos, uint32_t nhcnt,
                    uint32_t reqid, uint64_t desc,
                    void **desc_ptr, void *start_desc,
                    void *end_desc)
{
        uint64_t d;
        uint32_t nhavail, _startpkt, _endpkt, _bdcount;
        int is_nxt_page = 0;

        /*
         * Each request (or packet) starts with a HEADER descriptor followed
         * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
         * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
         * following a HEADER descriptor is given by the BDCOUNT field of
         * the HEADER descriptor. The maximum value of the BDCOUNT field is
         * 31, so at most 31 non-HEADER descriptors can follow one HEADER
         * descriptor.
         *
         * In general use, the number of non-HEADER descriptors can easily
         * go beyond 31. To handle this, the HEADER descriptor carries
         * packet (or request) extension bits (STARTPKT and ENDPKT).
         *
         * With packet extension, the first HEADER descriptor of a request
         * (or packet) has STARTPKT=1 and ENDPKT=0, intermediate HEADER
         * descriptors have STARTPKT=0 and ENDPKT=0, and the last HEADER
         * descriptor has STARTPKT=0 and ENDPKT=1.
         */
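
        /*
         * Worked example (illustrative, derived from the logic below):
         * for nhcnt = 70 non-HEADER descriptors, HEADER descriptors are
         * emitted at nhpos 0, 31 and 62:
         *
         *   nhpos  0: STARTPKT=1 ENDPKT=0 BDCOUNT=31 (70 remaining)
         *   nhpos 31: STARTPKT=0 ENDPKT=0 BDCOUNT=31 (39 remaining)
         *   nhpos 62: STARTPKT=0 ENDPKT=1 BDCOUNT=8  (8 remaining)
         *
         * so the ring carries 70 + 3 = 73 descriptors for the request.
         */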

        if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
                /* Prepare the header descriptor */
                nhavail = (nhcnt - nhpos);
                _startpkt = (nhpos == 0) ? 0x1 : 0x0;
                _endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
                _bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
                                nhavail : HEADER_BDCOUNT_MAX;
                d = bcmfs5_header_desc(_startpkt, _endpkt,
                                       _bdcount, 0x0, reqid);

                /* Write header descriptor */
                rm_write_desc(*desc_ptr, d);

                /* Point to next descriptor */
                *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
                if (*desc_ptr == end_desc)
                        *desc_ptr = start_desc;

                /* Skip next pointer descriptors */
                while (bcmfs5_is_next_table_desc(*desc_ptr)) {
                        is_nxt_page = 1;
                        *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
                        if (*desc_ptr == end_desc)
                                *desc_ptr = start_desc;
                }
        }

        /* Write desired descriptor */
        rm_write_desc(*desc_ptr, desc);

        /* Point to next descriptor */
        *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
        if (*desc_ptr == end_desc)
                *desc_ptr = start_desc;

        /* Skip next pointer descriptors */
        while (bcmfs5_is_next_table_desc(*desc_ptr)) {
                is_nxt_page = 1;
                *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
                if (*desc_ptr == end_desc)
                        *desc_ptr = start_desc;
        }

        return is_nxt_page;
}

static uint64_t
bcmfs5_src_desc(uint64_t addr, unsigned int len)
{
        return (rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(len, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK) |
                rm_build_desc(addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK));
}

static uint64_t
bcmfs5_msrc_desc(uint64_t addr, unsigned int len_div_16)
{
        return (rm_build_desc(MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(len_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK) |
                rm_build_desc(addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK));
}

static uint64_t
bcmfs5_dst_desc(uint64_t addr, unsigned int len)
{
        return (rm_build_desc(DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(len, DST_LENGTH_SHIFT, DST_LENGTH_MASK) |
                rm_build_desc(addr, DST_ADDR_SHIFT, DST_ADDR_MASK));
}

static uint64_t
bcmfs5_mdst_desc(uint64_t addr, unsigned int len_div_16)
{
        return (rm_build_desc(MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(len_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK) |
                rm_build_desc(addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK));
}

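/*
 * Note on SRC/DST vs MSRC/MDST selection (as used by the sanity check and
 * enqueue paths below): a buffer whose length is not a multiple of 16
 * (len & 0xf != 0) uses a SRC/DST descriptor, whose 16-bit length field
 * caps it at 65535 bytes; a 16-byte-multiple buffer uses MSRC/MDST, whose
 * length field counts 16-byte units, allowing up to 0xffff * 16 = 1048560
 * bytes.
 */
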
static bool
bcmfs5_sanity_check(struct bcmfs_qp_message *msg)
{
        unsigned int i = 0;

        if (msg == NULL)
                return false;

        for (i = 0; i < msg->srcs_count; i++) {
                if (msg->srcs_len[i] & 0xf) {
                        if (msg->srcs_len[i] > SRC_LENGTH_MASK)
                                return false;
                } else {
                        if (msg->srcs_len[i] > (MSRC_LENGTH_MASK * 16))
                                return false;
                }
        }
        for (i = 0; i < msg->dsts_count; i++) {
                if (msg->dsts_len[i] & 0xf) {
                        if (msg->dsts_len[i] > DST_LENGTH_MASK)
                                return false;
                } else {
                        if (msg->dsts_len[i] > (MDST_LENGTH_MASK * 16))
                                return false;
                }
        }

        return true;
}

static void *
bcmfs5_enqueue_msg(struct bcmfs_queue *txq,
                   struct bcmfs_qp_message *msg,
                   uint32_t reqid, void *desc_ptr,
                   void *start_desc, void *end_desc)
{
        uint64_t d;
        unsigned int src, dst;
        uint32_t nhpos = 0;
        int nxt_page = 0;
        uint32_t nhcnt = msg->srcs_count + msg->dsts_count;

        if (desc_ptr == NULL || start_desc == NULL || end_desc == NULL)
                return NULL;

        if (desc_ptr < start_desc || end_desc <= desc_ptr)
                return NULL;

        for (src = 0; src < msg->srcs_count; src++) {
                if (msg->srcs_len[src] & 0xf)
                        d = bcmfs5_src_desc(msg->srcs_addr[src],
                                            msg->srcs_len[src]);
                else
                        d = bcmfs5_msrc_desc(msg->srcs_addr[src],
                                             msg->srcs_len[src] / 16);

                nxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,
                                               d, &desc_ptr, start_desc,
                                               end_desc);
                if (nxt_page)
                        txq->descs_inflight++;
                nhpos++;
        }

        for (dst = 0; dst < msg->dsts_count; dst++) {
                if (msg->dsts_len[dst] & 0xf)
                        d = bcmfs5_dst_desc(msg->dsts_addr[dst],
                                            msg->dsts_len[dst]);
                else
                        d = bcmfs5_mdst_desc(msg->dsts_addr[dst],
                                             msg->dsts_len[dst] / 16);

                nxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,
                                               d, &desc_ptr, start_desc,
                                               end_desc);
                if (nxt_page)
                        txq->descs_inflight++;
                nhpos++;
        }

        txq->descs_inflight += nhcnt + 1;

        return desc_ptr;
}
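
/*
 * Ring layout sketch (illustrative): a message with two sources and one
 * destination (nhcnt = 3) is written as
 *
 *   [HEADER][SRC|MSRC][SRC|MSRC][DST|MDST]
 *
 * and descs_inflight grows by nhcnt + 1 = 4, plus one each time the write
 * pointer skips over a next-pointer (NPTR) descriptor on a ring wrap;
 * the accumulated count is later published via the doorbell.
 */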

static int
bcmfs5_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)
{
        void *next;
        int reqid;
        int ret = 0;
        uint64_t slab = 0;
        uint32_t pos = 0;
        uint8_t exit_cleanup = false;
        struct bcmfs_queue *txq = &qp->tx_q;
        struct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;

        /* Do sanity check on message */
        if (!bcmfs5_sanity_check(msg)) {
                BCMFS_DP_LOG(ERR, "Invalid msg on queue %d", qp->qpair_id);
                return -EIO;
        }

        /* Scan from the beginning */
        __rte_bitmap_scan_init(qp->ctx_bmp);
        /* Scan bitmap to get the free pool */
        ret = rte_bitmap_scan(qp->ctx_bmp, &pos, &slab);
        if (ret == 0) {
                BCMFS_DP_LOG(ERR, "BD memory exhausted");
                return -ERANGE;
        }

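        /*
         * A sketch of the id allocation (rte_bitmap semantics as used
         * here): pos identifies the first bit of the returned 64-bit
         * slab, so the lowest set bit in the slab yields a free request
         * id, which is immediately marked busy below.
         */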
        reqid = pos + __builtin_ctzll(slab);
        rte_bitmap_clear(qp->ctx_bmp, reqid);
        qp->ctx_pool[reqid] = (unsigned long)msg;

        /* Write descriptors to ring */
        next = bcmfs5_enqueue_msg(txq, msg, reqid,
                                  (uint8_t *)txq->base_addr + txq->tx_write_ptr,
                                  txq->base_addr,
                                  (uint8_t *)txq->base_addr + txq->queue_size);
        if (next == NULL) {
                BCMFS_DP_LOG(ERR, "Enqueue for desc failed on queue %d",
                             qp->qpair_id);
                ret = -EINVAL;
                exit_cleanup = true;
                goto exit;
        }

        /* Save ring BD write offset */
        txq->tx_write_ptr = (uint32_t)((uint8_t *)next -
                                       (uint8_t *)txq->base_addr);

        qp->nb_pending_requests++;

        return 0;

exit:
        /* Cleanup if we failed */
        if (exit_cleanup)
                rte_bitmap_set(qp->ctx_bmp, reqid);

        return ret;
}

static void bcmfs5_write_doorbell(struct bcmfs_qp *qp)
{
        struct bcmfs_queue *txq = &qp->tx_q;

        /* sync before ringing the doorbell */
        rte_wmb();

        FS_MMIO_WRITE32(txq->descs_inflight,
                        (uint8_t *)qp->ioreg + RING_DOORBELL_BD_WRITE_COUNT);

        /* reset the count */
        txq->descs_inflight = 0;
}
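
/*
 * Note (illustrative): the doorbell register takes a count of newly
 * written BDs rather than a write pointer, so several enqueued requests
 * can be published with a single MMIO write of the accumulated
 * descs_inflight, amortizing doorbell cost across a batch.
 */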

static uint16_t
bcmfs5_dequeue_qp(struct bcmfs_qp *qp, void **ops, uint16_t budget)
{
        int err;
        uint16_t reqid;
        uint64_t desc;
        uint16_t count = 0;
        unsigned long context = 0;
        struct bcmfs_queue *hwq = &qp->cmpl_q;
        uint32_t cmpl_read_offset, cmpl_write_offset;

        /*
         * Clamp the budget to the number of pending requests so that no
         * more completions are processed than requests are outstanding.
         */
        if (budget > qp->nb_pending_requests)
                budget = qp->nb_pending_requests;

        /*
         * Get current completion read and write offsets.
         *
         * Note: We should read the completion write pointer at least once
         * after we get an MSI interrupt because HW maintains internal
         * MSI status which will allow the next MSI interrupt only after
         * the completion write pointer is read.
         */
        cmpl_write_offset = FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);
        cmpl_write_offset *= FS_RING_DESC_SIZE;
        cmpl_read_offset = hwq->cmpl_read_ptr;

        /* read the ring cmpl write ptr before cmpl read offset */
        rte_io_rmb();

        /* For each completed request notify mailbox clients */
        reqid = 0;
        while ((cmpl_read_offset != cmpl_write_offset) && (budget > 0)) {
                /* Dequeue next completion descriptor */
                desc = *((uint64_t *)((uint8_t *)hwq->base_addr +
                                      cmpl_read_offset));

                /* Next read offset */
                cmpl_read_offset += FS_RING_DESC_SIZE;
                if (cmpl_read_offset == FS_RING_CMPL_SIZE)
                        cmpl_read_offset = 0;

                /* Decode error from completion descriptor */
                err = rm_cmpl_desc_to_error(desc);
                if (err < 0)
                        BCMFS_DP_LOG(ERR, "error desc rcvd");

                /* Determine request id from completion descriptor */
                reqid = rm_cmpl_desc_to_reqid(desc);

                /* Retrieve context */
                context = qp->ctx_pool[reqid];
                if (context == 0)
                        BCMFS_DP_LOG(ERR, "HW error detected");

                /* Release reqid for recycling */
                qp->ctx_pool[reqid] = 0;
                rte_bitmap_set(qp->ctx_bmp, reqid);

                *ops = (void *)context;

                /* Increment number of completions processed */
                count++;
                budget--;
                ops++;
        }

        hwq->cmpl_read_ptr = cmpl_read_offset;

        qp->nb_pending_requests -= count;

        return count;
}
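
/*
 * Offset arithmetic sketch (illustrative): RING_CMPL_WRITE_PTR holds a
 * completion-descriptor index, so it is scaled by FS_RING_DESC_SIZE
 * (presumably 8 bytes for the 64-bit descriptors used here) to get a byte
 * offset into the completion ring, wrapping at FS_RING_CMPL_SIZE.
 */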

static int
bcmfs5_start_qp(struct bcmfs_qp *qp)
{
        uint32_t val, off;
        uint64_t d, next_addr, msi;
        int timeout;
        uint32_t bd_high, bd_low, cmpl_high, cmpl_low;
        struct bcmfs_queue *tx_queue = &qp->tx_q;
        struct bcmfs_queue *cmpl_queue = &qp->cmpl_q;

        /* Disable/deactivate ring */
        FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);

        /* Configure next table pointer entries in BD memory */
        for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {
                next_addr = off + FS_RING_DESC_SIZE;
                if (next_addr == tx_queue->queue_size)
                        next_addr = 0;
                next_addr += (uint64_t)tx_queue->base_phys_addr;
                if (FS_RING_BD_ALIGN_CHECK(next_addr))
                        d = bcmfs5_next_table_desc(next_addr);
                else
                        d = bcmfs5_null_desc();
                rm_write_desc((uint8_t *)tx_queue->base_addr + off, d);
        }

        /*
         * If the user interrupts a test mid-run (Ctrl+C), all subsequent
         * test runs would fail because the SW cmpl_read_offset and the HW
         * cmpl_write_offset would point at different completion BDs. To
         * handle this, flush all rings here at startup instead of in the
         * shutdown function.
         * A ring flush resets the HW cmpl_write_offset.
         */

        /* Set ring flush state */
        timeout = 1000;
        FS_MMIO_WRITE32(BIT(CONTROL_FLUSH_SHIFT),
                        (uint8_t *)qp->ioreg + RING_CONTROL);
        do {
                /*
                 * If a previous test was stopped mid-run, SW has to read
                 * cmpl_write_offset, otherwise the DME/AE will not come
                 * out of the flush state.
                 */
                FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);

                if (FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
                                   FLUSH_DONE_MASK)
                        break;
                usleep(1000);
        } while (--timeout);
        if (!timeout) {
                BCMFS_DP_LOG(ERR, "Ring flush timeout hw-queue %d",
                             qp->qpair_id);
        }

        /* Clear ring flush state */
        timeout = 1000;
        FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
        do {
                if (!(FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
                                     FLUSH_DONE_MASK))
                        break;
                usleep(1000);
        } while (--timeout);
        if (!timeout) {
                BCMFS_DP_LOG(ERR, "Ring clear flush timeout hw-queue %d",
                             qp->qpair_id);
        }

        /* Program BD start address */
        bd_low = lower_32_bits(tx_queue->base_phys_addr);
        bd_high = upper_32_bits(tx_queue->base_phys_addr);
        FS_MMIO_WRITE32(bd_low, (uint8_t *)qp->ioreg +
                                RING_BD_START_ADDRESS_LSB);
        FS_MMIO_WRITE32(bd_high, (uint8_t *)qp->ioreg +
                                 RING_BD_START_ADDRESS_MSB);

        tx_queue->tx_write_ptr = 0;

        for (off = 0; off < FS_RING_CMPL_SIZE; off += FS_RING_DESC_SIZE)
                rm_write_desc((uint8_t *)cmpl_queue->base_addr + off, 0x0);

        /* Completion read pointer will be the same as the HW write pointer */
        cmpl_queue->cmpl_read_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +
                                                   RING_CMPL_WRITE_PTR);
        /* Program completion start address */
        cmpl_low = lower_32_bits(cmpl_queue->base_phys_addr);
        cmpl_high = upper_32_bits(cmpl_queue->base_phys_addr);
        FS_MMIO_WRITE32(cmpl_low, (uint8_t *)qp->ioreg +
                                  RING_CMPL_START_ADDR_LSB);
        FS_MMIO_WRITE32(cmpl_high, (uint8_t *)qp->ioreg +
                                   RING_CMPL_START_ADDR_MSB);

        cmpl_queue->cmpl_read_ptr *= FS_RING_DESC_SIZE;

        /* Read ring Tx, Rx, and Outstanding counts to clear */
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_LS);
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_MS);
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_LS);
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_MS);
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_OUTSTAND);

        /* Configure per-Ring MSI registers with a dummy location */
        msi = cmpl_queue->base_phys_addr + (1024 * FS_RING_DESC_SIZE);
        FS_MMIO_WRITE32((msi & 0xFFFFFFFF),
                        (uint8_t *)qp->ioreg + RING_MSI_ADDR_LS);
        FS_MMIO_WRITE32(((msi >> 32) & 0xFFFFFFFF),
                        (uint8_t *)qp->ioreg + RING_MSI_ADDR_MS);
        FS_MMIO_WRITE32(qp->qpair_id, (uint8_t *)qp->ioreg +
                                      RING_MSI_DATA_VALUE);

        /* Configure RING_MSI_CONTROL */
        val = 0;
        val |= (MSI_TIMER_VAL_MASK << MSI_TIMER_VAL_SHIFT);
        val |= BIT(MSI_ENABLE_SHIFT);
        val |= (0x1 & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
        FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_MSI_CONTROL);

        /* Enable/activate ring */
        val = BIT(CONTROL_ACTIVE_SHIFT);
        FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CONTROL);

        return 0;
}

static void
bcmfs5_shutdown_qp(struct bcmfs_qp *qp)
{
        /* Disable/deactivate ring */
        FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
}

struct bcmfs_hw_queue_pair_ops bcmfs5_qp_ops = {
        .name = "fs5",
        .enq_one_req = bcmfs5_enqueue_single_request_qp,
        .ring_db = bcmfs5_write_doorbell,
        .dequeue = bcmfs5_dequeue_qp,
        .startq = bcmfs5_start_qp,
        .stopq = bcmfs5_shutdown_qp,
};

RTE_INIT(bcmfs5_register_qp_ops)
{
        bcmfs_hw_queue_pair_register_ops(&bcmfs5_qp_ops);
}
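
/*
 * Typical call flow (a sketch; the ops table is registered at load time
 * via RTE_INIT above and looked up by the queue-pair layer, so the exact
 * caller-side names are assumptions):
 *
 *   ops->startq(qp);                     // program and activate the ring
 *   ops->enq_one_req(qp, op);            // write BDs for one request
 *   ops->ring_db(qp);                    // publish accumulated BD count
 *   n = ops->dequeue(qp, done_ops, 32);  // reap up to 32 completions
 *   ops->stopq(qp);                      // deactivate the ring
 */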