dpdk.git: drivers/crypto/bcmfs/hw/bcmfs4_rm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_bitmap.h>

#include "bcmfs_device.h"
#include "bcmfs_dev_msg.h"
#include "bcmfs_hw_defs.h"
#include "bcmfs_logs.h"
#include "bcmfs_qp.h"
#include "bcmfs_rm_common.h"

/* FS4 configuration */
#define RING_BD_TOGGLE_INVALID(offset)                  \
                        (((offset) >> FS_RING_BD_ALIGN_ORDER) & 0x1)
#define RING_BD_TOGGLE_VALID(offset)                    \
                        (!RING_BD_TOGGLE_INVALID(offset))
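
/*
 * Illustrative note (assuming FS_RING_BD_ALIGN_ORDER from
 * bcmfs_hw_defs.h is the BD window alignment order): the valid TOGGLE
 * value simply alternates between consecutive aligned BD windows.
 * E.g. if the order were 12 (4 KiB windows), offsets 0x0000-0x0fff
 * would decode as toggle-valid and 0x1000-0x1fff as toggle-invalid.
 */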

#define RING_VER_MAGIC                                  0x76303031

/* Per-Ring register offsets */
#define RING_VER                                        0x000
#define RING_BD_START_ADDR                              0x004
#define RING_BD_READ_PTR                                0x008
#define RING_BD_WRITE_PTR                               0x00c
#define RING_BD_READ_PTR_DDR_LS                         0x010
#define RING_BD_READ_PTR_DDR_MS                         0x014
#define RING_CMPL_START_ADDR                            0x018
#define RING_CMPL_WRITE_PTR                             0x01c
#define RING_NUM_REQ_RECV_LS                            0x020
#define RING_NUM_REQ_RECV_MS                            0x024
#define RING_NUM_REQ_TRANS_LS                           0x028
#define RING_NUM_REQ_TRANS_MS                           0x02c
#define RING_NUM_REQ_OUTSTAND                           0x030
#define RING_CONTROL                                    0x034
#define RING_FLUSH_DONE                                 0x038
#define RING_MSI_ADDR_LS                                0x03c
#define RING_MSI_ADDR_MS                                0x040
#define RING_MSI_CONTROL                                0x048
#define RING_BD_READ_PTR_DDR_CONTROL                    0x04c
#define RING_MSI_DATA_VALUE                             0x064

/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT                         28
#define BD_LAST_UPDATE_HW_MASK                          0x1
#define BD_START_ADDR_VALUE(pa)                         \
        ((uint32_t)((((uint64_t)(pa)) >> FS_RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val)                       \
        ((uint64_t)((val) & 0x0fffffff) << FS_RING_BD_ALIGN_ORDER)

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa)                       \
        ((uint32_t)((((uint64_t)(pa)) >> FS_RING_CMPL_ALIGN_ORDER) & 0x7ffffff))

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL                    12
#define CONTROL_FLUSH_SHIFT                             5
#define CONTROL_ACTIVE_SHIFT                            4
#define CONTROL_RATE_ADAPT_MASK                         0xf
#define CONTROL_RATE_DYNAMIC                            0x0
#define CONTROL_RATE_FAST                               0x8
#define CONTROL_RATE_MEDIUM                             0x9
#define CONTROL_RATE_SLOW                               0xa
#define CONTROL_RATE_IDLE                               0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK                                 0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT                             16
#define MSI_TIMER_VAL_MASK                              0xffff
#define MSI_ENABLE_SHIFT                                15
#define MSI_ENABLE_MASK                                 0x1
#define MSI_COUNT_SHIFT                                 0
#define MSI_COUNT_MASK                                  0x3ff

/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT                 16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK                  0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT                    15
#define BD_READ_PTR_DDR_ENABLE_MASK                     0x1

/* ====== Broadcom FS4-RM ring descriptor defines ===== */

/* General descriptor format */
#define DESC_TYPE_SHIFT                         60
#define DESC_TYPE_MASK                          0xf
#define DESC_PAYLOAD_SHIFT                      0
#define DESC_PAYLOAD_MASK                       0x0fffffffffffffff
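
/*
 * Layout sketch (derived from the shift/mask values in this file):
 * each ring element is one 64-bit descriptor with TYPE in bits [63:60]
 * and a type-specific payload in bits [59:0]. A SRC descriptor, for
 * example, packs LENGTH into bits [59:44] and a 44-bit ADDR into
 * bits [43:0].
 */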

/* Null descriptor format */
#define NULL_TYPE                               0
#define NULL_TOGGLE_SHIFT                       58
#define NULL_TOGGLE_MASK                        0x1

/* Header descriptor format */
#define HEADER_TYPE                             1
#define HEADER_TOGGLE_SHIFT                     58
#define HEADER_TOGGLE_MASK                      0x1
#define HEADER_ENDPKT_SHIFT                     57
#define HEADER_ENDPKT_MASK                      0x1
#define HEADER_STARTPKT_SHIFT                   56
#define HEADER_STARTPKT_MASK                    0x1
#define HEADER_BDCOUNT_SHIFT                    36
#define HEADER_BDCOUNT_MASK                     0x1f
#define HEADER_BDCOUNT_MAX                      HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT                      16
#define HEADER_FLAGS_MASK                       0xffff
#define HEADER_OPAQUE_SHIFT                     0
#define HEADER_OPAQUE_MASK                      0xffff

/* Source (SRC) descriptor format */
#define SRC_TYPE                                2
#define SRC_LENGTH_SHIFT                        44
#define SRC_LENGTH_MASK                         0xffff
#define SRC_ADDR_SHIFT                          0
#define SRC_ADDR_MASK                           0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_TYPE                                3
#define DST_LENGTH_SHIFT                        44
#define DST_LENGTH_MASK                         0xffff
#define DST_ADDR_SHIFT                          0
#define DST_ADDR_MASK                           0x00000fffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE                               5
#define NPTR_TOGGLE_SHIFT                       58
#define NPTR_TOGGLE_MASK                        0x1
#define NPTR_ADDR_SHIFT                         0
#define NPTR_ADDR_MASK                          0x00000fffffffffff

/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE                               6
#define MSRC_LENGTH_SHIFT                       44
#define MSRC_LENGTH_MASK                        0xffff
#define MSRC_ADDR_SHIFT                         0
#define MSRC_ADDR_MASK                          0x00000fffffffffff

/* Mega destination (MDST) descriptor format */
#define MDST_TYPE                               7
#define MDST_LENGTH_SHIFT                       44
#define MDST_LENGTH_MASK                        0xffff
#define MDST_ADDR_SHIFT                         0
#define MDST_ADDR_MASK                          0x00000fffffffffff

static uint8_t
bcmfs4_is_next_table_desc(void *desc_ptr)
{
        uint64_t desc = rm_read_desc(desc_ptr);
        uint32_t type = FS_DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);

        return type == NPTR_TYPE;
}

static uint64_t
bcmfs4_next_table_desc(uint32_t toggle, uint64_t next_addr)
{
        return (rm_build_desc(NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK) |
                rm_build_desc(next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK));
}

static uint64_t
bcmfs4_null_desc(uint32_t toggle)
{
        return (rm_build_desc(NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK));
}

static void
bcmfs4_flip_header_toggle(void *desc_ptr)
{
        uint64_t desc = rm_read_desc(desc_ptr);

        if (desc & ((uint64_t)0x1 << HEADER_TOGGLE_SHIFT))
                desc &= ~((uint64_t)0x1 << HEADER_TOGGLE_SHIFT);
        else
                desc |= ((uint64_t)0x1 << HEADER_TOGGLE_SHIFT);

        rm_write_desc(desc_ptr, desc);
}

static uint64_t
bcmfs4_header_desc(uint32_t toggle, uint32_t startpkt,
                   uint32_t endpkt, uint32_t bdcount,
                   uint32_t flags, uint32_t opaque)
{
        return (rm_build_desc(HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK) |
                rm_build_desc(startpkt, HEADER_STARTPKT_SHIFT,
                              HEADER_STARTPKT_MASK) |
                rm_build_desc(endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK) |
                rm_build_desc(bdcount, HEADER_BDCOUNT_SHIFT,
                              HEADER_BDCOUNT_MASK) |
                rm_build_desc(flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK) |
                rm_build_desc(opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK));
}
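
/*
 * Worked example (assuming rm_build_desc() masks its value and shifts
 * it into place, mirroring what FS_DESC_DEC does in reverse):
 * bcmfs4_header_desc(1, 1, 0, 31, 0, 0x12) yields 0x150001f000000012,
 * i.e. TYPE=1, TOGGLE=1, STARTPKT=1, ENDPKT=0, BDCOUNT=31, FLAGS=0,
 * OPAQUE=0x12.
 */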

static void
bcmfs4_enqueue_desc(uint32_t nhpos, uint32_t nhcnt,
                    uint32_t reqid, uint64_t desc,
                    void **desc_ptr, uint32_t *toggle,
                    void *start_desc, void *end_desc)
{
        uint64_t d;
        uint32_t nhavail, _toggle, _startpkt, _endpkt, _bdcount;

        /*
         * Each request (or packet) starts with a HEADER descriptor
         * followed by one or more non-HEADER descriptors (SRC, SRCT,
         * MSRC, DST, DSTT, MDST, IMM, and IMMT). The number of
         * non-HEADER descriptors following a HEADER descriptor is given
         * by the BDCOUNT field of the HEADER descriptor. The max value
         * of the BDCOUNT field is 31, which means at most 31 non-HEADER
         * descriptors can follow one HEADER descriptor.
         *
         * In general use, the number of non-HEADER descriptors can
         * easily exceed 31. To handle this, the HEADER descriptor
         * provides packet (or request) extension bits (STARTPKT and
         * ENDPKT).
         *
         * With packet extension, the first HEADER descriptor of a
         * request (or packet) has STARTPKT=1 and ENDPKT=0, intermediate
         * HEADER descriptors have STARTPKT=0 and ENDPKT=0, and the last
         * HEADER descriptor has STARTPKT=0 and ENDPKT=1. Also, the
         * TOGGLE bit of the first HEADER is set to the invalid state so
         * that the FlexDMA engine does not start fetching descriptors
         * until all of them are enqueued. The caller of this function
         * flips the TOGGLE bit of the first HEADER once all descriptors
         * are enqueued.
         */
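
        /*
         * Worked example (illustrative): with nhcnt = 70 and
         * HEADER_BDCOUNT_MAX = 31, HEADER descriptors are emitted at
         * nhpos 0, 31 and 62 carrying (STARTPKT, ENDPKT, BDCOUNT) of
         * (1, 0, 31), (0, 0, 31) and (0, 1, 8) respectively; only the
         * first HEADER starts out with an invalid TOGGLE bit.
         */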

        if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
                /* Prepare the header descriptor */
                nhavail = (nhcnt - nhpos);
                _toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
                _startpkt = (nhpos == 0) ? 0x1 : 0x0;
                _endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
                _bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
                                nhavail : HEADER_BDCOUNT_MAX;
                d = bcmfs4_header_desc(_toggle, _startpkt, _endpkt,
                                        _bdcount, 0x0, reqid);

                /* Write header descriptor */
                rm_write_desc(*desc_ptr, d);

                /* Point to next descriptor */
                *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
                if (*desc_ptr == end_desc)
                        *desc_ptr = start_desc;

                /* Skip next pointer descriptors */
                while (bcmfs4_is_next_table_desc(*desc_ptr)) {
                        *toggle = (*toggle) ? 0 : 1;
                        *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
                        if (*desc_ptr == end_desc)
                                *desc_ptr = start_desc;
                }
        }

        /* Write desired descriptor */
        rm_write_desc(*desc_ptr, desc);

        /* Point to next descriptor */
        *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
        if (*desc_ptr == end_desc)
                *desc_ptr = start_desc;

        /* Skip next pointer descriptors */
        while (bcmfs4_is_next_table_desc(*desc_ptr)) {
                *toggle = (*toggle) ? 0 : 1;
                *desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
                if (*desc_ptr == end_desc)
                        *desc_ptr = start_desc;
        }
}

static uint64_t
bcmfs4_src_desc(uint64_t addr, unsigned int length)
{
        return (rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK) |
                rm_build_desc(addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK));
}

static uint64_t
bcmfs4_msrc_desc(uint64_t addr, unsigned int length_div_16)
{
        return (rm_build_desc(MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK) |
                rm_build_desc(addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK));
}

static uint64_t
bcmfs4_dst_desc(uint64_t addr, unsigned int length)
{
        return (rm_build_desc(DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(length, DST_LENGTH_SHIFT, DST_LENGTH_MASK) |
                rm_build_desc(addr, DST_ADDR_SHIFT, DST_ADDR_MASK));
}

static uint64_t
bcmfs4_mdst_desc(uint64_t addr, unsigned int length_div_16)
{
        return (rm_build_desc(MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
                rm_build_desc(length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK) |
                rm_build_desc(addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK));
}

static bool
bcmfs4_sanity_check(struct bcmfs_qp_message *msg)
{
        unsigned int i = 0;

        if (msg == NULL)
                return false;

        for (i = 0; i < msg->srcs_count; i++) {
                if (msg->srcs_len[i] & 0xf) {
                        if (msg->srcs_len[i] > SRC_LENGTH_MASK)
                                return false;
                } else {
                        if (msg->srcs_len[i] > (MSRC_LENGTH_MASK * 16))
                                return false;
                }
        }
        for (i = 0; i < msg->dsts_count; i++) {
                if (msg->dsts_len[i] & 0xf) {
                        if (msg->dsts_len[i] > DST_LENGTH_MASK)
                                return false;
                } else {
                        if (msg->dsts_len[i] > (MDST_LENGTH_MASK * 16))
                                return false;
                }
        }

        return true;
}
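
/*
 * Illustrative note on the check above: lengths that are not multiples
 * of 16 must fit the 16-bit SRC/DST LENGTH field, while 16-byte
 * multiples take the MSRC/MDST path and may be up to
 * MSRC_LENGTH_MASK * 16 bytes. E.g. a 0x20000-byte buffer exceeds
 * SRC_LENGTH_MASK but passes because it is 16-byte aligned.
 */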

static uint32_t
estimate_nonheader_desc_count(struct bcmfs_qp_message *msg)
{
        uint32_t cnt = 0;
        unsigned int src = 0;
        unsigned int dst = 0;
        unsigned int dst_target = 0;

        while (src < msg->srcs_count ||
               dst < msg->dsts_count) {
                if (src < msg->srcs_count) {
                        cnt++;
                        dst_target = msg->srcs_len[src];
                        src++;
                } else {
                        dst_target = UINT_MAX;
                }
                while (dst_target && dst < msg->dsts_count) {
                        cnt++;
                        if (msg->dsts_len[dst] < dst_target)
                                dst_target -= msg->dsts_len[dst];
                        else
                                dst_target = 0;
                        dst++;
                }
        }

        return cnt;
}
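
/*
 * Worked example (illustrative): srcs_len = {64, 32} with
 * dsts_len = {32, 32, 32} yields cnt = 5: SRC(64) is covered by
 * DST(32) + DST(32), then SRC(32) by the remaining DST(32).
 */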

static void *
bcmfs4_enqueue_msg(struct bcmfs_qp_message *msg,
                   uint32_t nhcnt, uint32_t reqid,
                   void *desc_ptr, uint32_t toggle,
                   void *start_desc, void *end_desc)
{
        uint64_t d;
        uint32_t nhpos = 0;
        unsigned int src = 0;
        unsigned int dst = 0;
        unsigned int dst_target = 0;
        void *orig_desc_ptr = desc_ptr;

        if (!desc_ptr || !start_desc || !end_desc)
                return NULL;

        if (desc_ptr < start_desc || end_desc <= desc_ptr)
                return NULL;

        while (src < msg->srcs_count || dst < msg->dsts_count) {
                if (src < msg->srcs_count) {
                        if (msg->srcs_len[src] & 0xf) {
                                d = bcmfs4_src_desc(msg->srcs_addr[src],
                                                    msg->srcs_len[src]);
                        } else {
                                d = bcmfs4_msrc_desc(msg->srcs_addr[src],
                                                     msg->srcs_len[src] / 16);
                        }
                        bcmfs4_enqueue_desc(nhpos, nhcnt, reqid,
                                            d, &desc_ptr, &toggle,
                                            start_desc, end_desc);
                        nhpos++;
                        dst_target = msg->srcs_len[src];
                        src++;
                } else {
                        dst_target = UINT_MAX;
                }

                while (dst_target && (dst < msg->dsts_count)) {
                        if (msg->dsts_len[dst] & 0xf) {
                                d = bcmfs4_dst_desc(msg->dsts_addr[dst],
                                                    msg->dsts_len[dst]);
                        } else {
                                d = bcmfs4_mdst_desc(msg->dsts_addr[dst],
                                                     msg->dsts_len[dst] / 16);
                        }
                        bcmfs4_enqueue_desc(nhpos, nhcnt, reqid,
                                            d, &desc_ptr, &toggle,
                                            start_desc, end_desc);
                        nhpos++;
                        if (msg->dsts_len[dst] < dst_target)
                                dst_target -= msg->dsts_len[dst];
                        else
                                dst_target = 0;
                        dst++; /* for next buffer */
                }
        }

        /* Null descriptor with invalid toggle bit */
        rm_write_desc(desc_ptr, bcmfs4_null_desc(!toggle));

        /* Ensure that descriptors have been written to memory */
        rte_io_wmb();

        bcmfs4_flip_header_toggle(orig_desc_ptr);

        return desc_ptr;
}

static int
bcmfs4_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)
{
        int reqid;
        void *next;
        uint32_t nhcnt;
        int ret = 0;
        uint32_t pos = 0;
        uint64_t slab = 0;
        uint8_t exit_cleanup = false;
        struct bcmfs_queue *txq = &qp->tx_q;
        struct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;

        /* Do sanity check on message */
        if (!bcmfs4_sanity_check(msg)) {
                BCMFS_DP_LOG(ERR, "Invalid msg on queue %d", qp->qpair_id);
                return -EIO;
        }

        /* Scan from the beginning */
        __rte_bitmap_scan_init(qp->ctx_bmp);
        /* Scan the bitmap for a free request id */
        ret = rte_bitmap_scan(qp->ctx_bmp, &pos, &slab);
        if (ret == 0) {
                BCMFS_DP_LOG(ERR, "BD memory exhausted");
                return -ERANGE;
        }

        reqid = pos + __builtin_ctzll(slab);
        rte_bitmap_clear(qp->ctx_bmp, reqid);
        qp->ctx_pool[reqid] = (unsigned long)msg;

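        /*
         * Example of the reqid arithmetic above (illustrative): if the
         * bitmap scan returns pos = 64 with slab = 0x100, the free
         * request id is 64 + ctz(0x100) = 72.
         */
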
        /*
         * Number of required descriptors = number of non-header descriptors +
         *                                  number of header descriptors +
         *                                  1 null descriptor
         */
        nhcnt = estimate_nonheader_desc_count(msg);

        /* Write descriptors to ring */
        next = bcmfs4_enqueue_msg(msg, nhcnt, reqid,
                                  (uint8_t *)txq->base_addr + txq->tx_write_ptr,
                                  RING_BD_TOGGLE_VALID(txq->tx_write_ptr),
                                  txq->base_addr,
                                  (uint8_t *)txq->base_addr + txq->queue_size);
        if (next == NULL) {
                BCMFS_DP_LOG(ERR, "Enqueue for desc failed on queue %d",
                             qp->qpair_id);
                ret = -EINVAL;
                exit_cleanup = true;
                goto exit;
        }

        /* Save ring BD write offset */
        txq->tx_write_ptr = (uint32_t)((uint8_t *)next -
                                       (uint8_t *)txq->base_addr);

        qp->nb_pending_requests++;

        return 0;

exit:
        /* Cleanup if we failed */
        if (exit_cleanup)
                rte_bitmap_set(qp->ctx_bmp, reqid);

        return ret;
}

static void
bcmfs4_ring_doorbell_qp(struct bcmfs_qp *qp __rte_unused)
{
        /* no doorbell method supported */
}

static uint16_t
bcmfs4_dequeue_qp(struct bcmfs_qp *qp, void **ops, uint16_t budget)
{
        int err;
        uint16_t reqid;
        uint64_t desc;
        uint16_t count = 0;
        unsigned long context = 0;
        struct bcmfs_queue *hwq = &qp->cmpl_q;
        uint32_t cmpl_read_offset, cmpl_write_offset;

        /*
         * Clamp the budget to the number of pending requests so that we
         * never process more completions than are outstanding.
         */
        if (budget > qp->nb_pending_requests)
                budget = qp->nb_pending_requests;

        /*
         * Get the current completion read and write offsets.
         * Note: the completion write pointer must be read at least once
         * after an MSI interrupt because HW maintains internal MSI state
         * and allows the next MSI interrupt only after the completion
         * write pointer has been read.
         */
        cmpl_write_offset = FS_MMIO_READ32((uint8_t *)qp->ioreg +
                                           RING_CMPL_WRITE_PTR);
        cmpl_write_offset *= FS_RING_DESC_SIZE;
        cmpl_read_offset = hwq->cmpl_read_ptr;

        /* Ensure completion pointer is read before proceeding */
        rte_io_rmb();

        /* For each completed request notify mailbox clients */
        reqid = 0;
        while ((cmpl_read_offset != cmpl_write_offset) && (budget > 0)) {
                /* Dequeue next completion descriptor */
                desc = *((uint64_t *)((uint8_t *)hwq->base_addr +
                                       cmpl_read_offset));

                /* Next read offset */
                cmpl_read_offset += FS_RING_DESC_SIZE;
                if (cmpl_read_offset == FS_RING_CMPL_SIZE)
                        cmpl_read_offset = 0;

                /* Decode error from completion descriptor */
                err = rm_cmpl_desc_to_error(desc);
                if (err < 0)
                        BCMFS_DP_LOG(ERR, "error desc rcvd");

                /* Determine request id from completion descriptor */
                reqid = rm_cmpl_desc_to_reqid(desc);

                /* Determine message pointer based on reqid */
                context = qp->ctx_pool[reqid];
                if (context == 0)
                        BCMFS_DP_LOG(ERR, "HW error detected");

                /* Release reqid for recycling */
                qp->ctx_pool[reqid] = 0;
                rte_bitmap_set(qp->ctx_bmp, reqid);

                *ops = (void *)context;

                /* Increment number of completions processed */
                count++;
                budget--;
                ops++;
        }

        hwq->cmpl_read_ptr = cmpl_read_offset;

        qp->nb_pending_requests -= count;

        return count;
}

static int
bcmfs4_start_qp(struct bcmfs_qp *qp)
{
        int timeout;
        uint32_t val, off;
        uint64_t d, next_addr, msi;
        struct bcmfs_queue *tx_queue = &qp->tx_q;
        struct bcmfs_queue *cmpl_queue = &qp->cmpl_q;

        /* Disable/deactivate ring */
        FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);

        /* Configure next table pointer entries in BD memory */
        for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {
                next_addr = off + FS_RING_DESC_SIZE;
                if (next_addr == tx_queue->queue_size)
                        next_addr = 0;
                next_addr += (uint64_t)tx_queue->base_phys_addr;
                if (FS_RING_BD_ALIGN_CHECK(next_addr))
                        d = bcmfs4_next_table_desc(RING_BD_TOGGLE_VALID(off),
                                                    next_addr);
                else
                        d = bcmfs4_null_desc(RING_BD_TOGGLE_INVALID(off));
                rm_write_desc((uint8_t *)tx_queue->base_addr + off, d);
        }

        /*
         * If the user interrupts a test mid-run (Ctrl+C), all subsequent
         * runs will fail because the SW cmpl_read_offset and the HW
         * cmpl_write_offset end up pointing at different completion BDs.
         * To handle this, flush all rings here at startup rather than in
         * the shutdown function.
         * A ring flush resets the HW cmpl_write_offset.
         */

        /* Set ring flush state */
        timeout = 1000;
        FS_MMIO_WRITE32(BIT(CONTROL_FLUSH_SHIFT),
                        (uint8_t *)qp->ioreg + RING_CONTROL);
        do {
                /*
                 * If a previous test was stopped mid-run, SW must read
                 * cmpl_write_offset here, else the DME/AE will not come
                 * out of the flush state.
                 */
                FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);

                if (FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
                                FLUSH_DONE_MASK)
                        break;
                usleep(1000);
        } while (--timeout);
        if (!timeout) {
                BCMFS_DP_LOG(ERR, "Ring flush timeout hw-queue %d",
                             qp->qpair_id);
        }

        /* Clear ring flush state */
        timeout = 1000;
        FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
        do {
                if (!(FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
                                  FLUSH_DONE_MASK))
                        break;
                usleep(1000);
        } while (--timeout);
        if (!timeout) {
                BCMFS_DP_LOG(ERR, "Ring clear flush timeout hw-queue %d",
                             qp->qpair_id);
        }

        /* Program BD start address */
        val = BD_START_ADDR_VALUE(tx_queue->base_phys_addr);
        FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_BD_START_ADDR);

        /* BD write pointer will be same as HW write pointer */
        tx_queue->tx_write_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +
                                                RING_BD_WRITE_PTR);
        tx_queue->tx_write_ptr *= FS_RING_DESC_SIZE;

        /* Zero out completion memory before use */
        for (off = 0; off < FS_RING_CMPL_SIZE; off += FS_RING_DESC_SIZE)
                rm_write_desc((uint8_t *)cmpl_queue->base_addr + off, 0x0);

        /* Program completion start address */
        val = CMPL_START_ADDR_VALUE(cmpl_queue->base_phys_addr);
        FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CMPL_START_ADDR);

        /* Completion read pointer will be same as HW write pointer */
        cmpl_queue->cmpl_read_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +
                                                   RING_CMPL_WRITE_PTR);
        cmpl_queue->cmpl_read_ptr *= FS_RING_DESC_SIZE;

        /* Read ring Tx, Rx, and Outstanding counts to clear */
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_LS);
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_MS);
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_LS);
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_MS);
        FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_OUTSTAND);

        /*
         * Configure per-Ring MSI registers with a dummy location:
         * 1024 * FS_RING_DESC_SIZE bytes past the completion base
         * physical address are left reserved for MSI writes.
         */
        msi = cmpl_queue->base_phys_addr + (1024 * FS_RING_DESC_SIZE);
        FS_MMIO_WRITE32((msi & 0xFFFFFFFF),
                        (uint8_t *)qp->ioreg + RING_MSI_ADDR_LS);
        FS_MMIO_WRITE32(((msi >> 32) & 0xFFFFFFFF),
                        (uint8_t *)qp->ioreg + RING_MSI_ADDR_MS);
        FS_MMIO_WRITE32(qp->qpair_id,
                        (uint8_t *)qp->ioreg + RING_MSI_DATA_VALUE);

        /* Configure RING_MSI_CONTROL */
        val = 0;
        val |= (MSI_TIMER_VAL_MASK << MSI_TIMER_VAL_SHIFT);
        val |= BIT(MSI_ENABLE_SHIFT);
        val |= (0x1 & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
        FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_MSI_CONTROL);

        /* Enable/activate ring */
        val = BIT(CONTROL_ACTIVE_SHIFT);
        FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CONTROL);

        return 0;
}

static void
bcmfs4_shutdown_qp(struct bcmfs_qp *qp)
{
        /* Disable/deactivate ring */
        FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
}

struct bcmfs_hw_queue_pair_ops bcmfs4_qp_ops = {
        .name = "fs4",
        .enq_one_req = bcmfs4_enqueue_single_request_qp,
        .ring_db = bcmfs4_ring_doorbell_qp,
        .dequeue = bcmfs4_dequeue_qp,
        .startq = bcmfs4_start_qp,
        .stopq = bcmfs4_shutdown_qp,
};
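
/*
 * Usage sketch (illustrative only, not part of this driver): once
 * registered via RTE_INIT below, the generic bcmfs queue-pair layer
 * is expected to drive a ring roughly as
 *
 *     ops->startq(qp);
 *     ops->enq_one_req(qp, op);
 *     ops->ring_db(qp);
 *     n = ops->dequeue(qp, deq_ops, budget);
 *     ops->stopq(qp);
 */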

RTE_INIT(bcmfs4_register_qp_ops)
{
        bcmfs_hw_queue_pair_register_ops(&bcmfs4_qp_ops);
}