baseband/fpga_5gnr_fec: add HW error capture
[dpdk.git] / drivers / baseband / fpga_5gnr_fec / rte_fpga_5gnr_fec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_log.h>
9 #include <rte_dev.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_errno.h>
13 #include <rte_pci.h>
14 #include <rte_bus_pci.h>
15 #include <rte_byteorder.h>
16 #ifdef RTE_BBDEV_OFFLOAD_COST
17 #include <rte_cycles.h>
18 #endif
19
20 #include <rte_bbdev.h>
21 #include <rte_bbdev_pmd.h>
22
23 #include "fpga_5gnr_fec.h"
24
25 /* 5GNR SW PMD logging ID */
26 static int fpga_5gnr_fec_logtype;
27
28 static int
29 fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
30 {
31         /* Number of queues bound to a PF/VF */
32         uint32_t hw_q_num = 0;
33         uint32_t ring_size, payload, address, q_id, offset;
34         rte_iova_t phys_addr;
35         struct fpga_ring_ctrl_reg ring_reg;
36         struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
37
38         address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
39         if (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {
40                 rte_bbdev_log(ERR,
41                                 "Queue-PF/VF mapping is not set! Was PF configured for device (%s) ?",
42                                 dev->data->name);
43                 return -EPERM;
44         }
45
46         /* Clear queue registers structure */
47         memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
48
49         /* Scan queue map.
50          * If a queue is valid and mapped to a calling PF/VF the read value is
51          * replaced with a queue ID and if it's not then
52          * FPGA_INVALID_HW_QUEUE_ID is returned.
53          */
54         for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
55                 uint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,
56                                 FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
57
58                 rte_bbdev_log_debug("%s: queue ID: %u, registry queue ID: %u",
59                                 dev->device->name, q_id, hw_q_id);
60
61                 if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {
62                         fpga_dev->q_bound_bit_map |= (1ULL << q_id);
63                         /* Clear queue register of found queue */
64                         offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
65                                 (sizeof(struct fpga_ring_ctrl_reg) * q_id);
66                         fpga_ring_reg_write(fpga_dev->mmio_base,
67                                         offset, ring_reg);
68                         ++hw_q_num;
69                 }
70         }
71         if (hw_q_num == 0) {
72                 rte_bbdev_log(ERR,
73                         "No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!");
74                 return -ENODEV;
75         }
76
77         if (num_queues > hw_q_num) {
78                 rte_bbdev_log(ERR,
79                         "Not enough queues for device %s! Requested: %u, available: %u",
80                         dev->device->name, num_queues, hw_q_num);
81                 return -EINVAL;
82         }
83
84         ring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);
85
86         /* Enforce 32 byte alignment */
87         RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
88
89         /* Allocate memory for SW descriptor rings */
90         fpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
91                         num_queues * ring_size, RTE_CACHE_LINE_SIZE,
92                         socket_id);
93         if (fpga_dev->sw_rings == NULL) {
94                 rte_bbdev_log(ERR,
95                                 "Failed to allocate memory for %s:%u sw_rings",
96                                 dev->device->driver->name, dev->data->dev_id);
97                 return -ENOMEM;
98         }
99
100         fpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);
101         fpga_dev->sw_ring_size = ring_size;
102         fpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;
103
104         /* Allocate memory for ring flush status */
105         fpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,
106                         sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
107         if (fpga_dev->flush_queue_status == NULL) {
108                 rte_bbdev_log(ERR,
109                                 "Failed to allocate memory for %s:%u flush_queue_status",
110                                 dev->device->driver->name, dev->data->dev_id);
111                 return -ENOMEM;
112         }
113
114         /* Set the flush status address registers */
115         phys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);
116
117         address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
118         payload = (uint32_t)(phys_addr);
119         fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
120
121         address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;
122         payload = (uint32_t)(phys_addr >> 32);
123         fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
124
125         return 0;
126 }
127
128 static int
129 fpga_dev_close(struct rte_bbdev *dev)
130 {
131         struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
132
133         rte_free(fpga_dev->sw_rings);
134         rte_free(fpga_dev->flush_queue_status);
135
136         return 0;
137 }
138
/* Fill the bbdev driver info: advertised capabilities, default queue
 * configuration, HARQ buffer size read from the device and the number of
 * HW queues mapped to this PF/VF.
 */
static void
fpga_dev_info_get(struct rte_bbdev *dev,
		struct rte_bbdev_driver_info *dev_info)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	uint32_t q_id = 0;

	/* Capability list handed out by pointer below: one LDPC encoder
	 * entry, one LDPC decoder entry, then the end-of-list marker.
	 */
	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		{
			.type   = RTE_BBDEV_OP_LDPC_ENC,
			.cap.ldpc_enc = {
				.capability_flags =
						RTE_BBDEV_LDPC_RATE_MATCH |
						RTE_BBDEV_LDPC_ENC_INTERRUPTS |
						RTE_BBDEV_LDPC_CRC_24B_ATTACH,
				.num_buffers_src =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
				.num_buffers_dst =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			}
		},
		{
		.type   = RTE_BBDEV_OP_LDPC_DEC,
		.cap.ldpc_dec = {
			.capability_flags =
				RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
				RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
				RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,
			.llr_size = 6,
			.llr_decimals = 2,
			.num_buffers_src =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_hard_out =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_soft_out = 0,
		}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	/* Check the HARQ DDR size available */
	uint8_t timeout_counter = 0;
	uint32_t harq_buf_ready = fpga_reg_read_32(d->mmio_base,
			FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
	/* Poll the ready register. On timeout an error is logged and
	 * harq_buf_ready is forced to 1 so the loop exits; the size register
	 * is read regardless.
	 */
	while (harq_buf_ready != 1) {
		usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
		timeout_counter++;
		harq_buf_ready = fpga_reg_read_32(d->mmio_base,
				FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
		if (timeout_counter > FPGA_HARQ_RDY_TIMEOUT) {
			rte_bbdev_log(ERR, "HARQ Buffer not ready %d",
					harq_buf_ready);
			harq_buf_ready = 1;
		}
	}
	uint32_t harq_buf_size = fpga_reg_read_32(d->mmio_base,
			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);

	/* Default queue configuration suggested to the application */
	static struct rte_bbdev_queue_conf default_queue_conf;
	default_queue_conf.socket = dev->data->socket_id;
	default_queue_conf.queue_size = FPGA_RING_MAX_SIZE;

	dev_info->driver_name = dev->device->driver->name;
	dev_info->queue_size_lim = FPGA_RING_MAX_SIZE;
	dev_info->hardware_accelerated = true;
	dev_info->min_alignment = 64;
	/* Register value scaled down by 1024 (>> 10), plus one */
	dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = NULL;

	/* Calculates number of queues assigned to device */
	dev_info->max_num_queues = 0;
	for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
		uint32_t hw_q_id = fpga_reg_read_32(d->mmio_base,
				FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
		if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)
			dev_info->max_num_queues++;
	}
}
224
225 /**
226  * Find index of queue bound to current PF/VF which is unassigned. Return -1
227  * when there is no available queue
228  */
229 static inline int
230 fpga_find_free_queue_idx(struct rte_bbdev *dev,
231                 const struct rte_bbdev_queue_conf *conf)
232 {
233         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
234         uint64_t q_idx;
235         uint8_t i = 0;
236         uint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;
237
238         if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
239                 i = FPGA_NUM_DL_QUEUES;
240                 range = FPGA_TOTAL_NUM_QUEUES;
241         }
242
243         for (; i < range; ++i) {
244                 q_idx = 1ULL << i;
245                 /* Check if index of queue is bound to current PF/VF */
246                 if (d->q_bound_bit_map & q_idx)
247                         /* Check if found queue was not already assigned */
248                         if (!(d->q_assigned_bit_map & q_idx)) {
249                                 d->q_assigned_bit_map |= q_idx;
250                                 return i;
251                         }
252         }
253
254         rte_bbdev_log(INFO, "Failed to find free queue on %s", dev->data->name);
255
256         return -1;
257 }
258
259 static int
260 fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
261                 const struct rte_bbdev_queue_conf *conf)
262 {
263         uint32_t address, ring_offset;
264         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
265         struct fpga_queue *q;
266         int8_t q_idx;
267
268         /* Check if there is a free queue to assign */
269         q_idx = fpga_find_free_queue_idx(dev, conf);
270         if (q_idx == -1)
271                 return -1;
272
273         /* Allocate the queue data structure. */
274         q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
275                         RTE_CACHE_LINE_SIZE, conf->socket);
276         if (q == NULL) {
277                 /* Mark queue as un-assigned */
278                 d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
279                 rte_bbdev_log(ERR, "Failed to allocate queue memory");
280                 return -ENOMEM;
281         }
282
283         q->d = d;
284         q->q_idx = q_idx;
285
286         /* Set ring_base_addr */
287         q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
288         q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +
289                         (d->sw_ring_size * queue_id);
290
291         /* Allocate memory for Completion Head variable*/
292         q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
293                         sizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);
294         if (q->ring_head_addr == NULL) {
295                 /* Mark queue as un-assigned */
296                 d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
297                 rte_free(q);
298                 rte_bbdev_log(ERR,
299                                 "Failed to allocate memory for %s:%u completion_head",
300                                 dev->device->driver->name, dev->data->dev_id);
301                 return -ENOMEM;
302         }
303         /* Set ring_head_addr */
304         q->ring_ctrl_reg.ring_head_addr =
305                         rte_malloc_virt2iova(q->ring_head_addr);
306
307         /* Clear shadow_completion_head */
308         q->shadow_completion_head = 0;
309
310         /* Set ring_size */
311         if (conf->queue_size > FPGA_RING_MAX_SIZE) {
312                 /* Mark queue as un-assigned */
313                 d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
314                 rte_free(q->ring_head_addr);
315                 rte_free(q);
316                 rte_bbdev_log(ERR,
317                                 "Size of queue is too big %d (MAX: %d ) for %s:%u",
318                                 conf->queue_size, FPGA_RING_MAX_SIZE,
319                                 dev->device->driver->name, dev->data->dev_id);
320                 return -EINVAL;
321         }
322         q->ring_ctrl_reg.ring_size = conf->queue_size;
323
324         /* Set Miscellaneous FPGA register*/
325         /* Max iteration number for TTI mitigation - todo */
326         q->ring_ctrl_reg.max_ul_dec = 0;
327         /* Enable max iteration number for TTI - todo */
328         q->ring_ctrl_reg.max_ul_dec_en = 0;
329
330         /* Enable the ring */
331         q->ring_ctrl_reg.enable = 1;
332
333         /* Set FPGA head_point and tail registers */
334         q->ring_ctrl_reg.head_point = q->tail = 0;
335
336         /* Set FPGA shadow_tail register */
337         q->ring_ctrl_reg.shadow_tail = q->tail;
338
339         /* Calculates the ring offset for found queue */
340         ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
341                         (sizeof(struct fpga_ring_ctrl_reg) * q_idx);
342
343         /* Set FPGA Ring Control Registers */
344         fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);
345
346         /* Store MMIO register of shadow_tail */
347         address = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;
348         q->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);
349
350         q->head_free_desc = q->tail;
351
352         /* Set wrap mask */
353         q->sw_ring_wrap_mask = conf->queue_size - 1;
354
355         rte_bbdev_log_debug("Setup dev%u q%u: queue_idx=%u",
356                         dev->data->dev_id, queue_id, q->q_idx);
357
358         dev->data->queues[queue_id].queue_private = q;
359
360         rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA queue[%d]",
361                         queue_id, q_idx);
362
363         return 0;
364 }
365
366 static int
367 fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
368 {
369         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
370         struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
371         struct fpga_ring_ctrl_reg ring_reg;
372         uint32_t offset;
373
374         rte_bbdev_log_debug("FPGA Queue[%d] released", queue_id);
375
376         if (q != NULL) {
377                 memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
378                 offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
379                         (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
380                 /* Disable queue */
381                 fpga_reg_write_8(d->mmio_base,
382                                 offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
383                 /* Clear queue registers */
384                 fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
385
386                 /* Mark the Queue as un-assigned */
387                 d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q->q_idx));
388                 rte_free(q->ring_head_addr);
389                 rte_free(q);
390                 dev->data->queues[queue_id].queue_private = NULL;
391         }
392
393         return 0;
394 }
395
396 /* Function starts a device queue. */
397 static int
398 fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
399 {
400         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
401 #ifdef RTE_LIBRTE_BBDEV_DEBUG
402         if (d == NULL) {
403                 rte_bbdev_log(ERR, "Invalid device pointer");
404                 return -1;
405         }
406 #endif
407         struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
408         uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
409                         (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
410         uint8_t enable = 0x01;
411         uint16_t zero = 0x0000;
412
413         /* Clear queue head and tail variables */
414         q->tail = q->head_free_desc = 0;
415
416         /* Clear FPGA head_point and tail registers */
417         fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
418                         zero);
419         fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
420                         zero);
421
422         /* Enable queue */
423         fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
424                         enable);
425
426         rte_bbdev_log_debug("FPGA Queue[%d] started", queue_id);
427         return 0;
428 }
429
430 /* Function stops a device queue. */
431 static int
432 fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
433 {
434         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
435 #ifdef RTE_LIBRTE_BBDEV_DEBUG
436         if (d == NULL) {
437                 rte_bbdev_log(ERR, "Invalid device pointer");
438                 return -1;
439         }
440 #endif
441         struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
442         uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
443                         (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
444         uint8_t payload = 0x01;
445         uint8_t counter = 0;
446         uint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /
447                         FPGA_TIMEOUT_CHECK_INTERVAL;
448
449         /* Set flush_queue_en bit to trigger queue flushing */
450         fpga_reg_write_8(d->mmio_base,
451                         offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);
452
453         /** Check if queue flush is completed.
454          * FPGA will update the completion flag after queue flushing is
455          * completed. If completion flag is not updated within 1ms it is
456          * considered as a failure.
457          */
458         while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx)
459                         & payload)) {
460                 if (counter > timeout) {
461                         rte_bbdev_log(ERR, "FPGA Queue Flush failed for queue %d",
462                                         queue_id);
463                         return -1;
464                 }
465                 usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
466                 counter++;
467         }
468
469         /* Disable queue */
470         payload = 0x00;
471         fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
472                         payload);
473
474         rte_bbdev_log_debug("FPGA Queue[%d] stopped", queue_id);
475         return 0;
476 }
477
/* Device/queue lifecycle operations exposed by this PMD to the bbdev layer */
static const struct rte_bbdev_ops fpga_ops = {
	.setup_queues = fpga_setup_queues,
	.close = fpga_dev_close,
	.info_get = fpga_dev_info_get,
	.queue_setup = fpga_queue_setup,
	.queue_stop = fpga_queue_stop,
	.queue_start = fpga_queue_start,
	.queue_release = fpga_queue_release,
};
/* Notify the HW that num_desc new descriptors are ready: advance the SW tail
 * and publish it through the queue's MMIO shadow-tail register. When
 * RTE_BBDEV_OFFLOAD_COST is enabled, the cycles spent on the MMIO doorbell
 * write are accumulated into queue_stats->acc_offload_cycles.
 */
static inline void
fpga_dma_enqueue(struct fpga_queue *q, uint16_t num_desc,
		struct rte_bbdev_stats *queue_stats)
{
#ifdef RTE_BBDEV_OFFLOAD_COST
	uint64_t start_time = 0;
	queue_stats->acc_offload_cycles = 0;
#else
	RTE_SET_USED(queue_stats);
#endif

	/* Update tail and shadow_tail register */
	q->tail = (q->tail + num_desc) & q->sw_ring_wrap_mask;

	/* Write barrier: descriptor stores must be visible before the tail
	 * update is published to HW below.
	 */
	rte_wmb();

#ifdef RTE_BBDEV_OFFLOAD_COST
	/* Start time measurement for enqueue function offload. */
	start_time = rte_rdtsc_precise();
#endif
	mmio_write_16(q->shadow_tail_addr, q->tail);

#ifdef RTE_BBDEV_OFFLOAD_COST
	rte_wmb();
	queue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
#endif
}
514
/* Test whether any bit selected by bitmask is set in bitmap */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return (bitmap & bitmask) != 0;
}
521
522 /* Print an error if a descriptor error has occurred.
523  *  Return 0 on success, 1 on failure
524  */
525 static inline int
526 check_desc_error(uint32_t error_code) {
527         switch (error_code) {
528         case DESC_ERR_NO_ERR:
529                 return 0;
530         case DESC_ERR_K_P_OUT_OF_RANGE:
531                 rte_bbdev_log(ERR, "Encode block size K' is out of range");
532                 break;
533         case DESC_ERR_Z_C_NOT_LEGAL:
534                 rte_bbdev_log(ERR, "Zc is illegal");
535                 break;
536         case DESC_ERR_DESC_OFFSET_ERR:
537                 rte_bbdev_log(ERR,
538                                 "Queue offset does not meet the expectation in the FPGA"
539                                 );
540                 break;
541         case DESC_ERR_DESC_READ_FAIL:
542                 rte_bbdev_log(ERR, "Unsuccessful completion for descriptor read");
543                 break;
544         case DESC_ERR_DESC_READ_TIMEOUT:
545                 rte_bbdev_log(ERR, "Descriptor read time-out");
546                 break;
547         case DESC_ERR_DESC_READ_TLP_POISONED:
548                 rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
549                 break;
550         case DESC_ERR_CB_READ_FAIL:
551                 rte_bbdev_log(ERR, "Unsuccessful completion for code block");
552                 break;
553         case DESC_ERR_CB_READ_TIMEOUT:
554                 rte_bbdev_log(ERR, "Code block read time-out");
555                 break;
556         case DESC_ERR_CB_READ_TLP_POISONED:
557                 rte_bbdev_log(ERR, "Code block read TLP poisoned");
558                 break;
559         case DESC_ERR_HBSTORE_ERR:
560                 rte_bbdev_log(ERR, "Hbstroe exceeds HARQ buffer size.");
561                 break;
562         default:
563                 rte_bbdev_log(ERR, "Descriptor error unknown error code %u",
564                                 error_code);
565                 break;
566         }
567         return 1;
568 }
569
570 /* Compute value of k0.
571  * Based on 3GPP 38.212 Table 5.4.2.1-2
572  * Starting position of different redundancy versions, k0
573  */
574 static inline uint16_t
575 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
576 {
577         if (rv_index == 0)
578                 return 0;
579         uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
580         if (n_cb == n) {
581                 if (rv_index == 1)
582                         return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
583                 else if (rv_index == 2)
584                         return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
585                 else
586                         return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
587         }
588         /* LBRM case - includes a division by N */
589         if (rv_index == 1)
590                 return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
591                                 / n) * z_c;
592         else if (rv_index == 2)
593                 return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
594                                 / n) * z_c;
595         else
596                 return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
597                                 / n) * z_c;
598 }
599
600 /**
601  * Set DMA descriptor for encode operation (1 Code Block)
602  *
603  * @param op
604  *   Pointer to a single encode operation.
605  * @param desc
606  *   Pointer to DMA descriptor.
607  * @param input
608  *   Pointer to pointer to input data which will be decoded.
609  * @param e
610  *   E value (length of output in bits).
611  * @param ncb
612  *   Ncb value (size of the soft buffer).
613  * @param out_length
614  *   Length of output buffer
615  * @param in_offset
616  *   Input offset in rte_mbuf structure. It is used for calculating the point
617  *   where data is starting.
618  * @param out_offset
619  *   Output offset in rte_mbuf structure. It is used for calculating the point
620  *   where hard output data will be stored.
621  * @param cbs_in_op
622  *   Number of CBs contained in one operation.
623  */
624 static inline int
625 fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
626                 struct fpga_dma_enc_desc *desc, struct rte_mbuf *input,
627                 struct rte_mbuf *output, uint16_t k_,  uint16_t e,
628                 uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
629                 uint8_t cbs_in_op)
630 {
631         /* reset */
632         desc->done = 0;
633         desc->error = 0;
634         desc->k_ = k_;
635         desc->rm_e = e;
636         desc->desc_idx = desc_offset;
637         desc->zc = op->ldpc_enc.z_c;
638         desc->bg_idx = op->ldpc_enc.basegraph - 1;
639         desc->qm_idx = op->ldpc_enc.q_m / 2;
640         desc->crc_en = check_bit(op->ldpc_enc.op_flags,
641                         RTE_BBDEV_LDPC_CRC_24B_ATTACH);
642         desc->irq_en = 0;
643         desc->k0 = get_k0(op->ldpc_enc.n_cb, op->ldpc_enc.z_c,
644                         op->ldpc_enc.basegraph, op->ldpc_enc.rv_index);
645         desc->ncb = op->ldpc_enc.n_cb;
646         desc->num_null = op->ldpc_enc.n_filler;
647         /* Set inbound data buffer address */
648         desc->in_addr_hi = (uint32_t)(
649                         rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
650         desc->in_addr_lw = (uint32_t)(
651                         rte_pktmbuf_mtophys_offset(input, in_offset));
652
653         desc->out_addr_hi = (uint32_t)(
654                         rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
655         desc->out_addr_lw = (uint32_t)(
656                         rte_pktmbuf_mtophys_offset(output, out_offset));
657         /* Save software context needed for dequeue */
658         desc->op_addr = op;
659         /* Set total number of CBs in an op */
660         desc->cbs_in_op = cbs_in_op;
661         return 0;
662 }
663
664 /**
665  * Set DMA descriptor for decode operation (1 Code Block)
666  *
667  * @param op
668  *   Pointer to a single encode operation.
669  * @param desc
670  *   Pointer to DMA descriptor.
671  * @param input
672  *   Pointer to pointer to input data which will be decoded.
673  * @param in_offset
674  *   Input offset in rte_mbuf structure. It is used for calculating the point
675  *   where data is starting.
676  * @param out_offset
677  *   Output offset in rte_mbuf structure. It is used for calculating the point
678  *   where hard output data will be stored.
679  * @param cbs_in_op
680  *   Number of CBs contained in one operation.
681  */
682 static inline int
683 fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
684                 struct fpga_dma_dec_desc *desc,
685                 struct rte_mbuf *input, struct rte_mbuf *output,
686                 uint16_t harq_in_length,
687                 uint32_t in_offset, uint32_t out_offset,
688                 uint32_t harq_offset,
689                 uint16_t desc_offset,
690                 uint8_t cbs_in_op)
691 {
692         /* reset */
693         desc->done = 0;
694         desc->error = 0;
695         /* Set inbound data buffer address */
696         desc->in_addr_hi = (uint32_t)(
697                         rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
698         desc->in_addr_lw = (uint32_t)(
699                         rte_pktmbuf_mtophys_offset(input, in_offset));
700         desc->rm_e = op->ldpc_dec.cb_params.e;
701         desc->harq_input_length = harq_in_length;
702         desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
703                         RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
704         desc->rv = op->ldpc_dec.rv_index;
705         desc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,
706                         RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
707         desc->drop_crc24b = check_bit(op->ldpc_dec.op_flags,
708                         RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);
709         desc->desc_idx = desc_offset;
710         desc->ncb = op->ldpc_dec.n_cb;
711         desc->num_null = op->ldpc_dec.n_filler;
712         desc->hbstroe_offset = harq_offset >> 10;
713         desc->zc = op->ldpc_dec.z_c;
714         desc->harqin_en = check_bit(op->ldpc_dec.op_flags,
715                         RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
716         desc->bg_idx = op->ldpc_dec.basegraph - 1;
717         desc->max_iter = op->ldpc_dec.iter_max;
718         desc->qm_idx = op->ldpc_dec.q_m / 2;
719         desc->out_addr_hi = (uint32_t)(
720                         rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
721         desc->out_addr_lw = (uint32_t)(
722                         rte_pktmbuf_mtophys_offset(output, out_offset));
723         /* Save software context needed for dequeue */
724         desc->op_addr = op;
725         /* Set total number of CBs in an op */
726         desc->cbs_in_op = cbs_in_op;
727
728         return 0;
729 }
730
731 static inline char *
732 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
733 {
734         if (unlikely(len > rte_pktmbuf_tailroom(m)))
735                 return NULL;
736
737         char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
738         m->data_len = (uint16_t)(m->data_len + len);
739         m_head->pkt_len  = (m_head->pkt_len + len);
740         return tail;
741 }
742
743 static inline int
744 enqueue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,
745                 uint16_t desc_offset)
746 {
747         union fpga_dma_desc *desc;
748         int ret;
749         uint8_t c, crc24_bits = 0;
750         struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
751         uint16_t in_offset = enc->input.offset;
752         uint16_t out_offset = enc->output.offset;
753         struct rte_mbuf *m_in = enc->input.data;
754         struct rte_mbuf *m_out = enc->output.data;
755         struct rte_mbuf *m_out_head = enc->output.data;
756         uint32_t in_length, out_length, e;
757         uint16_t total_left = enc->input.length;
758         uint16_t ring_offset;
759         uint16_t K, k_;
760
761         /* Clear op status */
762         op->status = 0;
763
764         if (m_in == NULL || m_out == NULL) {
765                 rte_bbdev_log(ERR, "Invalid mbuf pointer");
766                 op->status = 1 << RTE_BBDEV_DATA_ERROR;
767                 return -EINVAL;
768         }
769
770         if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)
771                 crc24_bits = 24;
772
773         if (enc->code_block_mode == 0) {
774                 /* For Transport Block mode */
775                 /* FIXME */
776                 c = enc->tb_params.c;
777                 e = enc->tb_params.ea;
778         } else { /* For Code Block mode */
779                 c = 1;
780                 e = enc->cb_params.e;
781         }
782
783         /* Update total_left */
784         K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
785         k_ = K - enc->n_filler;
786         in_length = (k_ - crc24_bits) >> 3;
787         out_length = (e + 7) >> 3;
788
789         total_left = rte_pktmbuf_data_len(m_in) - in_offset;
790
791         /* Update offsets */
792         if (total_left != in_length) {
793                 op->status |= 1 << RTE_BBDEV_DATA_ERROR;
794                 rte_bbdev_log(ERR,
795                                 "Mismatch between mbuf length and included CBs sizes %d",
796                                 total_left);
797         }
798
799         mbuf_append(m_out_head, m_out, out_length);
800
801         /* Offset into the ring */
802         ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
803         /* Setup DMA Descriptor */
804         desc = q->ring_addr + ring_offset;
805
806         ret = fpga_dma_desc_te_fill(op, &desc->enc_req, m_in, m_out,
807                         k_, e, in_offset, out_offset, ring_offset, c);
808         if (unlikely(ret < 0))
809                 return ret;
810
811         /* Update lengths */
812         total_left -= in_length;
813         op->ldpc_enc.output.length += out_length;
814
815         if (total_left > 0) {
816                 rte_bbdev_log(ERR,
817                         "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
818                                 total_left, in_length);
819                 return -1;
820         }
821
822         return 1;
823 }
824
/* Enqueue one LDPC decode operation (single code block, CB mode) on device
 * queue. Fills a DMA descriptor at q->tail + desc_offset and updates the
 * op's hard/HARQ output lengths.
 *
 * @param q            Target FPGA queue.
 * @param op           Decode operation to enqueue.
 * @param desc_offset  Offset from q->tail at which to place the descriptor.
 *
 * @return Number of CBs enqueued (1) on success, -1 or a negative errno-style
 *         value on failure.
 */
static inline int
enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
                uint16_t desc_offset)
{
        union fpga_dma_desc *desc;
        int ret;
        uint16_t ring_offset;
        uint8_t c;
        uint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;
        uint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;
        uint16_t crc24_overlap = 0;
        struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
        struct rte_mbuf *m_in = dec->input.data;
        struct rte_mbuf *m_out = dec->hard_output.data;
        struct rte_mbuf *m_out_head = dec->hard_output.data;
        uint16_t in_offset = dec->input.offset;
        uint16_t out_offset = dec->hard_output.offset;
        uint32_t harq_offset = 0;

        /* Clear op status */
        op->status = 0;

        /* Setup DMA Descriptor */
        ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
        desc = q->ring_addr + ring_offset;

        if (m_in == NULL || m_out == NULL) {
                rte_bbdev_log(ERR, "Invalid mbuf pointer");
                op->status = 1 << RTE_BBDEV_DATA_ERROR;
                return -1;
        }

        /* Only CB mode is handled here: a single code block per op */
        c = 1;
        e = dec->cb_params.e;

        /* When 24B CRC drop is requested, the last 24 bits of the CB overlap
         * with the CRC and are excluded from the hard output.
         */
        if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
                crc24_overlap = 24;

        /* K = systematic columns (BG1: 22, BG2: 10) times lifting size */
        sys_cols = (dec->basegraph == 1) ? 22 : 10;
        K = sys_cols * dec->z_c;
        parity_offset = K - 2 * dec->z_c;

        /* Hard output bytes after removing filler bits and dropped CRC */
        out_length = ((K - crc24_overlap - dec->n_filler) >> 3);
        in_length = e;
        seg_total_left = dec->input.length;

        /* HARQ combined input is capped at the circular buffer size n_cb */
        if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
                harq_in_length = RTE_MIN(dec->harq_combined_input.length,
                                (uint32_t)dec->n_cb);
        }

        if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
                /* k0 = rate-matching start position for this rv_index */
                k0 = get_k0(dec->n_cb, dec->z_c,
                                dec->basegraph, dec->rv_index);
                /* Filler bits only contribute before the parity section */
                if (k0 > parity_offset)
                        l = k0 + e;
                else
                        l = k0 + e + dec->n_filler;
                harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l),
                                dec->n_cb - dec->n_filler);
                dec->harq_combined_output.length = harq_out_length;
        }

        /* NOTE(review): mbuf_append return is not checked here — assumes the
         * caller provided sufficient tailroom for the hard output; confirm.
         */
        mbuf_append(m_out_head, m_out, out_length);
        if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
                harq_offset = dec->harq_combined_input.offset;
        else if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
                harq_offset = dec->harq_combined_output.offset;

        /* HARQ offset must have its low 10 bits clear (1024-byte aligned) */
        if ((harq_offset & 0x3FF) > 0) {
                rte_bbdev_log(ERR, "Invalid HARQ offset %d", harq_offset);
                op->status = 1 << RTE_BBDEV_DATA_ERROR;
                return -1;
        }

        ret = fpga_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
                harq_in_length, in_offset, out_offset, harq_offset,
                ring_offset, c);
        if (unlikely(ret < 0))
                return ret;
        /* Update lengths */
        seg_total_left -= in_length;
        op->ldpc_dec.hard_output.length += out_length;
        if (seg_total_left > 0) {
                rte_bbdev_log(ERR,
                                "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
                                seg_total_left, in_length);
                return -1;
        }

        return 1;
}
917
918 static uint16_t
919 fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
920                 struct rte_bbdev_enc_op **ops, uint16_t num)
921 {
922         uint16_t i, total_enqueued_cbs = 0;
923         int32_t avail;
924         int enqueued_cbs;
925         struct fpga_queue *q = q_data->queue_private;
926         union fpga_dma_desc *desc;
927
928         /* Check if queue is not full */
929         if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
930                         q->head_free_desc))
931                 return 0;
932
933         /* Calculates available space */
934         avail = (q->head_free_desc > q->tail) ?
935                 q->head_free_desc - q->tail - 1 :
936                 q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
937
938         for (i = 0; i < num; ++i) {
939
940                 /* Check if there is available space for further
941                  * processing
942                  */
943                 if (unlikely(avail - 1 < 0))
944                         break;
945                 avail -= 1;
946                 enqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i],
947                                 total_enqueued_cbs);
948
949                 if (enqueued_cbs < 0)
950                         break;
951
952                 total_enqueued_cbs += enqueued_cbs;
953
954                 rte_bbdev_log_debug("enqueuing enc ops [%d/%d] | head %d | tail %d",
955                                 total_enqueued_cbs, num,
956                                 q->head_free_desc, q->tail);
957         }
958
959         /* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
960          * only when all previous CBs were already processed.
961          */
962         desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
963                         & q->sw_ring_wrap_mask);
964         desc->enc_req.irq_en = q->irq_enable;
965
966         fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
967
968         /* Update stats */
969         q_data->queue_stats.enqueued_count += i;
970         q_data->queue_stats.enqueue_err_count += num - i;
971
972         return i;
973 }
974
975 static uint16_t
976 fpga_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
977                 struct rte_bbdev_dec_op **ops, uint16_t num)
978 {
979         uint16_t i, total_enqueued_cbs = 0;
980         int32_t avail;
981         int enqueued_cbs;
982         struct fpga_queue *q = q_data->queue_private;
983         union fpga_dma_desc *desc;
984
985         /* Check if queue is not full */
986         if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
987                         q->head_free_desc))
988                 return 0;
989
990         /* Calculates available space */
991         avail = (q->head_free_desc > q->tail) ?
992                 q->head_free_desc - q->tail - 1 :
993                 q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
994
995         for (i = 0; i < num; ++i) {
996
997                 /* Check if there is available space for further
998                  * processing
999                  */
1000                 if (unlikely(avail - 1 < 0))
1001                         break;
1002                 avail -= 1;
1003                 enqueued_cbs = enqueue_ldpc_dec_one_op_cb(q, ops[i],
1004                                 total_enqueued_cbs);
1005
1006                 if (enqueued_cbs < 0)
1007                         break;
1008
1009                 total_enqueued_cbs += enqueued_cbs;
1010
1011                 rte_bbdev_log_debug("enqueuing dec ops [%d/%d] | head %d | tail %d",
1012                                 total_enqueued_cbs, num,
1013                                 q->head_free_desc, q->tail);
1014         }
1015
1016         /* Update stats */
1017         q_data->queue_stats.enqueued_count += i;
1018         q_data->queue_stats.enqueue_err_count += num - i;
1019
1020         /* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
1021          * only when all previous CBs were already processed.
1022          */
1023         desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1024                         & q->sw_ring_wrap_mask);
1025         desc->enc_req.irq_en = q->irq_enable;
1026         fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
1027         return i;
1028 }
1029
1030
1031 static inline int
1032 dequeue_ldpc_enc_one_op_cb(struct fpga_queue *q,
1033                 struct rte_bbdev_enc_op **op,
1034                 uint16_t desc_offset)
1035 {
1036         union fpga_dma_desc *desc;
1037         int desc_error;
1038         /* Set current desc */
1039         desc = q->ring_addr + ((q->head_free_desc + desc_offset)
1040                         & q->sw_ring_wrap_mask);
1041
1042         /*check if done */
1043         if (desc->enc_req.done == 0)
1044                 return -1;
1045
1046         /* make sure the response is read atomically */
1047         rte_smp_rmb();
1048
1049         rte_bbdev_log_debug("DMA response desc %p", desc);
1050
1051         *op = desc->enc_req.op_addr;
1052         /* Check the descriptor error field, return 1 on error */
1053         desc_error = check_desc_error(desc->enc_req.error);
1054         (*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
1055
1056         return 1;
1057 }
1058
1059
/* Retrieve one completed LDPC decode operation from the ring and capture the
 * hardware status indications (CRC, syndrome, descriptor error) into the op.
 * Returns 1 when a finished descriptor was consumed, -1 when the descriptor
 * at the given offset has not completed yet.
 */
static inline int
dequeue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
                uint16_t desc_offset)
{
        union fpga_dma_desc *desc;
        int desc_error;
        /* Set descriptor */
        desc = q->ring_addr + ((q->head_free_desc + desc_offset)
                        & q->sw_ring_wrap_mask);

        /* Verify done bit is set */
        if (desc->dec_req.done == 0)
                return -1;

        /* make sure the response is read atomically */
        rte_smp_rmb();

        *op = desc->dec_req.op_addr;

        /* In internal HARQ memory loopback mode report success without
         * inspecting the CRC/syndrome indications.
         */
        if (check_bit((*op)->ldpc_dec.op_flags,
                        RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
                (*op)->status = 0;
                return 1;
        }

        /* FPGA reports iterations based on round-up minus 1 */
        (*op)->ldpc_dec.iter_count = desc->dec_req.iter + 1;
        /* CRC Check criteria: CRC24B indicated as present but failed */
        if (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))
                (*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
        /* et_pass = 0 when decoder fails */
        (*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;
        /* Check the descriptor error field, return 1 on error */
        desc_error = check_desc_error(desc->dec_req.error);
        (*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
        return 1;
}
1097
1098 static uint16_t
1099 fpga_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1100                 struct rte_bbdev_enc_op **ops, uint16_t num)
1101 {
1102         struct fpga_queue *q = q_data->queue_private;
1103         uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
1104         uint16_t i;
1105         uint16_t dequeued_cbs = 0;
1106         int ret;
1107
1108         for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
1109                 ret = dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
1110
1111                 if (ret < 0)
1112                         break;
1113
1114                 dequeued_cbs += ret;
1115
1116                 rte_bbdev_log_debug("dequeuing enc ops [%d/%d] | head %d | tail %d",
1117                                 dequeued_cbs, num, q->head_free_desc, q->tail);
1118         }
1119
1120         /* Update head */
1121         q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
1122                         q->sw_ring_wrap_mask;
1123
1124         /* Update stats */
1125         q_data->queue_stats.dequeued_count += i;
1126
1127         return i;
1128 }
1129
1130 static uint16_t
1131 fpga_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1132                 struct rte_bbdev_dec_op **ops, uint16_t num)
1133 {
1134         struct fpga_queue *q = q_data->queue_private;
1135         uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
1136         uint16_t i;
1137         uint16_t dequeued_cbs = 0;
1138         int ret;
1139
1140         for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
1141                 ret = dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
1142
1143                 if (ret < 0)
1144                         break;
1145
1146                 dequeued_cbs += ret;
1147
1148                 rte_bbdev_log_debug("dequeuing dec ops [%d/%d] | head %d | tail %d",
1149                                 dequeued_cbs, num, q->head_free_desc, q->tail);
1150         }
1151
1152         /* Update head */
1153         q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
1154                         q->sw_ring_wrap_mask;
1155
1156         /* Update stats */
1157         q_data->queue_stats.dequeued_count += i;
1158
1159         return i;
1160 }
1161
1162
/* Initialization Function: binds the ops table and enqueue/dequeue handlers,
 * and stores the PF/VF identity and MMIO base in the device private data.
 */
static void
fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
{
        struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

        dev->dev_ops = &fpga_ops;
        dev->enqueue_ldpc_enc_ops = fpga_enqueue_ldpc_enc;
        dev->enqueue_ldpc_dec_ops = fpga_enqueue_ldpc_dec;
        dev->dequeue_ldpc_enc_ops = fpga_dequeue_ldpc_enc;
        dev->dequeue_ldpc_dec_ops = fpga_dequeue_ldpc_dec;

        /* PF vs VF is distinguished by which PCI driver matched the device */
        ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
                        !strcmp(drv->driver.name,
                                        RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
        /* Registers are accessed through BAR 0 */
        ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
                        pci_dev->mem_resource[0].addr;

        rte_bbdev_log_debug(
                        "Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
                        dev->device->driver->name, dev->data->name,
                        (void *)pci_dev->mem_resource[0].addr,
                        pci_dev->mem_resource[0].phys_addr);
}
1187
1188 static int
1189 fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
1190         struct rte_pci_device *pci_dev)
1191 {
1192         struct rte_bbdev *bbdev = NULL;
1193         char dev_name[RTE_BBDEV_NAME_MAX_LEN];
1194
1195         if (pci_dev == NULL) {
1196                 rte_bbdev_log(ERR, "NULL PCI device");
1197                 return -EINVAL;
1198         }
1199
1200         rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
1201
1202         /* Allocate memory to be used privately by drivers */
1203         bbdev = rte_bbdev_allocate(pci_dev->device.name);
1204         if (bbdev == NULL)
1205                 return -ENODEV;
1206
1207         /* allocate device private memory */
1208         bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
1209                         sizeof(struct fpga_5gnr_fec_device),
1210                         RTE_CACHE_LINE_SIZE,
1211                         pci_dev->device.numa_node);
1212
1213         if (bbdev->data->dev_private == NULL) {
1214                 rte_bbdev_log(CRIT,
1215                                 "Allocate of %zu bytes for device \"%s\" failed",
1216                                 sizeof(struct fpga_5gnr_fec_device), dev_name);
1217                                 rte_bbdev_release(bbdev);
1218                         return -ENOMEM;
1219         }
1220
1221         /* Fill HW specific part of device structure */
1222         bbdev->device = &pci_dev->device;
1223         bbdev->intr_handle = &pci_dev->intr_handle;
1224         bbdev->data->socket_id = pci_dev->device.numa_node;
1225
1226         /* Invoke FEC FPGA device initialization function */
1227         fpga_5gnr_fec_init(bbdev, pci_drv);
1228
1229         rte_bbdev_log_debug("bbdev id = %u [%s]",
1230                         bbdev->data->dev_id, dev_name);
1231
1232         return 0;
1233 }
1234
/* Remove callback: close and release the bbdev associated with the PCI
 * device being detached.
 *
 * @return 0 on success (close/release failures are only logged), negative
 *         errno-style value when the device cannot be found.
 */
static int
fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
{
        struct rte_bbdev *bbdev;
        int ret;
        uint8_t dev_id;

        if (pci_dev == NULL)
                return -EINVAL;

        /* Find device */
        bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
        if (bbdev == NULL) {
                rte_bbdev_log(CRIT,
                                "Couldn't find HW dev \"%s\" to uninitialise it",
                                pci_dev->device.name);
                return -ENODEV;
        }
        /* Cache the id: bbdev data is not usable after release below */
        dev_id = bbdev->data->dev_id;

        /* free device private memory before close */
        rte_free(bbdev->data->dev_private);

        /* Close device */
        ret = rte_bbdev_close(dev_id);
        if (ret < 0)
                rte_bbdev_log(ERR,
                                "Device %i failed to close during uninit: %i",
                                dev_id, ret);

        /* release bbdev from library */
        ret = rte_bbdev_release(bbdev);
        if (ret)
                rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id,
                                ret);

        rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);

        return 0;
}
1275
/* FPGA 5GNR FEC PCI PF address map: vendor/device ID pairs matched by the
 * PF driver; the zero device_id entry terminates the table.
 */
static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
        {
                RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
                                FPGA_5GNR_FEC_PF_DEVICE_ID)
        },
        {.device_id = 0},
};
1284
/* PF driver definition: probe/remove callbacks and the PF ID table above */
static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
        .probe = fpga_5gnr_fec_probe,
        .remove = fpga_5gnr_fec_remove,
        .id_table = pci_id_fpga_5gnr_fec_pf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING
};
1291
/* FPGA 5GNR FEC PCI VF address map: vendor/device ID pairs matched by the
 * VF driver; the zero device_id entry terminates the table.
 */
static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
        {
                RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
                                FPGA_5GNR_FEC_VF_DEVICE_ID)
        },
        {.device_id = 0},
};
1300
/* VF driver definition: probe/remove callbacks and the VF ID table above */
static struct rte_pci_driver fpga_5gnr_fec_pci_vf_driver = {
        .probe = fpga_5gnr_fec_probe,
        .remove = fpga_5gnr_fec_remove,
        .id_table = pci_id_fpga_5gnr_fec_vf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING
};
1307
1308
/* Register the PF and VF drivers and their PCI ID tables with the PCI bus */
RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_PF_DRIVER_NAME, fpga_5gnr_fec_pci_pf_driver);
RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME,
                pci_id_fpga_5gnr_fec_pf_map);
RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_VF_DRIVER_NAME, fpga_5gnr_fec_pci_vf_driver);
RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME,
                pci_id_fpga_5gnr_fec_vf_map);
1315
1316 RTE_INIT(fpga_5gnr_fec_init_log)
1317 {
1318         fpga_5gnr_fec_logtype = rte_log_register("pmd.bb.fpga_5gnr_fec");
1319         if (fpga_5gnr_fec_logtype >= 0)
1320 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1321                 rte_log_set_level(fpga_5gnr_fec_logtype, RTE_LOG_DEBUG);
1322 #else
1323                 rte_log_set_level(fpga_5gnr_fec_logtype, RTE_LOG_NOTICE);
1324 #endif
1325 }