dpdk.git: drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c (commit 7e44a19580a32f1bacf51910f03dda32ff406315)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_log.h>
9 #include <rte_dev.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_errno.h>
13 #include <rte_pci.h>
14 #include <rte_bus_pci.h>
15 #include <rte_byteorder.h>
16 #ifdef RTE_BBDEV_OFFLOAD_COST
17 #include <rte_cycles.h>
18 #endif
19
20 #include <rte_bbdev.h>
21 #include <rte_bbdev_pmd.h>
22
23 #include "fpga_5gnr_fec.h"
24
25 /* 5GNR SW PMD logging ID */
26 static int fpga_5gnr_fec_logtype;
27
28 #ifdef RTE_LIBRTE_BBDEV_DEBUG
29
30 /* Read Ring Control Register of FPGA 5GNR FEC device */
31 static inline void
32 print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
33 {
34         rte_bbdev_log_debug(
35                 "FPGA MMIO base address @ %p | Ring Control Register @ offset = 0x%08"
36                 PRIx32, mmio_base, offset);
37         rte_bbdev_log_debug(
38                 "RING_BASE_ADDR = 0x%016"PRIx64,
39                 fpga_reg_read_64(mmio_base, offset));
40         rte_bbdev_log_debug(
41                 "RING_HEAD_ADDR = 0x%016"PRIx64,
42                 fpga_reg_read_64(mmio_base, offset +
43                                 FPGA_5GNR_FEC_RING_HEAD_ADDR));
44         rte_bbdev_log_debug(
45                 "RING_SIZE = 0x%04"PRIx16,
46                 fpga_reg_read_16(mmio_base, offset +
47                                 FPGA_5GNR_FEC_RING_SIZE));
48         rte_bbdev_log_debug(
49                 "RING_MISC = 0x%02"PRIx8,
50                 fpga_reg_read_8(mmio_base, offset +
51                                 FPGA_5GNR_FEC_RING_MISC));
52         rte_bbdev_log_debug(
53                 "RING_ENABLE = 0x%02"PRIx8,
54                 fpga_reg_read_8(mmio_base, offset +
55                                 FPGA_5GNR_FEC_RING_ENABLE));
56         rte_bbdev_log_debug(
57                 "RING_FLUSH_QUEUE_EN = 0x%02"PRIx8,
58                 fpga_reg_read_8(mmio_base, offset +
59                                 FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN));
60         rte_bbdev_log_debug(
61                 "RING_SHADOW_TAIL = 0x%04"PRIx16,
62                 fpga_reg_read_16(mmio_base, offset +
63                                 FPGA_5GNR_FEC_RING_SHADOW_TAIL));
64         rte_bbdev_log_debug(
65                 "RING_HEAD_POINT = 0x%04"PRIx16,
66                 fpga_reg_read_16(mmio_base, offset +
67                                 FPGA_5GNR_FEC_RING_HEAD_POINT));
68 }
69
70 /* Read Static Register of FPGA 5GNR FEC device */
71 static inline void
72 print_static_reg_debug_info(void *mmio_base)
73 {
74         uint16_t config = fpga_reg_read_16(mmio_base,
75                         FPGA_5GNR_FEC_CONFIGURATION);
76         uint8_t qmap_done = fpga_reg_read_8(mmio_base,
77                         FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
78         uint16_t lb_factor = fpga_reg_read_16(mmio_base,
79                         FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);
80         uint16_t ring_desc_len = fpga_reg_read_16(mmio_base,
81                         FPGA_5GNR_FEC_RING_DESC_LEN);
82         uint16_t flr_time_out = fpga_reg_read_16(mmio_base,
83                         FPGA_5GNR_FEC_FLR_TIME_OUT);
84
85         rte_bbdev_log_debug("UL.DL Weights = %u.%u",
86                         ((uint8_t)config), ((uint8_t)(config >> 8)));
87         rte_bbdev_log_debug("UL.DL Load Balance = %u.%u",
88                         ((uint8_t)lb_factor), ((uint8_t)(lb_factor >> 8)));
89         rte_bbdev_log_debug("Queue-PF/VF Mapping Table = %s",
90                         (qmap_done > 0) ? "READY" : "NOT-READY");
91         rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
92                         ring_desc_len*FPGA_RING_DESC_LEN_UNIT_BYTES);
93         rte_bbdev_log_debug("FLR Timeout = %f usec",
94                         (float)flr_time_out*FPGA_FLR_TIMEOUT_UNIT);
95 }
96
97 /* Print decode DMA Descriptor of FPGA 5GNR Decoder device */
98 static void
99 print_dma_dec_desc_debug_info(union fpga_dma_desc *desc)
100 {
101         rte_bbdev_log_debug("DMA response desc %p\n"
102                 "\t-- done(%"PRIu32") | iter(%"PRIu32") | et_pass(%"PRIu32")"
103                 " | crcb_pass (%"PRIu32") | error(%"PRIu32")\n"
104                 "\t-- qm_idx(%"PRIu32") | max_iter(%"PRIu32") | "
105                 "bg_idx (%"PRIu32") | harqin_en(%"PRIu32") | zc(%"PRIu32")\n"
106                 "\t-- hbstroe_offset(%"PRIu32") | num_null (%"PRIu32") "
107                 "| irq_en(%"PRIu32")\n"
108                 "\t-- ncb(%"PRIu32") | desc_idx (%"PRIu32") | "
109                 "drop_crc24b(%"PRIu32") | RV (%"PRIu32")\n"
110                 "\t-- crc24b_ind(%"PRIu32") | et_dis (%"PRIu32")\n"
111                 "\t-- harq_input_length(%"PRIu32") | rm_e(%"PRIu32")\n"
112                 "\t-- cbs_in_op(%"PRIu32") | in_add (0x%08"PRIx32"%08"PRIx32")"
113                 "| out_add (0x%08"PRIx32"%08"PRIx32")",
114                 desc,
115                 (uint32_t)desc->dec_req.done,
116                 (uint32_t)desc->dec_req.iter,
117                 (uint32_t)desc->dec_req.et_pass,
118                 (uint32_t)desc->dec_req.crcb_pass,
119                 (uint32_t)desc->dec_req.error,
120                 (uint32_t)desc->dec_req.qm_idx,
121                 (uint32_t)desc->dec_req.max_iter,
122                 (uint32_t)desc->dec_req.bg_idx,
123                 (uint32_t)desc->dec_req.harqin_en,
124                 (uint32_t)desc->dec_req.zc,
125                 (uint32_t)desc->dec_req.hbstroe_offset,
126                 (uint32_t)desc->dec_req.num_null,
127                 (uint32_t)desc->dec_req.irq_en,
128                 (uint32_t)desc->dec_req.ncb,
129                 (uint32_t)desc->dec_req.desc_idx,
130                 (uint32_t)desc->dec_req.drop_crc24b,
131                 (uint32_t)desc->dec_req.rv,
132                 (uint32_t)desc->dec_req.crc24b_ind,
133                 (uint32_t)desc->dec_req.et_dis,
134                 (uint32_t)desc->dec_req.harq_input_length,
135                 (uint32_t)desc->dec_req.rm_e,
136                 (uint32_t)desc->dec_req.cbs_in_op,
137                 (uint32_t)desc->dec_req.in_addr_hi,
138                 (uint32_t)desc->dec_req.in_addr_lw,
139                 (uint32_t)desc->dec_req.out_addr_hi,
140                 (uint32_t)desc->dec_req.out_addr_lw);
141         uint32_t *word = (uint32_t *) desc;
142         rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
143                         "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
144                         word[0], word[1], word[2], word[3],
145                         word[4], word[5], word[6], word[7]);
146 }
147
148 /* Print encode DMA Descriptor of FPGA 5GNR encoder device */
149 static void
150 print_dma_enc_desc_debug_info(union fpga_dma_desc *desc)
151 {
152         rte_bbdev_log_debug("DMA response desc %p\n"
153                         "%"PRIu32" %"PRIu32"\n"
154                         "K' %"PRIu32" E %"PRIu32" desc %"PRIu32" Z %"PRIu32"\n"
155                         "BG %"PRIu32" Qm %"PRIu32" CRC %"PRIu32" IRQ %"PRIu32"\n"
156                         "k0 %"PRIu32" Ncb %"PRIu32" F %"PRIu32"\n",
157                         desc,
158                         (uint32_t)desc->enc_req.done,
159                         (uint32_t)desc->enc_req.error,
160
161                         (uint32_t)desc->enc_req.k_,
162                         (uint32_t)desc->enc_req.rm_e,
163                         (uint32_t)desc->enc_req.desc_idx,
164                         (uint32_t)desc->enc_req.zc,
165
166                         (uint32_t)desc->enc_req.bg_idx,
167                         (uint32_t)desc->enc_req.qm_idx,
168                         (uint32_t)desc->enc_req.crc_en,
169                         (uint32_t)desc->enc_req.irq_en,
170
171                         (uint32_t)desc->enc_req.k0,
172                         (uint32_t)desc->enc_req.ncb,
173                         (uint32_t)desc->enc_req.num_null);
174         uint32_t *word = (uint32_t *) desc;
175         rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
176                         "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
177                         word[0], word[1], word[2], word[3],
178                         word[4], word[5], word[6], word[7]);
179 }
180
181 #endif
182
183 static int
184 fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
185 {
186         /* Number of queues bound to a PF/VF */
187         uint32_t hw_q_num = 0;
188         uint32_t ring_size, payload, address, q_id, offset;
189         rte_iova_t phys_addr;
190         struct fpga_ring_ctrl_reg ring_reg;
191         struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
192
193         address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
194         if (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {
195                 rte_bbdev_log(ERR,
196                                 "Queue-PF/VF mapping is not set! Was PF configured for device (%s) ?",
197                                 dev->data->name);
198                 return -EPERM;
199         }
200
201         /* Clear queue registers structure */
202         memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
203
204         /* Scan queue map.
205          * If a queue is valid and mapped to the calling PF/VF, the read
206          * returns the hardware queue ID; otherwise it returns
207          * FPGA_INVALID_HW_QUEUE_ID.
208          */
209         for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
210                 uint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,
211                                 FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
212
213                 rte_bbdev_log_debug("%s: queue ID: %u, registry queue ID: %u",
214                                 dev->device->name, q_id, hw_q_id);
215
216                 if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {
217                         fpga_dev->q_bound_bit_map |= (1ULL << q_id);
218                         /* Clear queue register of found queue */
219                         offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
220                                 (sizeof(struct fpga_ring_ctrl_reg) * q_id);
221                         fpga_ring_reg_write(fpga_dev->mmio_base,
222                                         offset, ring_reg);
223                         ++hw_q_num;
224                 }
225         }
226         if (hw_q_num == 0) {
227                 rte_bbdev_log(ERR,
228                         "No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!");
229                 return -ENODEV;
230         }
231
232         if (num_queues > hw_q_num) {
233                 rte_bbdev_log(ERR,
234                         "Not enough queues for device %s! Requested: %u, available: %u",
235                         dev->device->name, num_queues, hw_q_num);
236                 return -EINVAL;
237         }
238
239         ring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);
240
241         /* Enforce 32 byte alignment */
242         RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
243
244         /* Allocate memory for SW descriptor rings */
245         fpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
246                         num_queues * ring_size, RTE_CACHE_LINE_SIZE,
247                         socket_id);
248         if (fpga_dev->sw_rings == NULL) {
249                 rte_bbdev_log(ERR,
250                                 "Failed to allocate memory for %s:%u sw_rings",
251                                 dev->device->driver->name, dev->data->dev_id);
252                 return -ENOMEM;
253         }
254
255         fpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);
256         fpga_dev->sw_ring_size = ring_size;
257         fpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;
258
259         /* Allocate memory for ring flush status */
260         fpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,
261                         sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
262         if (fpga_dev->flush_queue_status == NULL) {
263                 rte_bbdev_log(ERR,
264                                 "Failed to allocate memory for %s:%u flush_queue_status",
265                                 dev->device->driver->name, dev->data->dev_id);
266                 return -ENOMEM;
267         }
268
269         /* Set the flush status address registers */
270         phys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);
271
272         address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
273         payload = (uint32_t)(phys_addr);
274         fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
275
276         address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;
277         payload = (uint32_t)(phys_addr >> 32);
278         fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
279
280         return 0;
281 }
282
283 static int
284 fpga_dev_close(struct rte_bbdev *dev)
285 {
286         struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
287
288         rte_free(fpga_dev->sw_rings);
289         rte_free(fpga_dev->flush_queue_status);
290
291         return 0;
292 }
293
294 static void
295 fpga_dev_info_get(struct rte_bbdev *dev,
296                 struct rte_bbdev_driver_info *dev_info)
297 {
298         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
299         uint32_t q_id = 0;
300
301         static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
302                 {
303                         .type   = RTE_BBDEV_OP_LDPC_ENC,
304                         .cap.ldpc_enc = {
305                                 .capability_flags =
306                                                 RTE_BBDEV_LDPC_RATE_MATCH |
307                                                 RTE_BBDEV_LDPC_ENC_INTERRUPTS |
308                                                 RTE_BBDEV_LDPC_CRC_24B_ATTACH,
309                                 .num_buffers_src =
310                                                 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
311                                 .num_buffers_dst =
312                                                 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
313                         }
314                 },
315                 {
316                 .type   = RTE_BBDEV_OP_LDPC_DEC,
317                 .cap.ldpc_dec = {
318                         .capability_flags =
319                                 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
320                                 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
321                                 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
322                                 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
323                                 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
324                                 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
325                                 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
326                                 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,
327                         .llr_size = 6,
328                         .llr_decimals = 2,
329                         .num_buffers_src =
330                                         RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
331                         .num_buffers_hard_out =
332                                         RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
333                         .num_buffers_soft_out = 0,
334                 }
335                 },
336                 RTE_BBDEV_END_OF_CAPABILITIES_LIST()
337         };
338
339         /* Check the HARQ DDR size available */
340         uint8_t timeout_counter = 0;
341         uint32_t harq_buf_ready = fpga_reg_read_32(d->mmio_base,
342                         FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
343         while (harq_buf_ready != 1) {
344                 usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
345                 timeout_counter++;
346                 harq_buf_ready = fpga_reg_read_32(d->mmio_base,
347                                 FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
348                 if (timeout_counter > FPGA_HARQ_RDY_TIMEOUT) {
349                         rte_bbdev_log(ERR, "HARQ Buffer not ready %d",
350                                         harq_buf_ready);
351                         harq_buf_ready = 1;
352                 }
353         }
354         uint32_t harq_buf_size = fpga_reg_read_32(d->mmio_base,
355                         FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
356
357         static struct rte_bbdev_queue_conf default_queue_conf;
358         default_queue_conf.socket = dev->data->socket_id;
359         default_queue_conf.queue_size = FPGA_RING_MAX_SIZE;
360
361         dev_info->driver_name = dev->device->driver->name;
362         dev_info->queue_size_lim = FPGA_RING_MAX_SIZE;
363         dev_info->hardware_accelerated = true;
364         dev_info->min_alignment = 64;
365         dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
366         dev_info->default_queue_conf = default_queue_conf;
367         dev_info->capabilities = bbdev_capabilities;
368         dev_info->cpu_flag_reqs = NULL;
369
370         /* Calculates number of queues assigned to device */
371         dev_info->max_num_queues = 0;
372         for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
373                 uint32_t hw_q_id = fpga_reg_read_32(d->mmio_base,
374                                 FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
375                 if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)
376                         dev_info->max_num_queues++;
377         }
378 }
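
/*
 * Editor's note: a minimal sketch (not part of the upstream driver) of how an
 * application retrieves the information filled in above through the public
 * bbdev API; dev_id is a placeholder:
 *
 *     struct rte_bbdev_info info;
 *     rte_bbdev_info_get(dev_id, &info);
 *     // info.drv.max_num_queues, info.drv.capabilities and
 *     // info.drv.harq_buffer_size now reflect what fpga_dev_info_get()
 *     // reported for this device.
 */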
379
380 /**
381  * Find index of queue bound to current PF/VF which is unassigned. Return -1
382  * when there is no available queue
383  */
384 static inline int
385 fpga_find_free_queue_idx(struct rte_bbdev *dev,
386                 const struct rte_bbdev_queue_conf *conf)
387 {
388         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
389         uint64_t q_idx;
390         uint8_t i = 0;
391         uint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;
392
393         if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
394                 i = FPGA_NUM_DL_QUEUES;
395                 range = FPGA_TOTAL_NUM_QUEUES;
396         }
397
398         for (; i < range; ++i) {
399                 q_idx = 1ULL << i;
400                 /* Check if index of queue is bound to current PF/VF */
401                 if (d->q_bound_bit_map & q_idx)
402                         /* Check if found queue was not already assigned */
403                         if (!(d->q_assigned_bit_map & q_idx)) {
404                                 d->q_assigned_bit_map |= q_idx;
405                                 return i;
406                         }
407         }
408
409         rte_bbdev_log(INFO, "Failed to find free queue on %s", dev->data->name);
410
411         return -1;
412 }
413
414 static int
415 fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
416                 const struct rte_bbdev_queue_conf *conf)
417 {
418         uint32_t address, ring_offset;
419         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
420         struct fpga_queue *q;
421         int8_t q_idx;
422
423         /* Check if there is a free queue to assign */
424         q_idx = fpga_find_free_queue_idx(dev, conf);
425         if (q_idx == -1)
426                 return -1;
427
428         /* Allocate the queue data structure. */
429         q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
430                         RTE_CACHE_LINE_SIZE, conf->socket);
431         if (q == NULL) {
432                 /* Mark queue as un-assigned */
433                 d->q_assigned_bit_map &= ~(1ULL << q_idx);
434                 rte_bbdev_log(ERR, "Failed to allocate queue memory");
435                 return -ENOMEM;
436         }
437
438         q->d = d;
439         q->q_idx = q_idx;
440
441         /* Set ring_base_addr */
442         q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
443         q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +
444                         (d->sw_ring_size * queue_id);
445
446         /* Allocate memory for Completion Head variable */
447         q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
448                         sizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);
449         if (q->ring_head_addr == NULL) {
450                 /* Mark queue as un-assigned */
451                 d->q_assigned_bit_map &= ~(1ULL << q_idx);
452                 rte_free(q);
453                 rte_bbdev_log(ERR,
454                                 "Failed to allocate memory for %s:%u completion_head",
455                                 dev->device->driver->name, dev->data->dev_id);
456                 return -ENOMEM;
457         }
458         /* Set ring_head_addr */
459         q->ring_ctrl_reg.ring_head_addr =
460                         rte_malloc_virt2iova(q->ring_head_addr);
461
462         /* Clear shadow_completion_head */
463         q->shadow_completion_head = 0;
464
465         /* Set ring_size */
466         if (conf->queue_size > FPGA_RING_MAX_SIZE) {
467                 /* Mark queue as un-assigned */
468                 d->q_assigned_bit_map &= ~(1ULL << q_idx);
469                 rte_free(q->ring_head_addr);
470                 rte_free(q);
471                 rte_bbdev_log(ERR,
472                                 "Size of queue is too big %d (MAX: %d ) for %s:%u",
473                                 conf->queue_size, FPGA_RING_MAX_SIZE,
474                                 dev->device->driver->name, dev->data->dev_id);
475                 return -EINVAL;
476         }
477         q->ring_ctrl_reg.ring_size = conf->queue_size;
478
479         /* Set Miscellaneous FPGA register */
480         /* Max iteration number for TTI mitigation - todo */
481         q->ring_ctrl_reg.max_ul_dec = 0;
482         /* Enable max iteration number for TTI - todo */
483         q->ring_ctrl_reg.max_ul_dec_en = 0;
484
485         /* Enable the ring */
486         q->ring_ctrl_reg.enable = 1;
487
488         /* Set FPGA head_point and tail registers */
489         q->ring_ctrl_reg.head_point = q->tail = 0;
490
491         /* Set FPGA shadow_tail register */
492         q->ring_ctrl_reg.shadow_tail = q->tail;
493
494         /* Calculates the ring offset for found queue */
495         ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
496                         (sizeof(struct fpga_ring_ctrl_reg) * q_idx);
497
498         /* Set FPGA Ring Control Registers */
499         fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);
500
501         /* Store MMIO register of shadow_tail */
502         address = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;
503         q->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);
504
505         q->head_free_desc = q->tail;
506
507         /* Set wrap mask */
508         q->sw_ring_wrap_mask = conf->queue_size - 1;
509
510         rte_bbdev_log_debug("Setup dev%u q%u: queue_idx=%u",
511                         dev->data->dev_id, queue_id, q->q_idx);
512
513         dev->data->queues[queue_id].queue_private = q;
514
515         rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA queue[%d]",
516                         queue_id, q_idx);
517
518 #ifdef RTE_LIBRTE_BBDEV_DEBUG
519         /* Read FPGA Ring Control Registers after configuration*/
520         print_ring_reg_debug_info(d->mmio_base, ring_offset);
521 #endif
522         return 0;
523 }
524
525 static int
526 fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
527 {
528         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
529         struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
530         struct fpga_ring_ctrl_reg ring_reg;
531         uint32_t offset;
532
533         rte_bbdev_log_debug("FPGA Queue[%d] released", queue_id);
534
535         if (q != NULL) {
536                 memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
537                 offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
538                         (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
539                 /* Disable queue */
540                 fpga_reg_write_8(d->mmio_base,
541                                 offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
542                 /* Clear queue registers */
543                 fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
544
545                 /* Mark the Queue as un-assigned */
546                 d->q_assigned_bit_map &= ~(1ULL << q->q_idx);
547                 rte_free(q->ring_head_addr);
548                 rte_free(q);
549                 dev->data->queues[queue_id].queue_private = NULL;
550         }
551
552         return 0;
553 }
554
555 /* Function starts a device queue. */
556 static int
557 fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
558 {
559         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
560 #ifdef RTE_LIBRTE_BBDEV_DEBUG
561         if (d == NULL) {
562                 rte_bbdev_log(ERR, "Invalid device pointer");
563                 return -1;
564         }
565 #endif
566         struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
567         uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
568                         (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
569         uint8_t enable = 0x01;
570         uint16_t zero = 0x0000;
571
572         /* Clear queue head and tail variables */
573         q->tail = q->head_free_desc = 0;
574
575         /* Clear FPGA head_point and tail registers */
576         fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
577                         zero);
578         fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
579                         zero);
580
581         /* Enable queue */
582         fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
583                         enable);
584
585         rte_bbdev_log_debug("FPGA Queue[%d] started", queue_id);
586         return 0;
587 }
588
589 /* Function stops a device queue. */
590 static int
591 fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
592 {
593         struct fpga_5gnr_fec_device *d = dev->data->dev_private;
594 #ifdef RTE_LIBRTE_BBDEV_DEBUG
595         if (d == NULL) {
596                 rte_bbdev_log(ERR, "Invalid device pointer");
597                 return -1;
598         }
599 #endif
600         struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
601         uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
602                         (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
603         uint8_t payload = 0x01;
604         uint8_t counter = 0;
605         uint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /
606                         FPGA_TIMEOUT_CHECK_INTERVAL;
607
608         /* Set flush_queue_en bit to trigger queue flushing */
609         fpga_reg_write_8(d->mmio_base,
610                         offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);
611
612         /* Check if queue flush is completed.
613          * FPGA will update the completion flag after queue flushing is
614          * completed. If the completion flag is not updated within 1 ms it is
615          * considered a failure.
616          */
617         while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx)
618                         & payload)) {
619                 if (counter > timeout) {
620                         rte_bbdev_log(ERR, "FPGA Queue Flush failed for queue %d",
621                                         queue_id);
622                         return -1;
623                 }
624                 usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
625                 counter++;
626         }
627
628         /* Disable queue */
629         payload = 0x00;
630         fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
631                         payload);
632
633         rte_bbdev_log_debug("FPGA Queue[%d] stopped", queue_id);
634         return 0;
635 }
636
637 static const struct rte_bbdev_ops fpga_ops = {
638         .setup_queues = fpga_setup_queues,
639         .close = fpga_dev_close,
640         .info_get = fpga_dev_info_get,
641         .queue_setup = fpga_queue_setup,
642         .queue_stop = fpga_queue_stop,
643         .queue_start = fpga_queue_start,
644         .queue_release = fpga_queue_release,
645 };
646
647 static inline void
648 fpga_dma_enqueue(struct fpga_queue *q, uint16_t num_desc,
649                 struct rte_bbdev_stats *queue_stats)
650 {
651 #ifdef RTE_BBDEV_OFFLOAD_COST
652         uint64_t start_time = 0;
653         queue_stats->acc_offload_cycles = 0;
654 #else
655         RTE_SET_USED(queue_stats);
656 #endif
657
658         /* Update tail and shadow_tail register */
659         q->tail = (q->tail + num_desc) & q->sw_ring_wrap_mask;
660
661         rte_wmb();
662
663 #ifdef RTE_BBDEV_OFFLOAD_COST
664         /* Start time measurement for enqueue function offload. */
665         start_time = rte_rdtsc_precise();
666 #endif
667         mmio_write_16(q->shadow_tail_addr, q->tail);
668
669 #ifdef RTE_BBDEV_OFFLOAD_COST
670         rte_wmb();
671         queue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
672 #endif
673 }
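
/*
 * Editor's note: a minimal sketch (not part of the upstream driver) showing
 * how the acc_offload_cycles counter accumulated above can be turned into
 * wall-clock time by an application; dev_id is a placeholder:
 *
 *     struct rte_bbdev_stats stats;
 *     rte_bbdev_stats_get(dev_id, &stats);
 *     double offload_us = stats.acc_offload_cycles * 1e6 /
 *                     (double)rte_get_tsc_hz();
 *
 * rte_bbdev_stats_get() and rte_get_tsc_hz() are existing DPDK APIs; the
 * counter is only populated when RTE_BBDEV_OFFLOAD_COST is enabled.
 */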
674
675 /* Read flag value 0/1 from bitmap */
676 static inline bool
677 check_bit(uint32_t bitmap, uint32_t bitmask)
678 {
679         return bitmap & bitmask;
680 }
681
682 /* Print an error if a descriptor error has occurred.
683  *  Return 0 on success, 1 on failure
684  */
685 static inline int
686 check_desc_error(uint32_t error_code) {
687         switch (error_code) {
688         case DESC_ERR_NO_ERR:
689                 return 0;
690         case DESC_ERR_K_P_OUT_OF_RANGE:
691                 rte_bbdev_log(ERR, "Encode block size K' is out of range");
692                 break;
693         case DESC_ERR_Z_C_NOT_LEGAL:
694                 rte_bbdev_log(ERR, "Zc is illegal");
695                 break;
696         case DESC_ERR_DESC_OFFSET_ERR:
697                 rte_bbdev_log(ERR,
698                                 "Queue offset does not meet the expectation in the FPGA"
699                                 );
700                 break;
701         case DESC_ERR_DESC_READ_FAIL:
702                 rte_bbdev_log(ERR, "Unsuccessful completion for descriptor read");
703                 break;
704         case DESC_ERR_DESC_READ_TIMEOUT:
705                 rte_bbdev_log(ERR, "Descriptor read time-out");
706                 break;
707         case DESC_ERR_DESC_READ_TLP_POISONED:
708                 rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
709                 break;
710         case DESC_ERR_CB_READ_FAIL:
711                 rte_bbdev_log(ERR, "Unsuccessful completion for code block");
712                 break;
713         case DESC_ERR_CB_READ_TIMEOUT:
714                 rte_bbdev_log(ERR, "Code block read time-out");
715                 break;
716         case DESC_ERR_CB_READ_TLP_POISONED:
717                 rte_bbdev_log(ERR, "Code block read TLP poisoned");
718                 break;
719         case DESC_ERR_HBSTORE_ERR:
720                 rte_bbdev_log(ERR, "Hbstore exceeds HARQ buffer size.");
721                 break;
722         default:
723                 rte_bbdev_log(ERR, "Descriptor error unknown error code %u",
724                                 error_code);
725                 break;
726         }
727         return 1;
728 }
729
730 /* Compute value of k0.
731  * Based on 3GPP 38.212 Table 5.4.2.1-2
732  * Starting position of different redundancy versions, k0
733  */
734 static inline uint16_t
735 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
736 {
737         if (rv_index == 0)
738                 return 0;
739         uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
740         if (n_cb == n) {
741                 if (rv_index == 1)
742                         return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
743                 else if (rv_index == 2)
744                         return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
745                 else
746                         return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
747         }
748         /* LBRM case - includes a division by N */
749         if (rv_index == 1)
750                 return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
751                                 / n) * z_c;
752         else if (rv_index == 2)
753                 return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
754                                 / n) * z_c;
755         else
756                 return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
757                                 / n) * z_c;
758 }
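
/*
 * Editor's note, illustrative only: a worked example of the k0 selection
 * above, assuming the usual header constants N_ZC_1 = 66 and K0_2_1 = 33 for
 * base graph 1 (from 3GPP TS 38.212 Table 5.4.2.1-2). For z_c = 384 and a
 * full circular buffer n_cb = 66 * 384 = 25344, rv_index = 2 gives
 * k0 = 33 * 384 = 12672. In the LBRM case with n_cb = 12672, the same
 * rv_index gives k0 = ((33 * 12672) / 25344) * 384 = 16 * 384 = 6144
 * (integer division).
 */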
759
760 /**
761  * Set DMA descriptor for encode operation (1 Code Block)
762  *
763  * @param op
764  *   Pointer to a single encode operation.
765  * @param desc
766  *   Pointer to DMA descriptor.
767  * @param input
768  *   Pointer to the input data which will be encoded.
769  * @param output
770  *   Pointer to the output buffer for the encoded data.
771  * @param k_
772  *   K' value (code block size in bits, excluding filler bits).
773  * @param e
774  *   E value (length of output in bits).
775  * @param in_offset
776  *   Input offset in rte_mbuf structure; start of the input data.
777  * @param out_offset
778  *   Output offset in rte_mbuf structure; start of the encoded output data.
779  * @param desc_offset
780  *   Offset of the descriptor in the software ring.
781  * @param cbs_in_op
782  *   Number of CBs contained in one operation.
783  */
784 static inline int
785 fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
786                 struct fpga_dma_enc_desc *desc, struct rte_mbuf *input,
787                 struct rte_mbuf *output, uint16_t k_,  uint16_t e,
788                 uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
789                 uint8_t cbs_in_op)
790 {
791         /* reset */
792         desc->done = 0;
793         desc->error = 0;
794         desc->k_ = k_;
795         desc->rm_e = e;
796         desc->desc_idx = desc_offset;
797         desc->zc = op->ldpc_enc.z_c;
798         desc->bg_idx = op->ldpc_enc.basegraph - 1;
799         desc->qm_idx = op->ldpc_enc.q_m / 2;
800         desc->crc_en = check_bit(op->ldpc_enc.op_flags,
801                         RTE_BBDEV_LDPC_CRC_24B_ATTACH);
802         desc->irq_en = 0;
803         desc->k0 = get_k0(op->ldpc_enc.n_cb, op->ldpc_enc.z_c,
804                         op->ldpc_enc.basegraph, op->ldpc_enc.rv_index);
805         desc->ncb = op->ldpc_enc.n_cb;
806         desc->num_null = op->ldpc_enc.n_filler;
807         /* Set inbound data buffer address */
808         desc->in_addr_hi = (uint32_t)(
809                         rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
810         desc->in_addr_lw = (uint32_t)(
811                         rte_pktmbuf_mtophys_offset(input, in_offset));
812
813         desc->out_addr_hi = (uint32_t)(
814                         rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
815         desc->out_addr_lw = (uint32_t)(
816                         rte_pktmbuf_mtophys_offset(output, out_offset));
817         /* Save software context needed for dequeue */
818         desc->op_addr = op;
819         /* Set total number of CBs in an op */
820         desc->cbs_in_op = cbs_in_op;
821         return 0;
822 }
823
824 /**
825  * Set DMA descriptor for decode operation (1 Code Block)
826  *
827  * @param op
828  *   Pointer to a single decode operation.
829  * @param desc
830  *   Pointer to DMA descriptor.
831  * @param input
832  *   Pointer to the input data which will be decoded.
833  * @param in_offset
834  *   Input offset in rte_mbuf structure. It is used for calculating the point
835  *   where data is starting.
836  * @param out_offset
837  *   Output offset in rte_mbuf structure. It is used for calculating the point
838  *   where hard output data will be stored.
839  * @param cbs_in_op
840  *   Number of CBs contained in one operation.
841  */
842 static inline int
843 fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
844                 struct fpga_dma_dec_desc *desc,
845                 struct rte_mbuf *input, struct rte_mbuf *output,
846                 uint16_t harq_in_length,
847                 uint32_t in_offset, uint32_t out_offset,
848                 uint32_t harq_offset,
849                 uint16_t desc_offset,
850                 uint8_t cbs_in_op)
851 {
852         /* reset */
853         desc->done = 0;
854         desc->error = 0;
855         /* Set inbound data buffer address */
856         desc->in_addr_hi = (uint32_t)(
857                         rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
858         desc->in_addr_lw = (uint32_t)(
859                         rte_pktmbuf_mtophys_offset(input, in_offset));
860         desc->rm_e = op->ldpc_dec.cb_params.e;
861         desc->harq_input_length = harq_in_length;
862         desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
863                         RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
864         desc->rv = op->ldpc_dec.rv_index;
865         desc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,
866                         RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
867         desc->drop_crc24b = check_bit(op->ldpc_dec.op_flags,
868                         RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);
869         desc->desc_idx = desc_offset;
870         desc->ncb = op->ldpc_dec.n_cb;
871         desc->num_null = op->ldpc_dec.n_filler;
872         desc->hbstroe_offset = harq_offset >> 10;
873         desc->zc = op->ldpc_dec.z_c;
874         desc->harqin_en = check_bit(op->ldpc_dec.op_flags,
875                         RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
876         desc->bg_idx = op->ldpc_dec.basegraph - 1;
877         desc->max_iter = op->ldpc_dec.iter_max;
878         desc->qm_idx = op->ldpc_dec.q_m / 2;
879         desc->out_addr_hi = (uint32_t)(
880                         rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
881         desc->out_addr_lw = (uint32_t)(
882                         rte_pktmbuf_mtophys_offset(output, out_offset));
883         /* Save software context needed for dequeue */
884         desc->op_addr = op;
885         /* Set total number of CBs in an op */
886         desc->cbs_in_op = cbs_in_op;
887
888         return 0;
889 }
890
891 #ifdef RTE_LIBRTE_BBDEV_DEBUG
892 /* Validates LDPC encoder parameters */
893 static int
894 validate_enc_op(struct rte_bbdev_enc_op *op __rte_unused)
895 {
896         struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;
897         struct rte_bbdev_op_enc_ldpc_cb_params *cb = NULL;
898         struct rte_bbdev_op_enc_ldpc_tb_params *tb = NULL;
899
900
901         if (ldpc_enc->input.length >
902                         RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {
903                 rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
904                                 ldpc_enc->input.length,
905                                 RTE_BBDEV_LDPC_MAX_CB_SIZE);
906                 return -1;
907         }
908
909         if (op->mempool == NULL) {
910                 rte_bbdev_log(ERR, "Invalid mempool pointer");
911                 return -1;
912         }
913         if (ldpc_enc->input.data == NULL) {
914                 rte_bbdev_log(ERR, "Invalid input pointer");
915                 return -1;
916         }
917         if (ldpc_enc->output.data == NULL) {
918                 rte_bbdev_log(ERR, "Invalid output pointer");
919                 return -1;
920         }
921         if ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {
922                 rte_bbdev_log(ERR,
923                                 "basegraph (%u) is out of range 1 <= value <= 2",
924                                 ldpc_enc->basegraph);
925                 return -1;
926         }
927         if (ldpc_enc->code_block_mode > 1) {
928                 rte_bbdev_log(ERR,
929                                 "code_block_mode (%u) is out of range 0:Tb 1:CB",
930                                 ldpc_enc->code_block_mode);
931                 return -1;
932         }
933
934         if (ldpc_enc->code_block_mode == 0) {
935                 tb = &ldpc_enc->tb_params;
936                 if (tb->c == 0) {
937                         rte_bbdev_log(ERR,
938                                         "c (%u) is out of range 1 <= value <= %u",
939                                         tb->c, RTE_BBDEV_LDPC_MAX_CODE_BLOCKS);
940                         return -1;
941                 }
942                 if (tb->cab > tb->c) {
943                         rte_bbdev_log(ERR,
944                                         "cab (%u) is greater than c (%u)",
945                                         tb->cab, tb->c);
946                         return -1;
947                 }
948                 if ((tb->ea < RTE_BBDEV_LDPC_MIN_CB_SIZE)
949                                 && tb->r < tb->cab) {
950                         rte_bbdev_log(ERR,
951                                         "ea (%u) is less than %u",
952                                         tb->ea, RTE_BBDEV_LDPC_MIN_CB_SIZE);
953                         return -1;
954                 }
955                 if ((tb->eb < RTE_BBDEV_LDPC_MIN_CB_SIZE)
956                                 && tb->c > tb->cab) {
957                         rte_bbdev_log(ERR,
958                                         "eb (%u) is less than %u",
959                                         tb->eb, RTE_BBDEV_LDPC_MIN_CB_SIZE);
960                         return -1;
961                 }
962                 if (tb->r > (tb->c - 1)) {
963                         rte_bbdev_log(ERR,
964                                         "r (%u) is greater than c - 1 (%u)",
965                                         tb->r, tb->c - 1);
966                         return -1;
967                 }
968         } else {
969                 cb = &ldpc_enc->cb_params;
970                 if (cb->e < RTE_BBDEV_LDPC_MIN_CB_SIZE) {
971                         rte_bbdev_log(ERR,
972                                         "e (%u) is less than %u",
973                                         cb->e, RTE_BBDEV_LDPC_MIN_CB_SIZE);
974                         return -1;
975                 }
976         }
977         return 0;
978 }
979 #endif
980
981 static inline char *
982 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
983 {
984         if (unlikely(len > rte_pktmbuf_tailroom(m)))
985                 return NULL;
986
987         char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
988         m->data_len = (uint16_t)(m->data_len + len);
989         m_head->pkt_len  = (m_head->pkt_len + len);
990         return tail;
991 }
992
993 #ifdef RTE_LIBRTE_BBDEV_DEBUG
994 /* Validates LDPC decoder parameters */
995 static int
996 validate_dec_op(struct rte_bbdev_dec_op *op __rte_unused)
997 {
998         struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
999         struct rte_bbdev_op_dec_ldpc_cb_params *cb = NULL;
1000         struct rte_bbdev_op_dec_ldpc_tb_params *tb = NULL;
1001
1002         if (op->mempool == NULL) {
1003                 rte_bbdev_log(ERR, "Invalid mempool pointer");
1004                 return -1;
1005         }
1006         if (ldpc_dec->rv_index > 3) {
1007                 rte_bbdev_log(ERR,
1008                                 "rv_index (%u) is out of range 0 <= value <= 3",
1009                                 ldpc_dec->rv_index);
1010                 return -1;
1011         }
1012
1013         if (ldpc_dec->iter_max == 0) {
1014                 rte_bbdev_log(ERR,
1015                                 "iter_max (%u) is equal to 0",
1016                                 ldpc_dec->iter_max);
1017                 return -1;
1018         }
1019
1020         if (ldpc_dec->code_block_mode > 1) {
1021                 rte_bbdev_log(ERR,
1022                                 "code_block_mode (%u) is out of range 0 <= value <= 1",
1023                                 ldpc_dec->code_block_mode);
1024                 return -1;
1025         }
1026
1027         if (ldpc_dec->code_block_mode == 0) {
1028                 tb = &ldpc_dec->tb_params;
1029                 if (tb->c < 1) {
1030                         rte_bbdev_log(ERR,
1031                                         "c (%u) is out of range 1 <= value <= %u",
1032                                         tb->c, RTE_BBDEV_LDPC_MAX_CODE_BLOCKS);
1033                         return -1;
1034                 }
1035                 if (tb->cab > tb->c) {
1036                         rte_bbdev_log(ERR,
1037                                         "cab (%u) is greater than c (%u)",
1038                                         tb->cab, tb->c);
1039                         return -1;
1040                 }
1041         } else {
1042                 cb = &ldpc_dec->cb_params;
1043                 if (cb->e < RTE_BBDEV_LDPC_MIN_CB_SIZE) {
1044                         rte_bbdev_log(ERR,
1045                                         "e (%u) is out of range %u <= value <= %u",
1046                                         cb->e, RTE_BBDEV_LDPC_MIN_CB_SIZE,
1047                                         RTE_BBDEV_LDPC_MAX_CB_SIZE);
1048                         return -1;
1049                 }
1050         }
1051
1052         return 0;
1053 }
1054 #endif
1055
1056 static inline int
1057 enqueue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,
1058                 uint16_t desc_offset)
1059 {
1060         union fpga_dma_desc *desc;
1061         int ret;
1062         uint8_t c, crc24_bits = 0;
1063         struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1064         uint16_t in_offset = enc->input.offset;
1065         uint16_t out_offset = enc->output.offset;
1066         struct rte_mbuf *m_in = enc->input.data;
1067         struct rte_mbuf *m_out = enc->output.data;
1068         struct rte_mbuf *m_out_head = enc->output.data;
1069         uint32_t in_length, out_length, e;
1070         uint16_t total_left = enc->input.length;
1071         uint16_t ring_offset;
1072         uint16_t K, k_;
1073
1074 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1075         /* Validate op structure */
1076         /* FIXME */
1077         if (validate_enc_op(op) == -1) {
1078                 rte_bbdev_log(ERR, "LDPC encoder validation failed");
1079                 return -EINVAL;
1080         }
1081 #endif
1082
1083         /* Clear op status */
1084         op->status = 0;
1085
1086         if (m_in == NULL || m_out == NULL) {
1087                 rte_bbdev_log(ERR, "Invalid mbuf pointer");
1088                 op->status = 1 << RTE_BBDEV_DATA_ERROR;
1089                 return -EINVAL;
1090         }
1091
1092         if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)
1093                 crc24_bits = 24;
1094
1095         if (enc->code_block_mode == 0) {
1096                 /* For Transport Block mode */
1097                 /* FIXME */
1098                 c = enc->tb_params.c;
1099                 e = enc->tb_params.ea;
1100         } else { /* For Code Block mode */
1101                 c = 1;
1102                 e = enc->cb_params.e;
1103         }
1104
1105         /* Compute expected input/output lengths in bytes */
1106         K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1107         k_ = K - enc->n_filler;
1108         in_length = (k_ - crc24_bits) >> 3;
1109         out_length = (e + 7) >> 3;
1110
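        /*
         * Editor's note, illustrative numbers only: for BG1 with z_c = 384,
         * K = 22 * 384 = 8448 bits. With n_filler = 0 and CRC24B attach
         * enabled (crc24_bits = 24), k_ = 8448 and the expected input is
         * in_length = (8448 - 24) / 8 = 1053 bytes; a rate-matched output of
         * e = 10000 bits occupies out_length = (10000 + 7) / 8 = 1250 bytes.
         */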
1111         total_left = rte_pktmbuf_data_len(m_in) - in_offset;
1112
1113         /* Validate input length against the expected CB payload size */
1114         if (total_left != in_length) {
1115                 op->status |= 1 << RTE_BBDEV_DATA_ERROR;
1116                 rte_bbdev_log(ERR,
1117                                 "Mismatch between mbuf length and included CBs sizes %d",
1118                                 total_left);
1119         }
1120
1121         mbuf_append(m_out_head, m_out, out_length);
1122
1123         /* Offset into the ring */
1124         ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
1125         /* Setup DMA Descriptor */
1126         desc = q->ring_addr + ring_offset;
1127
1128         ret = fpga_dma_desc_te_fill(op, &desc->enc_req, m_in, m_out,
1129                         k_, e, in_offset, out_offset, ring_offset, c);
1130         if (unlikely(ret < 0))
1131                 return ret;
1132
1133         /* Update lengths */
1134         total_left -= in_length;
1135         op->ldpc_enc.output.length += out_length;
1136
1137         if (total_left > 0) {
1138                 rte_bbdev_log(ERR,
1139                         "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1140                                 total_left, in_length);
1141                 return -1;
1142         }
1143
1144 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1145         print_dma_enc_desc_debug_info(desc);
1146 #endif
1147         return 1;
1148 }
1149
1150 static inline int
1151 enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
1152                 uint16_t desc_offset)
1153 {
1154         union fpga_dma_desc *desc;
1155         int ret;
1156         uint16_t ring_offset;
1157         uint8_t c;
1158         uint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;
1159         uint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;
1160         uint16_t crc24_overlap = 0;
1161         struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1162         struct rte_mbuf *m_in = dec->input.data;
1163         struct rte_mbuf *m_out = dec->hard_output.data;
1164         struct rte_mbuf *m_out_head = dec->hard_output.data;
1165         uint16_t in_offset = dec->input.offset;
1166         uint16_t out_offset = dec->hard_output.offset;
1167         uint32_t harq_offset = 0;
1168
1169 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1170         /* Validate op structure */
1171         if (validate_dec_op(op) == -1) {
1172                 rte_bbdev_log(ERR, "LDPC decoder validation failed");
1173                 return -EINVAL;
1174         }
1175 #endif
1176
1177         /* Clear op status */
1178         op->status = 0;
1179
1180         /* Setup DMA Descriptor */
1181         ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
1182         desc = q->ring_addr + ring_offset;
1183
1184         if (m_in == NULL || m_out == NULL) {
1185                 rte_bbdev_log(ERR, "Invalid mbuf pointer");
1186                 op->status = 1 << RTE_BBDEV_DATA_ERROR;
1187                 return -1;
1188         }
1189
1190         c = 1;
1191         e = dec->cb_params.e;
1192
1193         if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
1194                 crc24_overlap = 24;
1195
1196         sys_cols = (dec->basegraph == 1) ? 22 : 10;
1197         K = sys_cols * dec->z_c;
1198         parity_offset = K - 2 * dec->z_c;
1199
1200         out_length = ((K - crc24_overlap - dec->n_filler) >> 3);
1201         in_length = e;
1202         seg_total_left = dec->input.length;
1203
1204         if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1205                 harq_in_length = RTE_MIN(dec->harq_combined_input.length,
1206                                 (uint32_t)dec->n_cb);
1207         }
1208
1209         if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1210                 k0 = get_k0(dec->n_cb, dec->z_c,
1211                                 dec->basegraph, dec->rv_index);
1212                 if (k0 > parity_offset)
1213                         l = k0 + e;
1214                 else
1215                         l = k0 + e + dec->n_filler;
1216                 harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l),
1217                                 dec->n_cb - dec->n_filler);
1218                 dec->harq_combined_output.length = harq_out_length;
1219         }
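        /*
         * Editor's note, illustrative numbers only: for BG1 with z_c = 224,
         * n_cb = 66 * 224 = 14784, rv_index = 0 gives k0 = 0, which is not
         * beyond parity_offset, so l = k0 + e + n_filler. With e = 4000 and
         * n_filler = 40, l = 4040; on a first transmission harq_in_length is
         * 0, so harq_out_length = min(max(0, 4040), 14784 - 40) = 4040.
         */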
1220
1221         mbuf_append(m_out_head, m_out, out_length);
1222         if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
1223                 harq_offset = dec->harq_combined_input.offset;
1224         else if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
1225                 harq_offset = dec->harq_combined_output.offset;
1226
1227         if ((harq_offset & 0x3FF) > 0) {
1228                 rte_bbdev_log(ERR, "Invalid HARQ offset %d", harq_offset);
1229                 op->status = 1 << RTE_BBDEV_DATA_ERROR;
1230                 return -1;
1231         }
1232
1233         ret = fpga_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
1234                 harq_in_length, in_offset, out_offset, harq_offset,
1235                 ring_offset, c);
1236         if (unlikely(ret < 0))
1237                 return ret;
1238         /* Update lengths */
1239         seg_total_left -= in_length;
1240         op->ldpc_dec.hard_output.length += out_length;
1241         if (seg_total_left > 0) {
1242                 rte_bbdev_log(ERR,
1243                                 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1244                                 seg_total_left, in_length);
1245                 return -1;
1246         }
1247
1248 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1249         print_dma_dec_desc_debug_info(desc);
1250 #endif
1251
1252         return 1;
1253 }
1254
1255 static uint16_t
1256 fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1257                 struct rte_bbdev_enc_op **ops, uint16_t num)
1258 {
1259         uint16_t i, total_enqueued_cbs = 0;
1260         int32_t avail;
1261         int enqueued_cbs;
1262         struct fpga_queue *q = q_data->queue_private;
1263         union fpga_dma_desc *desc;
1264
1265         /* Check if queue is not full */
1266         if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
1267                         q->head_free_desc))
1268                 return 0;
1269
1270         /* Calculates available space */
1271         avail = (q->head_free_desc > q->tail) ?
1272                 q->head_free_desc - q->tail - 1 :
1273                 q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
1274
1275         for (i = 0; i < num; ++i) {
1276
1277                 /* Check if there is available space for further
1278                  * processing
1279                  */
1280                 if (unlikely(avail - 1 < 0))
1281                         break;
1282                 avail -= 1;
1283                 enqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i],
1284                                 total_enqueued_cbs);
1285
1286                 if (enqueued_cbs < 0)
1287                         break;
1288
1289                 total_enqueued_cbs += enqueued_cbs;
1290
1291                 rte_bbdev_log_debug("enqueuing enc ops [%d/%d] | head %d | tail %d",
1292                                 total_enqueued_cbs, num,
1293                                 q->head_free_desc, q->tail);
1294         }
1295
1296         /* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
1297          * only when all previous CBs were already processed.
1298          */
1299         desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1300                         & q->sw_ring_wrap_mask);
1301         desc->enc_req.irq_en = q->irq_enable;
1302
1303         fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
1304
1305         /* Update stats */
1306         q_data->queue_stats.enqueued_count += i;
1307         q_data->queue_stats.enqueue_err_count += num - i;
1308
1309         return i;
1310 }
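
/*
 * Editor's note: a minimal application-side sketch (not part of the upstream
 * driver) of how this enqueue path is typically driven through the public
 * bbdev API; dev_id, queue_id, mp and NB_OPS are placeholders and error
 * handling is omitted:
 *
 *     struct rte_bbdev_enc_op *ops[NB_OPS];
 *     rte_bbdev_enc_op_alloc_bulk(mp, ops, NB_OPS);
 *     // ... fill ops[i]->ldpc_enc fields and attach input/output mbufs ...
 *     uint16_t enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
 *                     ops, NB_OPS);
 *     uint16_t deq = 0;
 *     while (deq < enq)
 *             deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
 *                             &ops[deq], enq - deq);
 *
 * When this PMD owns the queue, the enqueue call above is expected to land in
 * fpga_enqueue_ldpc_enc().
 */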
1311
1312 static uint16_t
1313 fpga_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1314                 struct rte_bbdev_dec_op **ops, uint16_t num)
1315 {
1316         uint16_t i, total_enqueued_cbs = 0;
1317         int32_t avail;
1318         int enqueued_cbs;
1319         struct fpga_queue *q = q_data->queue_private;
1320         union fpga_dma_desc *desc;
1321
1322         /* Check if queue is not full */
1323         if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
1324                         q->head_free_desc))
1325                 return 0;
1326
1327         /* Calculates available space */
1328         avail = (q->head_free_desc > q->tail) ?
1329                 q->head_free_desc - q->tail - 1 :
1330                 q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
1331
1332         for (i = 0; i < num; ++i) {
1333
1334                 /* Check if there is available space for further
1335                  * processing
1336                  */
1337                 if (unlikely(avail - 1 < 0))
1338                         break;
1339                 avail -= 1;
1340                 enqueued_cbs = enqueue_ldpc_dec_one_op_cb(q, ops[i],
1341                                 total_enqueued_cbs);
1342
1343                 if (enqueued_cbs < 0)
1344                         break;
1345
1346                 total_enqueued_cbs += enqueued_cbs;
1347
1348                 rte_bbdev_log_debug("enqueuing dec ops [%d/%d] | head %d | tail %d",
1349                                 total_enqueued_cbs, num,
1350                                 q->head_free_desc, q->tail);
1351         }
1352
1353         /* Update stats */
1354         q_data->queue_stats.enqueued_count += i;
1355         q_data->queue_stats.enqueue_err_count += num - i;
1356
1357         /* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
1358          * only when all previous CBs were already processed.
1359          */
1360         desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1361                         & q->sw_ring_wrap_mask);
1362         desc->enc_req.irq_en = q->irq_enable;
1363         fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
1364         return i;
1365 }
1366
1367
1368 static inline int
1369 dequeue_ldpc_enc_one_op_cb(struct fpga_queue *q,
1370                 struct rte_bbdev_enc_op **op,
1371                 uint16_t desc_offset)
1372 {
1373         union fpga_dma_desc *desc;
1374         int desc_error;
1375         /* Set current desc */
1376         desc = q->ring_addr + ((q->head_free_desc + desc_offset)
1377                         & q->sw_ring_wrap_mask);
1378
1379         /* Check if the descriptor is done */
1380         if (desc->enc_req.done == 0)
1381                 return -1;
1382
1383         /* Read barrier: read the response only after the done bit is seen */
1384         rte_smp_rmb();
1385
1386         rte_bbdev_log_debug("DMA response desc %p", desc);
1387
1388 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1389         print_dma_enc_desc_debug_info(desc);
1390 #endif
1391
1392         *op = desc->enc_req.op_addr;
1393         /* Check the descriptor error field, return 1 on error */
1394         desc_error = check_desc_error(desc->enc_req.error);
1395         (*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
1396
1397         return 1;
1398 }
1399
1400
1401 static inline int
1402 dequeue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
1403                 uint16_t desc_offset)
1404 {
1405         union fpga_dma_desc *desc;
1406         int desc_error;
1407         /* Set descriptor */
1408         desc = q->ring_addr + ((q->head_free_desc + desc_offset)
1409                         & q->sw_ring_wrap_mask);
1410
1411         /* Verify done bit is set */
1412         if (desc->dec_req.done == 0)
1413                 return -1;
1414
1415         /* Read barrier: read the response only after the done bit is seen */
1416         rte_smp_rmb();
1417
1418 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1419         print_dma_dec_desc_debug_info(desc);
1420 #endif
1421
1422         *op = desc->dec_req.op_addr;
1423
1424         if (check_bit((*op)->ldpc_dec.op_flags,
1425                         RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
1426                 (*op)->status = 0;
1427                 return 1;
1428         }
1429
1430         /* FPGA reports the iteration count minus one, so add one back */
1431         (*op)->ldpc_dec.iter_count = desc->dec_req.iter + 1;
1432         /* CRC Check criteria */
1433         if (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))
1434                 (*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
1435         /* et_pass = 0 when decoder fails */
1436         (*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;
1437         /* Check the descriptor error field, return 1 on error */
1438         desc_error = check_desc_error(desc->dec_req.error);
1439         (*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
1440         return 1;
1441 }
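
/*
 * Status handling sketch (illustrative only): the status assembled above is a
 * bit mask of rte_bbdev_op_status values, so an application checks individual
 * bits after dequeue rather than comparing for equality, e.g.:
 *
 *     if (op->status & (1 << RTE_BBDEV_CRC_ERROR))
 *             // CRC24B check failed for this code block
 *     if (op->status & (1 << RTE_BBDEV_SYNDROME_ERROR))
 *             // decoder did not converge (et_pass == 0)
 *     if (op->status & (1 << RTE_BBDEV_DATA_ERROR))
 *             // descriptor-level error reported by the FPGA
 */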
1442
1443 static uint16_t
1444 fpga_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1445                 struct rte_bbdev_enc_op **ops, uint16_t num)
1446 {
1447         struct fpga_queue *q = q_data->queue_private;
1448         uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
1449         uint16_t i;
1450         uint16_t dequeued_cbs = 0;
1451         int ret;
1452
1453         for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
1454                 ret = dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
1455
1456                 if (ret < 0)
1457                         break;
1458
1459                 dequeued_cbs += ret;
1460
1461                 rte_bbdev_log_debug("dequeuing enc ops [%d/%d] | head %d | tail %d",
1462                                 dequeued_cbs, num, q->head_free_desc, q->tail);
1463         }
1464
1465         /* Update head */
1466         q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
1467                         q->sw_ring_wrap_mask;
1468
1469         /* Update stats */
1470         q_data->queue_stats.dequeued_count += i;
1471
1472         return i;
1473 }
1474
1475 static uint16_t
1476 fpga_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1477                 struct rte_bbdev_dec_op **ops, uint16_t num)
1478 {
1479         struct fpga_queue *q = q_data->queue_private;
1480         uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
1481         uint16_t i;
1482         uint16_t dequeued_cbs = 0;
1483         int ret;
1484
1485         for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
1486                 ret = dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
1487
1488                 if (ret < 0)
1489                         break;
1490
1491                 dequeued_cbs += ret;
1492
1493                 rte_bbdev_log_debug("dequeuing dec ops [%d/%d] | head %d | tail %d",
1494                                 dequeued_cbs, num, q->head_free_desc, q->tail);
1495         }
1496
1497         /* Update head */
1498         q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
1499                         q->sw_ring_wrap_mask;
1500
1501         /* Update stats */
1502         q_data->queue_stats.dequeued_count += i;
1503
1504         return i;
1505 }
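
/*
 * Dequeue usage sketch (illustrative only): completions are retrieved by
 * polling the bbdev burst API, which dispatches to fpga_dequeue_ldpc_dec()
 * for this PMD. dev_id, queue_id and BURST are example placeholders.
 *
 *     struct rte_bbdev_dec_op *deq_ops[BURST];
 *     uint16_t nb_deq;
 *
 *     do {
 *             nb_deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
 *                             deq_ops, BURST);
 *     } while (nb_deq == 0); // busy-poll until at least one op completes
 */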
1506
1507
1508 /* Initialization Function */
1509 static void
1510 fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
1511 {
1512         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1513
1514         dev->dev_ops = &fpga_ops;
1515         dev->enqueue_ldpc_enc_ops = fpga_enqueue_ldpc_enc;
1516         dev->enqueue_ldpc_dec_ops = fpga_enqueue_ldpc_dec;
1517         dev->dequeue_ldpc_enc_ops = fpga_dequeue_ldpc_enc;
1518         dev->dequeue_ldpc_dec_ops = fpga_dequeue_ldpc_dec;
1519
1520         ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
1521                         !strcmp(drv->driver.name,
1522                                         RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
1523         ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
1524                         pci_dev->mem_resource[0].addr;
1525
1526         rte_bbdev_log_debug(
1527                         "Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
1528                         dev->device->driver->name, dev->data->name,
1529                         (void *)pci_dev->mem_resource[0].addr,
1530                         pci_dev->mem_resource[0].phys_addr);
1531 }
1532
1533 static int
1534 fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
1535         struct rte_pci_device *pci_dev)
1536 {
1537         struct rte_bbdev *bbdev = NULL;
1538         char dev_name[RTE_BBDEV_NAME_MAX_LEN];
1539
1540         if (pci_dev == NULL) {
1541                 rte_bbdev_log(ERR, "NULL PCI device");
1542                 return -EINVAL;
1543         }
1544
1545         rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
1546
1547         /* Allocate the bbdev device structure */
1548         bbdev = rte_bbdev_allocate(pci_dev->device.name);
1549         if (bbdev == NULL)
1550                 return -ENODEV;
1551
1552         /* Allocate device private memory */
1553         bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
1554                         sizeof(struct fpga_5gnr_fec_device),
1555                         RTE_CACHE_LINE_SIZE,
1556                         pci_dev->device.numa_node);
1557
1558         if (bbdev->data->dev_private == NULL) {
1559                 rte_bbdev_log(CRIT,
1560                                 "Allocation of %zu bytes for device \"%s\" failed",
1561                                 sizeof(struct fpga_5gnr_fec_device), dev_name);
1562                 rte_bbdev_release(bbdev);
1563                 return -ENOMEM;
1564         }
1565
1566         /* Fill HW specific part of device structure */
1567         bbdev->device = &pci_dev->device;
1568         bbdev->intr_handle = &pci_dev->intr_handle;
1569         bbdev->data->socket_id = pci_dev->device.numa_node;
1570
1571         /* Invoke FEC FPGA device initialization function */
1572         fpga_5gnr_fec_init(bbdev, pci_drv);
1573
1574         rte_bbdev_log_debug("bbdev id = %u [%s]",
1575                         bbdev->data->dev_id, dev_name);
1576
1577         struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
1578         uint32_t version_id = fpga_reg_read_32(d->mmio_base,
1579                         FPGA_5GNR_FEC_VERSION_ID);
1580         rte_bbdev_log(INFO, "FEC FPGA RTL v%u.%u",
1581                 ((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
1582
1583 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1584         if (!strcmp(bbdev->device->driver->name,
1585                         RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME)))
1586                 print_static_reg_debug_info(d->mmio_base);
1587 #endif
1588         return 0;
1589 }
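
/*
 * Post-probe sketch (illustrative only): once the PCI bus has probed the
 * device, an application can query the resulting bbdev through the public
 * API; dev_id below is an example placeholder.
 *
 *     struct rte_bbdev_info info;
 *
 *     if (rte_bbdev_info_get(dev_id, &info) == 0)
 *             printf("bbdev %u: %s, socket %d\n",
 *                             dev_id, info.dev_name, info.socket_id);
 */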
1590
1591 static int
1592 fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
1593 {
1594         struct rte_bbdev *bbdev;
1595         int ret;
1596         uint8_t dev_id;
1597
1598         if (pci_dev == NULL)
1599                 return -EINVAL;
1600
1601         /* Find device */
1602         bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
1603         if (bbdev == NULL) {
1604                 rte_bbdev_log(CRIT,
1605                                 "Couldn't find HW dev \"%s\" to uninitialise it",
1606                                 pci_dev->device.name);
1607                 return -ENODEV;
1608         }
1609         dev_id = bbdev->data->dev_id;
1610
1611         /* free device private memory before close */
1612         rte_free(bbdev->data->dev_private);
1613
1614         /* Close device */
1615         ret = rte_bbdev_close(dev_id);
1616         if (ret < 0)
1617                 rte_bbdev_log(ERR,
1618                                 "Device %i failed to close during uninit: %i",
1619                                 dev_id, ret);
1620
1621         /* release bbdev from library */
1622         ret = rte_bbdev_release(bbdev);
1623         if (ret)
1624                 rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id,
1625                                 ret);
1626
1627         rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
1628
1629         return 0;
1630 }
1631
1632 /* FPGA 5GNR FEC PCI PF address map */
1633 static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
1634         {
1635                 RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
1636                                 FPGA_5GNR_FEC_PF_DEVICE_ID)
1637         },
1638         {.device_id = 0},
1639 };
1640
1641 static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
1642         .probe = fpga_5gnr_fec_probe,
1643         .remove = fpga_5gnr_fec_remove,
1644         .id_table = pci_id_fpga_5gnr_fec_pf_map,
1645         .drv_flags = RTE_PCI_DRV_NEED_MAPPING
1646 };
1647
1648 /* FPGA 5GNR FEC PCI VF address map */
1649 static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
1650         {
1651                 RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
1652                                 FPGA_5GNR_FEC_VF_DEVICE_ID)
1653         },
1654         {.device_id = 0},
1655 };
1656
1657 static struct rte_pci_driver fpga_5gnr_fec_pci_vf_driver = {
1658         .probe = fpga_5gnr_fec_probe,
1659         .remove = fpga_5gnr_fec_remove,
1660         .id_table = pci_id_fpga_5gnr_fec_vf_map,
1661         .drv_flags = RTE_PCI_DRV_NEED_MAPPING
1662 };
1663
1664
1665 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_PF_DRIVER_NAME, fpga_5gnr_fec_pci_pf_driver);
1666 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME,
1667                 pci_id_fpga_5gnr_fec_pf_map);
1668 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_VF_DRIVER_NAME, fpga_5gnr_fec_pci_vf_driver);
1669 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME,
1670                 pci_id_fpga_5gnr_fec_vf_map);
1671
1672 RTE_INIT(fpga_5gnr_fec_init_log)
1673 {
1674         fpga_5gnr_fec_logtype = rte_log_register("pmd.bb.fpga_5gnr_fec");
1675         if (fpga_5gnr_fec_logtype >= 0)
1676 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1677                 rte_log_set_level(fpga_5gnr_fec_logtype, RTE_LOG_DEBUG);
1678 #else
1679                 rte_log_set_level(fpga_5gnr_fec_logtype, RTE_LOG_NOTICE);
1680 #endif
1681 }
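
/*
 * Logging sketch (illustrative only): the log type registered above can also
 * be matched from application code to adjust this PMD's log level at runtime,
 * e.g.:
 *
 *     rte_log_set_level_pattern("pmd.bb.fpga_5gnr_fec", RTE_LOG_DEBUG);
 *
 * Note that debug-level messages may additionally require a build with
 * RTE_LIBRTE_BBDEV_DEBUG, as the defaults above suggest.
 */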