1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
7 #include <rte_common.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_errno.h>
14 #include <rte_bus_pci.h>
15 #include <rte_byteorder.h>
16 #ifdef RTE_BBDEV_OFFLOAD_COST
17 #include <rte_cycles.h>
20 #include <rte_bbdev.h>
21 #include <rte_bbdev_pmd.h>
23 #include "fpga_5gnr_fec.h"
25 /* 5GNR SW PMD logging ID */
/* File-scope log type for this PMD.
 * NOTE(review): the rte_log_register() call that initializes this is not
 * visible in this excerpt -- confirm it exists elsewhere in the file. */
26 static int fpga_5gnr_fec_logtype;
28 #ifdef RTE_LIBRTE_BBDEV_DEBUG
/*
 * Debug helper: dump one queue's ring-control register block to the log.
 * Reads each field (base addr, head addr, size, misc, enable,
 * flush-queue-enable, shadow tail, head point) from the MMIO space at
 * @offset plus the per-field register offset constants.
 * NOTE(review): this listing is a sampled excerpt; the function's storage
 * class/return type, braces and the leading rte_bbdev_log_debug() tokens
 * for each read are elided (original line numbers are non-contiguous).
 */
30 /* Read Ring Control Register of FPGA 5GNR FEC device */
32 print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
35 "FPGA MMIO base address @ %p | Ring Control Register @ offset = 0x%08"
36 PRIx32, mmio_base, offset);
38 "RING_BASE_ADDR = 0x%016"PRIx64,
39 fpga_reg_read_64(mmio_base, offset));
41 "RING_HEAD_ADDR = 0x%016"PRIx64,
42 fpga_reg_read_64(mmio_base, offset +
43 FPGA_5GNR_FEC_RING_HEAD_ADDR));
45 "RING_SIZE = 0x%04"PRIx16,
46 fpga_reg_read_16(mmio_base, offset +
47 FPGA_5GNR_FEC_RING_SIZE));
49 "RING_MISC = 0x%02"PRIx8,
50 fpga_reg_read_8(mmio_base, offset +
51 FPGA_5GNR_FEC_RING_MISC));
53 "RING_ENABLE = 0x%02"PRIx8,
54 fpga_reg_read_8(mmio_base, offset +
55 FPGA_5GNR_FEC_RING_ENABLE));
57 "RING_FLUSH_QUEUE_EN = 0x%02"PRIx8,
58 fpga_reg_read_8(mmio_base, offset +
59 FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN));
61 "RING_SHADOW_TAIL = 0x%04"PRIx16,
62 fpga_reg_read_16(mmio_base, offset +
63 FPGA_5GNR_FEC_RING_SHADOW_TAIL));
65 "RING_HEAD_POINT = 0x%04"PRIx16,
66 fpga_reg_read_16(mmio_base, offset +
67 FPGA_5GNR_FEC_RING_HEAD_POINT));
/*
 * Debug helper: dump the device-wide (static) configuration registers:
 * UL/DL weights, load-balance factors, queue map readiness, ring
 * descriptor length (in FPGA_RING_DESC_LEN_UNIT_BYTES units) and the
 * FLR timeout (in FPGA_FLR_TIMEOUT_UNIT microsecond units).
 * Low byte of 16-bit config words = UL value, high byte = DL value.
 * NOTE(review): sampled excerpt -- function type/braces elided.
 */
70 /* Read Static Register of FPGA 5GNR FEC device */
72 print_static_reg_debug_info(void *mmio_base)
74 uint16_t config = fpga_reg_read_16(mmio_base,
75 FPGA_5GNR_FEC_CONFIGURATION);
76 uint8_t qmap_done = fpga_reg_read_8(mmio_base,
77 FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
78 uint16_t lb_factor = fpga_reg_read_16(mmio_base,
79 FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);
80 uint16_t ring_desc_len = fpga_reg_read_16(mmio_base,
81 FPGA_5GNR_FEC_RING_DESC_LEN);
82 uint16_t flr_time_out = fpga_reg_read_16(mmio_base,
83 FPGA_5GNR_FEC_FLR_TIME_OUT);
85 rte_bbdev_log_debug("UL.DL Weights = %u.%u",
86 ((uint8_t)config), ((uint8_t)(config >> 8)));
87 rte_bbdev_log_debug("UL.DL Load Balance = %u.%u",
88 ((uint8_t)lb_factor), ((uint8_t)(lb_factor >> 8)));
89 rte_bbdev_log_debug("Queue-PF/VF Mapping Table = %s",
90 (qmap_done > 0) ? "READY" : "NOT-READY");
91 rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
92 ring_desc_len*FPGA_RING_DESC_LEN_UNIT_BYTES);
93 rte_bbdev_log_debug("FLR Timeout = %f usec",
94 (float)flr_time_out*FPGA_FLR_TIMEOUT_UNIT);
/*
 * Debug helper: pretty-print every field of an LDPC *decode* DMA
 * descriptor, then dump the first eight 32-bit words of the descriptor
 * raw.  Each bitfield is widened to uint32_t so the PRIu32 format
 * specifiers match the promoted argument type.
 * NOTE(review): sampled excerpt -- function type, braces and possibly
 * the first var-args token (the desc pointer for "%p") are elided.
 */
97 /* Print decode DMA Descriptor of FPGA 5GNR Decoder device */
99 print_dma_dec_desc_debug_info(union fpga_dma_desc *desc)
101 rte_bbdev_log_debug("DMA response desc %p\n"
102 "\t-- done(%"PRIu32") | iter(%"PRIu32") | et_pass(%"PRIu32")"
103 " | crcb_pass (%"PRIu32") | error(%"PRIu32")\n"
104 "\t-- qm_idx(%"PRIu32") | max_iter(%"PRIu32") | "
105 "bg_idx (%"PRIu32") | harqin_en(%"PRIu32") | zc(%"PRIu32")\n"
106 "\t-- hbstroe_offset(%"PRIu32") | num_null (%"PRIu32") "
107 "| irq_en(%"PRIu32")\n"
108 "\t-- ncb(%"PRIu32") | desc_idx (%"PRIu32") | "
109 "drop_crc24b(%"PRIu32") | RV (%"PRIu32")\n"
110 "\t-- crc24b_ind(%"PRIu32") | et_dis (%"PRIu32")\n"
111 "\t-- harq_input_length(%"PRIu32") | rm_e(%"PRIu32")\n"
112 "\t-- cbs_in_op(%"PRIu32") | in_add (0x%08"PRIx32"%08"PRIx32")"
113 "| out_add (0x%08"PRIx32"%08"PRIx32")",
115 (uint32_t)desc->dec_req.done,
116 (uint32_t)desc->dec_req.iter,
117 (uint32_t)desc->dec_req.et_pass,
118 (uint32_t)desc->dec_req.crcb_pass,
119 (uint32_t)desc->dec_req.error,
120 (uint32_t)desc->dec_req.qm_idx,
121 (uint32_t)desc->dec_req.max_iter,
122 (uint32_t)desc->dec_req.bg_idx,
123 (uint32_t)desc->dec_req.harqin_en,
124 (uint32_t)desc->dec_req.zc,
125 (uint32_t)desc->dec_req.hbstroe_offset,
126 (uint32_t)desc->dec_req.num_null,
127 (uint32_t)desc->dec_req.irq_en,
128 (uint32_t)desc->dec_req.ncb,
129 (uint32_t)desc->dec_req.desc_idx,
130 (uint32_t)desc->dec_req.drop_crc24b,
131 (uint32_t)desc->dec_req.rv,
132 (uint32_t)desc->dec_req.crc24b_ind,
133 (uint32_t)desc->dec_req.et_dis,
134 (uint32_t)desc->dec_req.harq_input_length,
135 (uint32_t)desc->dec_req.rm_e,
136 (uint32_t)desc->dec_req.cbs_in_op,
137 (uint32_t)desc->dec_req.in_addr_hi,
138 (uint32_t)desc->dec_req.in_addr_lw,
139 (uint32_t)desc->dec_req.out_addr_hi,
140 (uint32_t)desc->dec_req.out_addr_lw);
/* Raw word dump of the same descriptor for low-level debugging. */
141 uint32_t *word = (uint32_t *) desc;
142 rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
143 "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
144 word[0], word[1], word[2], word[3],
145 word[4], word[5], word[6], word[7]);
/*
 * Debug helper: pretty-print the fields of an LDPC *encode* DMA
 * descriptor (done/error status, K', E, Zc, basegraph, Qm, CRC/IRQ
 * enables, k0, Ncb, filler count), then dump the first eight raw
 * 32-bit words of the descriptor.
 * NOTE(review): sampled excerpt -- function type/braces elided; the
 * header comment below says "decode" but the fields read are enc_req.
 */
148 /* Print decode DMA Descriptor of FPGA 5GNR encoder device */
150 print_dma_enc_desc_debug_info(union fpga_dma_desc *desc)
152 rte_bbdev_log_debug("DMA response desc %p\n"
153 "%"PRIu32" %"PRIu32"\n"
154 "K' %"PRIu32" E %"PRIu32" desc %"PRIu32" Z %"PRIu32"\n"
155 "BG %"PRIu32" Qm %"PRIu32" CRC %"PRIu32" IRQ %"PRIu32"\n"
156 "k0 %"PRIu32" Ncb %"PRIu32" F %"PRIu32"\n",
158 (uint32_t)desc->enc_req.done,
159 (uint32_t)desc->enc_req.error,
161 (uint32_t)desc->enc_req.k_,
162 (uint32_t)desc->enc_req.rm_e,
163 (uint32_t)desc->enc_req.desc_idx,
164 (uint32_t)desc->enc_req.zc,
166 (uint32_t)desc->enc_req.bg_idx,
167 (uint32_t)desc->enc_req.qm_idx,
168 (uint32_t)desc->enc_req.crc_en,
169 (uint32_t)desc->enc_req.irq_en,
171 (uint32_t)desc->enc_req.k0,
172 (uint32_t)desc->enc_req.ncb,
173 (uint32_t)desc->enc_req.num_null);
/* Raw word dump of the same descriptor for low-level debugging. */
174 uint32_t *word = (uint32_t *) desc;
175 rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
176 "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
177 word[0], word[1], word[2], word[3],
178 word[4], word[5], word[6], word[7]);
/*
 * bbdev ops: setup_queues.  Verifies the PF has programmed the
 * queue-to-PF/VF map, counts/claims the HW queues bound to this
 * PF/VF (recording them in q_bound_bit_map and clearing their ring
 * registers), then allocates the SW descriptor rings and the
 * per-device flush-status word, and programs the flush-status IOVA
 * into the device (split into LW/HI 32-bit registers).
 * NOTE(review): sampled excerpt -- return type, braces, error-path
 * returns and the hw_q_num increment are elided; error paths
 * presumably return negative errno values -- confirm against the
 * full source.
 */
184 fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
186 /* Number of queues bound to a PF/VF */
187 uint32_t hw_q_num = 0;
188 uint32_t ring_size, payload, address, q_id, offset;
189 rte_iova_t phys_addr;
190 struct fpga_ring_ctrl_reg ring_reg;
191 struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
/* Bit 0 of the MAP_DONE register indicates PF configuration completed. */
193 address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
194 if (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {
196 "Queue-PF/VF mapping is not set! Was PF configured for device (%s) ?",
201 /* Clear queue registers structure */
202 memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg))
205 * If a queue is valid and mapped to a calling PF/VF the read value is
206 * replaced with a queue ID and if it's not then
207 * FPGA_INVALID_HW_QUEUE_ID is returned.
209 for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
210 uint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,
211 FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
213 rte_bbdev_log_debug("%s: queue ID: %u, registry queue ID: %u",
214 dev->device->name, q_id, hw_q_id);
216 if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {
217 fpga_dev->q_bound_bit_map |= (1ULL << q_id);
218 /* Clear queue register of found queue */
219 offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
220 (sizeof(struct fpga_ring_ctrl_reg) * q_id);
221 fpga_ring_reg_write(fpga_dev->mmio_base,
228 "No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!");
232 if (num_queues > hw_q_num) {
234 "Not enough queues for device %s! Requested: %u, available: %u",
235 dev->device->name, num_queues, hw_q_num);
/* Rings are sized for the max depth; dec descriptor is the union size. */
239 ring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);
241 /* Enforce 32 byte alignment */
242 RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
244 /* Allocate memory for SW descriptor rings */
245 fpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
246 num_queues * ring_size, RTE_CACHE_LINE_SIZE,
248 if (fpga_dev->sw_rings == NULL) {
250 "Failed to allocate memory for %s:%u sw_rings",
251 dev->device->driver->name, dev->data->dev_id);
255 fpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);
256 fpga_dev->sw_ring_size = ring_size;
257 fpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;
259 /* Allocate memory for ring flush status */
260 fpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,
261 sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
262 if (fpga_dev->flush_queue_status == NULL) {
264 "Failed to allocate memory for %s:%u flush_queue_status",
265 dev->device->driver->name, dev->data->dev_id);
269 /* Set the flush status address registers */
270 phys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);
/* 64-bit IOVA is programmed as two 32-bit halves (LW then HI). */
272 address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
273 payload = (uint32_t)(phys_addr);
274 fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
276 address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;
277 payload = (uint32_t)(phys_addr >> 32);
278 fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
/*
 * bbdev ops: close.  Frees the SW descriptor rings and the flush-status
 * word allocated by fpga_setup_queues (rte_free(NULL) is a no-op, so
 * this is safe if setup never ran).
 * NOTE(review): sampled excerpt -- return type/braces and the return
 * statement are elided.
 */
284 fpga_dev_close(struct rte_bbdev *dev)
286 struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
288 rte_free(fpga_dev->sw_rings);
289 rte_free(fpga_dev->flush_queue_status);
/*
 * bbdev ops: info_get.  Fills @dev_info with the static LDPC
 * encode/decode capability table, default queue config, alignment,
 * HARQ buffer size (read from the device once its "ready" register
 * reports 1, polled with a timeout), and the number of HW queues
 * mapped to this PF/VF (counted from the queue-map registers).
 * NOTE(review): sampled excerpt -- capability initializer sub-fields,
 * loop braces and the timeout_counter increment are elided; the
 * harq_buffer_size conversion `(harq_buf_size >> 10) + 1` presumably
 * converts to kB units -- confirm against the register spec.
 */
295 fpga_dev_info_get(struct rte_bbdev *dev,
296 struct rte_bbdev_driver_info *dev_info)
298 struct fpga_5gnr_fec_device *d = dev->data->dev_private;
301 static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
303 .type = RTE_BBDEV_OP_LDPC_ENC,
306 RTE_BBDEV_LDPC_RATE_MATCH |
307 RTE_BBDEV_LDPC_ENC_INTERRUPTS |
308 RTE_BBDEV_LDPC_CRC_24B_ATTACH,
310 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
312 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
316 .type = RTE_BBDEV_OP_LDPC_DEC,
319 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
320 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
321 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
322 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
323 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
324 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
325 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
326 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,
330 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
331 .num_buffers_hard_out =
332 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
333 .num_buffers_soft_out = 0,
336 RTE_BBDEV_END_OF_CAPABILITIES_LIST()
339 /* Check the HARQ DDR size available */
340 uint8_t timeout_counter = 0;
341 uint32_t harq_buf_ready = fpga_reg_read_32(d->mmio_base,
342 FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
/* Poll until the device reports the HARQ buffer size is valid. */
343 while (harq_buf_ready != 1) {
344 usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
346 harq_buf_ready = fpga_reg_read_32(d->mmio_base,
347 FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
348 if (timeout_counter > FPGA_HARQ_RDY_TIMEOUT) {
349 rte_bbdev_log(ERR, "HARQ Buffer not ready %d",
354 uint32_t harq_buf_size = fpga_reg_read_32(d->mmio_base,
355 FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
357 static struct rte_bbdev_queue_conf default_queue_conf;
358 default_queue_conf.socket = dev->data->socket_id;
359 default_queue_conf.queue_size = FPGA_RING_MAX_SIZE;
361 dev_info->driver_name = dev->device->driver->name;
362 dev_info->queue_size_lim = FPGA_RING_MAX_SIZE;
363 dev_info->hardware_accelerated = true;
364 dev_info->min_alignment = 64;
365 dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
366 dev_info->default_queue_conf = default_queue_conf;
367 dev_info->capabilities = bbdev_capabilities;
368 dev_info->cpu_flag_reqs = NULL;
370 /* Calculates number of queues assigned to device */
371 dev_info->max_num_queues = 0;
372 for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
373 uint32_t hw_q_id = fpga_reg_read_32(d->mmio_base,
374 FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
375 if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)
376 dev_info->max_num_queues++;
381 * Find index of queue bound to current PF/VF which is unassigned. Return -1
382 * when there is no available queue
/*
 * Scans one half of the queue space: UL queues by default (index 0 to
 * FPGA_TOTAL_NUM_QUEUES/2), or the DL half (FPGA_NUM_DL_QUEUES up to
 * FPGA_TOTAL_NUM_QUEUES) for LDPC_ENC.  A queue must be bound to this
 * PF/VF (q_bound_bit_map) and not already taken (q_assigned_bit_map);
 * the first match is marked assigned and its index returned.
 * NOTE(review): sampled excerpt -- the declaration of loop variable i,
 * the q_idx = 1ULL << i computation and the success return are elided.
 */
385 fpga_find_free_queue_idx(struct rte_bbdev *dev,
386 const struct rte_bbdev_queue_conf *conf)
388 struct fpga_5gnr_fec_device *d = dev->data->dev_private;
391 uint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;
393 if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
394 i = FPGA_NUM_DL_QUEUES;
395 range = FPGA_TOTAL_NUM_QUEUES;
398 for (; i < range; ++i) {
400 /* Check if index of queue is bound to current PF/VF */
401 if (d->q_bound_bit_map & q_idx)
402 /* Check if found queue was not already assigned */
403 if (!(d->q_assigned_bit_map & q_idx)) {
404 d->q_assigned_bit_map |= q_idx;
409 rte_bbdev_log(INFO, "Failed to find free queue on %s", dev->data->name);
/*
 * bbdev ops: queue_setup.  Claims a free HW queue index for this
 * PF/VF, allocates the per-queue software state and completion-head
 * word, initializes the ring-control shadow structure (base address,
 * head address, size, enables) and writes it to the FPGA, then stores
 * the queue in dev->data->queues[].  On any failure the claimed queue
 * index is released by clearing its bit in q_assigned_bit_map.
 * NOTE(review): sampled excerpt -- braces, several returns, the q_idx
 * declaration and some error paths are elided.  Also note the
 * un-assign mask `0xFFFFFFFF - (1ULL << q_idx)` truncates to 32 bits
 * before the subtraction widens -- for q_idx >= 32 this looks wrong;
 * verify against the full source (~(1ULL << q_idx) would be the usual
 * idiom).
 */
415 fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
416 const struct rte_bbdev_queue_conf *conf)
418 uint32_t address, ring_offset;
419 struct fpga_5gnr_fec_device *d = dev->data->dev_private;
420 struct fpga_queue *q;
423 /* Check if there is a free queue to assign */
424 q_idx = fpga_find_free_queue_idx(dev, conf);
428 /* Allocate the queue data structure. */
429 q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
430 RTE_CACHE_LINE_SIZE, conf->socket);
432 /* Mark queue as un-assigned */
433 d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
434 rte_bbdev_log(ERR, "Failed to allocate queue memory");
441 /* Set ring_base_addr */
442 q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
443 q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +
444 (d->sw_ring_size * queue_id);
446 /* Allocate memory for Completion Head variable*/
447 q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
448 sizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);
449 if (q->ring_head_addr == NULL) {
450 /* Mark queue as un-assigned */
451 d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
454 "Failed to allocate memory for %s:%u completion_head",
455 dev->device->driver->name, dev->data->dev_id);
458 /* Set ring_head_addr */
459 q->ring_ctrl_reg.ring_head_addr =
460 rte_malloc_virt2iova(q->ring_head_addr);
462 /* Clear shadow_completion_head */
463 q->shadow_completion_head = 0;
466 if (conf->queue_size > FPGA_RING_MAX_SIZE) {
467 /* Mark queue as un-assigned */
468 d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
469 rte_free(q->ring_head_addr);
472 "Size of queue is too big %d (MAX: %d ) for %s:%u",
473 conf->queue_size, FPGA_RING_MAX_SIZE,
474 dev->device->driver->name, dev->data->dev_id);
477 q->ring_ctrl_reg.ring_size = conf->queue_size;
479 /* Set Miscellaneous FPGA register*/
480 /* Max iteration number for TTI mitigation - todo */
481 q->ring_ctrl_reg.max_ul_dec = 0;
482 /* Enable max iteration number for TTI - todo */
483 q->ring_ctrl_reg.max_ul_dec_en = 0;
485 /* Enable the ring */
486 q->ring_ctrl_reg.enable = 1;
488 /* Set FPGA head_point and tail registers */
489 q->ring_ctrl_reg.head_point = q->tail = 0;
491 /* Set FPGA shadow_tail register */
492 q->ring_ctrl_reg.shadow_tail = q->tail;
494 /* Calculates the ring offset for found queue */
495 ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
496 (sizeof(struct fpga_ring_ctrl_reg) * q_idx);
498 /* Set FPGA Ring Control Registers */
499 fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);
501 /* Store MMIO register of shadow_tail */
502 address = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;
503 q->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);
505 q->head_free_desc = q->tail;
/* queue_size is required to be a power of two for this mask to work. */
508 q->sw_ring_wrap_mask = conf->queue_size - 1;
510 rte_bbdev_log_debug("Setup dev%u q%u: queue_idx=%u",
511 dev->data->dev_id, queue_id, q->q_idx);
513 dev->data->queues[queue_id].queue_private = q;
515 rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA queue[%d]",
518 #ifdef RTE_LIBRTE_BBDEV_DEBUG
519 /* Read FPGA Ring Control Registers after configuration*/
520 print_ring_reg_debug_info(d->mmio_base, ring_offset);
/*
 * bbdev ops: queue_release.  Disables the queue in HW (writes 0 to the
 * ring-enable register, then clears the full ring-control block),
 * releases the queue's assignment bit, frees the completion-head word
 * and clears the queue_private slot.
 * NOTE(review): sampled excerpt -- braces, the q != NULL guard, the
 * rte_free(q) call and the return are elided; the same 32-bit
 * un-assign mask concern noted in fpga_queue_setup applies here.
 */
526 fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
528 struct fpga_5gnr_fec_device *d = dev->data->dev_private;
529 struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
530 struct fpga_ring_ctrl_reg ring_reg;
533 rte_bbdev_log_debug("FPGA Queue[%d] released", queue_id);
536 memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
537 offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
538 (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
/* Disable the ring before wiping its control registers. */
540 fpga_reg_write_8(d->mmio_base,
541 offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
542 /* Clear queue registers */
543 fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
545 /* Mark the Queue as un-assigned */
546 d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q->q_idx));
547 rte_free(q->ring_head_addr);
549 dev->data->queues[queue_id].queue_private = NULL;
555 /* Function starts a device queue. */
/*
 * bbdev ops: queue_start.  Resets the SW tail/head-free indices,
 * zeroes the HW head-point and shadow-tail registers, then sets the
 * ring-enable register to 0x01.
 * NOTE(review): sampled excerpt -- return type, the debug-build NULL
 * check condition, braces and the return are elided.
 */
557 fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
559 struct fpga_5gnr_fec_device *d = dev->data->dev_private;
560 #ifdef RTE_LIBRTE_BBDEV_DEBUG
562 rte_bbdev_log(ERR, "Invalid device pointer");
566 struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
567 uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
568 (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
569 uint8_t enable = 0x01;
570 uint16_t zero = 0x0000;
572 /* Clear queue head and tail variables */
573 q->tail = q->head_free_desc = 0;
575 /* Clear FPGA head_point and tail registers */
576 fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
578 fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
582 fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
585 rte_bbdev_log_debug("FPGA Queue[%d] started", queue_id);
589 /* Function stops a device queue. */
/*
 * bbdev ops: queue_stop.  Triggers a HW queue flush by setting the
 * flush-queue-enable bit, polls the per-queue byte in
 * flush_queue_status (written back by the FPGA via DMA) until the
 * flush completes or the timeout budget (FPGA_QUEUE_FLUSH_TIMEOUT_US /
 * FPGA_TIMEOUT_CHECK_INTERVAL polls) expires, then disables the ring.
 * NOTE(review): sampled excerpt -- the counter declaration/increment,
 * parts of the while condition, braces and the return are elided.
 */
591 fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
593 struct fpga_5gnr_fec_device *d = dev->data->dev_private;
594 #ifdef RTE_LIBRTE_BBDEV_DEBUG
596 rte_bbdev_log(ERR, "Invalid device pointer");
600 struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
601 uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
602 (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
603 uint8_t payload = 0x01;
605 uint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /
606 FPGA_TIMEOUT_CHECK_INTERVAL;
608 /* Set flush_queue_en bit to trigger queue flushing */
609 fpga_reg_write_8(d->mmio_base,
610 offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);
612 /** Check if queue flush is completed.
613 * FPGA will update the completion flag after queue flushing is
614 * completed. If completion flag is not updated within 1ms it is
615 * considered as a failure.
617 while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx)
619 if (counter > timeout) {
620 rte_bbdev_log(ERR, "FPGA Queue Flush failed for queue %d",
624 usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
/* Flush done (or timed out): disable the ring. */
630 fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
633 rte_bbdev_log_debug("FPGA Queue[%d] stopped", queue_id);
/* bbdev driver operations table registered for this PMD. */
637 static const struct rte_bbdev_ops fpga_ops = {
638 .setup_queues = fpga_setup_queues,
639 .close = fpga_dev_close,
640 .info_get = fpga_dev_info_get,
641 .queue_setup = fpga_queue_setup,
642 .queue_stop = fpga_queue_stop,
643 .queue_start = fpga_queue_start,
644 .queue_release = fpga_queue_release,
/*
 * Kick the HW: advance the SW tail by @num_desc (wrapped by the ring
 * mask) and write the new tail to the queue's shadow-tail MMIO
 * register.  When RTE_BBDEV_OFFLOAD_COST is enabled, the MMIO write is
 * timed with rte_rdtsc_precise() and accumulated into
 * queue_stats->acc_offload_cycles.
 * NOTE(review): sampled excerpt -- return type, braces, #else/#endif
 * markers and a possible write barrier around the MMIO write are
 * elided.
 */
648 fpga_dma_enqueue(struct fpga_queue *q, uint16_t num_desc,
649 struct rte_bbdev_stats *queue_stats)
651 #ifdef RTE_BBDEV_OFFLOAD_COST
652 uint64_t start_time = 0;
653 queue_stats->acc_offload_cycles = 0;
655 RTE_SET_USED(queue_stats);
658 /* Update tail and shadow_tail register */
659 q->tail = (q->tail + num_desc) & q->sw_ring_wrap_mask;
663 #ifdef RTE_BBDEV_OFFLOAD_COST
664 /* Start time measurement for enqueue function offload. */
665 start_time = rte_rdtsc_precise();
667 mmio_write_16(q->shadow_tail_addr, q->tail);
669 #ifdef RTE_BBDEV_OFFLOAD_COST
671 queue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
675 /* Read flag value 0/1/ from bitmap */
/* Returns non-zero when any bit of @bitmask is set in @bitmap.
 * NOTE(review): storage class/return type line elided in this excerpt. */
677 check_bit(uint32_t bitmap, uint32_t bitmask)
679 return bitmap & bitmask;
682 /* Print an error if a descriptor error has occurred.
683 * Return 0 on success, 1 on failure
/*
 * Maps the HW descriptor error code to a human-readable log message.
 * NOTE(review): sampled excerpt -- the per-case `break`s, the
 * success/failure `return` lines and the closing brace are elided.
 */
686 check_desc_error(uint32_t error_code) {
687 switch (error_code) {
688 case DESC_ERR_NO_ERR:
690 case DESC_ERR_K_P_OUT_OF_RANGE:
691 rte_bbdev_log(ERR, "Encode block size K' is out of range");
693 case DESC_ERR_Z_C_NOT_LEGAL:
694 rte_bbdev_log(ERR, "Zc is illegal");
696 case DESC_ERR_DESC_OFFSET_ERR:
698 "Queue offset does not meet the expectation in the FPGA"
701 case DESC_ERR_DESC_READ_FAIL:
702 rte_bbdev_log(ERR, "Unsuccessful completion for descriptor read");
704 case DESC_ERR_DESC_READ_TIMEOUT:
705 rte_bbdev_log(ERR, "Descriptor read time-out");
707 case DESC_ERR_DESC_READ_TLP_POISONED:
708 rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
710 case DESC_ERR_CB_READ_FAIL:
711 rte_bbdev_log(ERR, "Unsuccessful completion for code block");
713 case DESC_ERR_CB_READ_TIMEOUT:
714 rte_bbdev_log(ERR, "Code block read time-out");
716 case DESC_ERR_CB_READ_TLP_POISONED:
717 rte_bbdev_log(ERR, "Code block read TLP poisoned");
719 case DESC_ERR_HBSTORE_ERR:
720 rte_bbdev_log(ERR, "Hbstroe exceeds HARQ buffer size.");
723 rte_bbdev_log(ERR, "Descriptor error unknown error code %u",
730 /* Compute value of k0.
731 * Based on 3GPP 38.212 Table 5.4.2.1-2
732 * Starting position of different redundancy versions, k0
/*
 * @n_cb: circular buffer size; @z_c: lifting size; @bg: basegraph (1
 * or 2); @rv_index: redundancy version 0-3.  For the full-buffer case
 * the k0 start position is a fixed multiple of Zc per (bg, rv); for
 * the limited-buffer (LBRM) case it is scaled by n_cb/n.
 * NOTE(review): sampled excerpt -- the rv_index == 0 early return, the
 * n_cb == n branch condition and the ceil/division tail of the LBRM
 * expressions are elided.
 */
734 static inline uint16_t
735 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
739 uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
742 return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
743 else if (rv_index == 2)
744 return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
746 return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
748 /* LBRM case - includes a division by N */
750 return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
752 else if (rv_index == 2)
753 return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
756 return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
761 * Set DMA descriptor for encode operation (1 Code Block)
764 * Pointer to a single encode operation.
766 * Pointer to DMA descriptor.
768 * Pointer to pointer to input data which will be decoded.
770 * E value (length of output in bits).
772 * Ncb value (size of the soft buffer).
774 * Length of output buffer
776 * Input offset in rte_mbuf structure. It is used for calculating the point
777 * where data is starting.
779 * Output offset in rte_mbuf structure. It is used for calculating the point
780 * where hard output data will be stored.
782 * Number of CBs contained in one operation.
/*
 * Fills one LDPC-encode DMA descriptor from the op parameters and the
 * mbuf IOVAs (split into hi/lw 32-bit halves for the HW).
 * NOTE(review): sampled excerpt -- braces, a few field assignments
 * (e.g. rm_e/k_, the op back-pointer near "Save software context") and
 * the return are elided.
 */
785 fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
786 struct fpga_dma_enc_desc *desc, struct rte_mbuf *input,
787 struct rte_mbuf *output, uint16_t k_, uint16_t e,
788 uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
796 desc->desc_idx = desc_offset;
797 desc->zc = op->ldpc_enc.z_c;
798 desc->bg_idx = op->ldpc_enc.basegraph - 1;
/* HW encodes Qm as q_m/2 (q_m is even: 2/4/6/8). */
799 desc->qm_idx = op->ldpc_enc.q_m / 2;
800 desc->crc_en = check_bit(op->ldpc_enc.op_flags,
801 RTE_BBDEV_LDPC_CRC_24B_ATTACH);
803 desc->k0 = get_k0(op->ldpc_enc.n_cb, op->ldpc_enc.z_c,
804 op->ldpc_enc.basegraph, op->ldpc_enc.rv_index);
805 desc->ncb = op->ldpc_enc.n_cb;
806 desc->num_null = op->ldpc_enc.n_filler;
807 /* Set inbound data buffer address */
808 desc->in_addr_hi = (uint32_t)(
809 rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
810 desc->in_addr_lw = (uint32_t)(
811 rte_pktmbuf_mtophys_offset(input, in_offset));
813 desc->out_addr_hi = (uint32_t)(
814 rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
815 desc->out_addr_lw = (uint32_t)(
816 rte_pktmbuf_mtophys_offset(output, out_offset));
817 /* Save software context needed for dequeue */
819 /* Set total number of CBs in an op */
820 desc->cbs_in_op = cbs_in_op;
825 * Set DMA descriptor for decode operation (1 Code Block)
828 * Pointer to a single encode operation.
830 * Pointer to DMA descriptor.
832 * Pointer to pointer to input data which will be decoded.
834 * Input offset in rte_mbuf structure. It is used for calculating the point
835 * where data is starting.
837 * Output offset in rte_mbuf structure. It is used for calculating the point
838 * where hard output data will be stored.
840 * Number of CBs contained in one operation.
/*
 * Fills one LDPC-decode DMA descriptor: input/output mbuf IOVAs (hi/lw
 * halves), rate-matching E, HARQ combine input length, early-termination
 * disable, RV, CRC24B check/drop flags, Ncb/filler/Zc/basegraph/max_iter
 * and Qm.  The HARQ store offset is programmed in 1 kB units
 * (harq_offset >> 10).
 * NOTE(review): sampled excerpt -- braces, some early assignments
 * (done/error/irq_en etc.), the op back-pointer near "Save software
 * context" and the return are elided.
 */
843 fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
844 struct fpga_dma_dec_desc *desc,
845 struct rte_mbuf *input, struct rte_mbuf *output,
846 uint16_t harq_in_length,
847 uint32_t in_offset, uint32_t out_offset,
848 uint32_t harq_offset,
849 uint16_t desc_offset,
855 /* Set inbound data buffer address */
856 desc->in_addr_hi = (uint32_t)(
857 rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
858 desc->in_addr_lw = (uint32_t)(
859 rte_pktmbuf_mtophys_offset(input, in_offset));
860 desc->rm_e = op->ldpc_dec.cb_params.e;
861 desc->harq_input_length = harq_in_length;
862 desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
863 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
864 desc->rv = op->ldpc_dec.rv_index;
865 desc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,
866 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
867 desc->drop_crc24b = check_bit(op->ldpc_dec.op_flags,
868 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);
869 desc->desc_idx = desc_offset;
870 desc->ncb = op->ldpc_dec.n_cb;
871 desc->num_null = op->ldpc_dec.n_filler;
/* HARQ buffer offset is stored in 1 kB granularity. */
872 desc->hbstroe_offset = harq_offset >> 10;
873 desc->zc = op->ldpc_dec.z_c;
874 desc->harqin_en = check_bit(op->ldpc_dec.op_flags,
875 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
876 desc->bg_idx = op->ldpc_dec.basegraph - 1;
877 desc->max_iter = op->ldpc_dec.iter_max;
878 desc->qm_idx = op->ldpc_dec.q_m / 2;
879 desc->out_addr_hi = (uint32_t)(
880 rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
881 desc->out_addr_lw = (uint32_t)(
882 rte_pktmbuf_mtophys_offset(output, out_offset));
883 /* Save software context needed for dequeue */
885 /* Set total number of CBs in an op */
886 desc->cbs_in_op = cbs_in_op;
891 #ifdef RTE_LIBRTE_BBDEV_DEBUG
892 /* Validates LDPC encoder parameters */
894 validate_enc_op(struct rte_bbdev_enc_op *op __rte_unused)
896 struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;
897 struct rte_bbdev_op_enc_ldpc_cb_params *cb = NULL;
898 struct rte_bbdev_op_enc_ldpc_tb_params *tb = NULL;
901 if (ldpc_enc->input.length >
902 RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {
903 rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
904 ldpc_enc->input.length,
905 RTE_BBDEV_LDPC_MAX_CB_SIZE);
909 if (op->mempool == NULL) {
910 rte_bbdev_log(ERR, "Invalid mempool pointer");
913 if (ldpc_enc->input.data == NULL) {
914 rte_bbdev_log(ERR, "Invalid input pointer");
917 if (ldpc_enc->output.data == NULL) {
918 rte_bbdev_log(ERR, "Invalid output pointer");
921 if ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {
923 "basegraph (%u) is out of range 1 <= value <= 2",
924 ldpc_enc->basegraph);
927 if (ldpc_enc->code_block_mode > 1) {
929 "code_block_mode (%u) is out of range 0:Tb 1:CB",
930 ldpc_enc->code_block_mode);
934 if (ldpc_enc->code_block_mode == 0) {
935 tb = &ldpc_enc->tb_params;
938 "c (%u) is out of range 1 <= value <= %u",
939 tb->c, RTE_BBDEV_LDPC_MAX_CODE_BLOCKS);
942 if (tb->cab > tb->c) {
944 "cab (%u) is greater than c (%u)",
948 if ((tb->ea < RTE_BBDEV_LDPC_MIN_CB_SIZE)
949 && tb->r < tb->cab) {
951 "ea (%u) is less than %u or it is not even",
952 tb->ea, RTE_BBDEV_LDPC_MIN_CB_SIZE);
955 if ((tb->eb < RTE_BBDEV_LDPC_MIN_CB_SIZE)
956 && tb->c > tb->cab) {
958 "eb (%u) is less than %u",
959 tb->eb, RTE_BBDEV_LDPC_MIN_CB_SIZE);
962 if (tb->r > (tb->c - 1)) {
964 "r (%u) is greater than c - 1 (%u)",
969 cb = &ldpc_enc->cb_params;
970 if (cb->e < RTE_BBDEV_LDPC_MIN_CB_SIZE) {
972 "e (%u) is less than %u or it is not even",
973 cb->e, RTE_BBDEV_LDPC_MIN_CB_SIZE);
/*
 * Appends @len bytes to the data segment @m and accounts the growth on
 * the chain head @m_head's pkt_len.  Fails when @m lacks tailroom.
 * NOTE(review): sampled excerpt -- storage class/return type, braces
 * and the returns (presumably NULL on failure, tail pointer on
 * success) are elided.
 */
982 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
984 if (unlikely(len > rte_pktmbuf_tailroom(m)))
987 char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
988 m->data_len = (uint16_t)(m->data_len + len);
989 m_head->pkt_len = (m_head->pkt_len + len);
993 #ifdef RTE_LIBRTE_BBDEV_DEBUG
/*
 * Debug-build sanity checks for an LDPC decode op: non-NULL mempool,
 * rv_index <= 3, iter_max != 0, code_block_mode in 0..1, and per-mode
 * c/cab (TB) or e-range (CB) checks.
 * NOTE(review): sampled excerpt -- the `return -1` after each failed
 * check, some conditions and the final `return 0` are elided.
 */
994 /* Validates LDPC decoder parameters */
996 validate_dec_op(struct rte_bbdev_dec_op *op __rte_unused)
998 struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
999 struct rte_bbdev_op_dec_ldpc_cb_params *cb = NULL;
1000 struct rte_bbdev_op_dec_ldpc_tb_params *tb = NULL;
1002 if (op->mempool == NULL) {
1003 rte_bbdev_log(ERR, "Invalid mempool pointer");
1006 if (ldpc_dec->rv_index > 3) {
1008 "rv_index (%u) is out of range 0 <= value <= 3",
1009 ldpc_dec->rv_index);
1013 if (ldpc_dec->iter_max == 0) {
1015 "iter_max (%u) is equal to 0",
1016 ldpc_dec->iter_max);
1020 if (ldpc_dec->code_block_mode > 1) {
1022 "code_block_mode (%u) is out of range 0 <= value <= 1",
1023 ldpc_dec->code_block_mode);
1027 if (ldpc_dec->code_block_mode == 0) {
1028 tb = &ldpc_dec->tb_params;
1031 "c (%u) is out of range 1 <= value <= %u",
1032 tb->c, RTE_BBDEV_LDPC_MAX_CODE_BLOCKS);
1035 if (tb->cab > tb->c) {
1037 "cab (%u) is greater than c (%u)",
1042 cb = &ldpc_dec->cb_params;
1043 if (cb->e < RTE_BBDEV_LDPC_MIN_CB_SIZE) {
1045 "e (%u) is out of range %u <= value <= %u",
1046 cb->e, RTE_BBDEV_LDPC_MIN_CB_SIZE,
1047 RTE_BBDEV_LDPC_MAX_CB_SIZE);
/*
 * Enqueues one LDPC encode op (single code block) at @desc_offset past
 * the current SW tail: computes K' = K - n_filler (K from basegraph
 * and Zc), derives input length (minus attached CRC24B bits) and
 * output length (ceil(E/8)), appends output space on the mbuf, fills
 * the DMA descriptor in the ring and updates output length accounting.
 * Emits a data error if the mbuf length disagrees with the computed CB
 * size.
 * NOTE(review): sampled excerpt -- return type, braces, several
 * declarations (K, k_, ret), the CRC bit-count assignment and the
 * returns are elided.
 */
1057 enqueue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,
1058 uint16_t desc_offset)
1060 union fpga_dma_desc *desc;
1062 uint8_t c, crc24_bits = 0;
1063 struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1064 uint16_t in_offset = enc->input.offset;
1065 uint16_t out_offset = enc->output.offset;
1066 struct rte_mbuf *m_in = enc->input.data;
1067 struct rte_mbuf *m_out = enc->output.data;
1068 struct rte_mbuf *m_out_head = enc->output.data;
1069 uint32_t in_length, out_length, e;
1070 uint16_t total_left = enc->input.length;
1071 uint16_t ring_offset;
1074 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1075 /* Validate op structure */
1077 if (validate_enc_op(op) == -1) {
1078 rte_bbdev_log(ERR, "LDPC encoder validation failed");
1083 /* Clear op status */
1086 if (m_in == NULL || m_out == NULL) {
1087 rte_bbdev_log(ERR, "Invalid mbuf pointer");
1088 op->status = 1 << RTE_BBDEV_DATA_ERROR;
1092 if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)
1095 if (enc->code_block_mode == 0) {
1096 /* For Transport Block mode */
1098 c = enc->tb_params.c;
1099 e = enc->tb_params.ea;
1100 } else { /* For Code Block mode */
1102 e = enc->cb_params.e;
1105 /* Update total_left */
/* K = 22*Zc (BG1) or 10*Zc (BG2); K' excludes filler bits. */
1106 K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1107 k_ = K - enc->n_filler;
1108 in_length = (k_ - crc24_bits) >> 3;
1109 out_length = (e + 7) >> 3;
1111 total_left = rte_pktmbuf_data_len(m_in) - in_offset;
1113 /* Update offsets */
1114 if (total_left != in_length) {
1115 op->status |= 1 << RTE_BBDEV_DATA_ERROR;
1117 "Mismatch between mbuf length and included CBs sizes %d",
1121 mbuf_append(m_out_head, m_out, out_length);
1123 /* Offset into the ring */
1124 ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
1125 /* Setup DMA Descriptor */
1126 desc = q->ring_addr + ring_offset;
1128 ret = fpga_dma_desc_te_fill(op, &desc->enc_req, m_in, m_out,
1129 k_, e, in_offset, out_offset, ring_offset, c);
1130 if (unlikely(ret < 0))
1133 /* Update lengths */
1134 total_left -= in_length;
1135 op->ldpc_enc.output.length += out_length;
1137 if (total_left > 0) {
1139 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1140 total_left, in_length);
1144 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1145 print_dma_enc_desc_debug_info(desc);
/*
 * Enqueues one LDPC decode op (single code block) at @desc_offset past
 * the current SW tail: computes the hard-output length from K minus
 * CRC24B overlap and filler bits, derives the HARQ combine input
 * length (capped at Ncb) and, when HARQ output combining is enabled,
 * the HARQ output length from k0/E/filler per the rate-matching rules.
 * The HARQ offset must be 1 kB aligned (stored in the descriptor as
 * harq_offset >> 10).  Fills the ring descriptor and updates output
 * length accounting, flagging a data error on a CB-size mismatch.
 * NOTE(review): sampled excerpt -- return type, braces, some
 * declarations/assignments (in_length, ret, a k0 adjustment branch
 * body) and the returns are elided.
 */
1151 enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
1152 uint16_t desc_offset)
1154 union fpga_dma_desc *desc;
1156 uint16_t ring_offset;
1158 uint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;
1159 uint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;
1160 uint16_t crc24_overlap = 0;
1161 struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1162 struct rte_mbuf *m_in = dec->input.data;
1163 struct rte_mbuf *m_out = dec->hard_output.data;
1164 struct rte_mbuf *m_out_head = dec->hard_output.data;
1165 uint16_t in_offset = dec->input.offset;
1166 uint16_t out_offset = dec->hard_output.offset;
1167 uint32_t harq_offset = 0;
1169 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1170 /* Validate op structure */
1171 if (validate_dec_op(op) == -1) {
1172 rte_bbdev_log(ERR, "LDPC decoder validation failed");
1177 /* Clear op status */
1180 /* Setup DMA Descriptor */
1181 ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
1182 desc = q->ring_addr + ring_offset;
1184 if (m_in == NULL || m_out == NULL) {
1185 rte_bbdev_log(ERR, "Invalid mbuf pointer");
1186 op->status = 1 << RTE_BBDEV_DATA_ERROR;
1191 e = dec->cb_params.e;
1193 if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
/* sys_cols: systematic columns (22 for BG1, 10 for BG2). */
1196 sys_cols = (dec->basegraph == 1) ? 22 : 10;
1197 K = sys_cols * dec->z_c;
1198 parity_offset = K - 2 * dec->z_c;
1200 out_length = ((K - crc24_overlap - dec->n_filler) >> 3);
1202 seg_total_left = dec->input.length;
1204 if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1205 harq_in_length = RTE_MIN(dec->harq_combined_input.length,
1206 (uint32_t)dec->n_cb);
1209 if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1210 k0 = get_k0(dec->n_cb, dec->z_c,
1211 dec->basegraph, dec->rv_index);
1212 if (k0 > parity_offset)
1215 l = k0 + e + dec->n_filler;
1216 harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l),
1217 dec->n_cb - dec->n_filler);
1218 dec->harq_combined_output.length = harq_out_length;
1221 mbuf_append(m_out_head, m_out, out_length);
1222 if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
1223 harq_offset = dec->harq_combined_input.offset;
1224 else if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
1225 harq_offset = dec->harq_combined_output.offset;
/* HW requires the HARQ offset to be 1 kB aligned. */
1227 if ((harq_offset & 0x3FF) > 0) {
1228 rte_bbdev_log(ERR, "Invalid HARQ offset %d", harq_offset);
1229 op->status = 1 << RTE_BBDEV_DATA_ERROR;
1233 ret = fpga_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
1234 harq_in_length, in_offset, out_offset, harq_offset,
1236 if (unlikely(ret < 0))
1238 /* Update lengths */
1239 seg_total_left -= in_length;
1240 op->ldpc_dec.hard_output.length += out_length;
1241 if (seg_total_left > 0) {
1243 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1244 seg_total_left, in_length);
1248 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1249 print_dma_dec_desc_debug_info(desc);
/*
 * Burst-enqueue up to @num LDPC encode ops on the queue behind @q_data.
 *
 * Computes the free descriptor count, enqueues ops one CB at a time,
 * arms the interrupt on the last filled descriptor, rings the doorbell
 * via fpga_dma_enqueue() and updates the queue statistics. Returns the
 * number of ops enqueued (return elided in this view).
 */
1256 fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1257 struct rte_bbdev_enc_op **ops, uint16_t num)
1259 uint16_t i, total_enqueued_cbs = 0;
1262 struct fpga_queue *q = q_data->queue_private;
1263 union fpga_dma_desc *desc;
1265 /* Check if queue is not full */
1266 if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
1270 /* Calculates available space: head/tail distance on the circular
 * ring, keeping one slot free to distinguish full from empty.
 */
1271 avail = (q->head_free_desc > q->tail) ?
1272 q->head_free_desc - q->tail - 1 :
1273 q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
1275 for (i = 0; i < num; ++i) {
1277 /* Check if there is available space for further
1280 if (unlikely(avail - 1 < 0))
1283 enqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i],
1284 total_enqueued_cbs);
/* Negative return means the op could not be filled; stop here */
1286 if (enqueued_cbs < 0)
1289 total_enqueued_cbs += enqueued_cbs;
1291 rte_bbdev_log_debug("enqueuing enc ops [%d/%d] | head %d | tail %d",
1292 total_enqueued_cbs, num,
1293 q->head_free_desc, q->tail);
1296 /* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
 * only when all previous CBs were already processed.
 */
1299 desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1300 & q->sw_ring_wrap_mask);
1301 desc->enc_req.irq_en = q->irq_enable;
/* Ring the HW doorbell for the newly filled descriptors */
1303 fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
/* i ops accepted; the remainder (num - i) are counted as errors */
1306 q_data->queue_stats.enqueued_count += i;
1307 q_data->queue_stats.enqueue_err_count += num - i;
/*
 * Burst-enqueue up to @num LDPC decode ops on the queue behind @q_data.
 *
 * Mirrors fpga_enqueue_ldpc_enc(): computes free space, enqueues ops
 * one CB at a time, arms the interrupt on the last descriptor and rings
 * the doorbell. Returns the number of ops enqueued (return elided in
 * this view).
 */
1313 fpga_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1314 struct rte_bbdev_dec_op **ops, uint16_t num)
1316 uint16_t i, total_enqueued_cbs = 0;
1319 struct fpga_queue *q = q_data->queue_private;
1320 union fpga_dma_desc *desc;
1322 /* Check if queue is not full */
1323 if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
1327 /* Calculates available space: circular-ring distance, one slot
 * reserved to distinguish full from empty.
 */
1328 avail = (q->head_free_desc > q->tail) ?
1329 q->head_free_desc - q->tail - 1 :
1330 q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
1332 for (i = 0; i < num; ++i) {
1334 /* Check if there is available space for further
1337 if (unlikely(avail - 1 < 0))
1340 enqueued_cbs = enqueue_ldpc_dec_one_op_cb(q, ops[i],
1341 total_enqueued_cbs);
/* Negative return means the op could not be filled; stop here */
1343 if (enqueued_cbs < 0)
1346 total_enqueued_cbs += enqueued_cbs;
1348 rte_bbdev_log_debug("enqueuing dec ops [%d/%d] | head %d | tail %d",
1349 total_enqueued_cbs, num,
1350 q->head_free_desc, q->tail);
1354 q_data->queue_stats.enqueued_count += i;
1355 q_data->queue_stats.enqueue_err_count += num - i;
1357 /* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
 * only when all previous CBs were already processed.
 */
1360 desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1361 & q->sw_ring_wrap_mask);
/* NOTE(review): enc_req.irq_en is written on the *decode* path.
 * desc is a union, so this presumably aliases the same irq_en bit
 * position in the dec_req layout — confirm against the descriptor
 * definitions in fpga_5gnr_fec.h.
 */
1362 desc->enc_req.irq_en = q->irq_enable;
1363 fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
/*
 * Dequeue one completed LDPC encode op (CB mode) from queue @q.
 *
 * Polls the done bit of the descriptor at (head_free_desc + desc_offset);
 * when set, extracts the op pointer and translates the descriptor error
 * field into the op status. Returns the number of dequeued CBs, or a
 * not-ready indication when done == 0 (returns elided in this view).
 */
1369 dequeue_ldpc_enc_one_op_cb(struct fpga_queue *q,
1370 struct rte_bbdev_enc_op **op,
1371 uint16_t desc_offset)
1373 union fpga_dma_desc *desc;
1375 /* Set current desc */
1376 desc = q->ring_addr + ((q->head_free_desc + desc_offset)
1377 & q->sw_ring_wrap_mask)
/* HW sets done when the CB has been fully processed */
1380 if (desc->enc_req.done == 0)
1383 /* make sure the response is read atomically */
1386 rte_bbdev_log_debug("DMA response desc %p", desc);
1388 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1389 print_dma_enc_desc_debug_info(desc);
/* The descriptor carries the originating op pointer back to us */
1392 *op = desc->enc_req.op_addr;
1393 /* Check the descriptor error field, return 1 on error */
1394 desc_error = check_desc_error(desc->enc_req.error);
1395 (*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
/*
 * Dequeue one completed LDPC decode op (CB mode) from queue @q.
 *
 * Polls the descriptor done bit, then extracts the op pointer, the
 * iteration count and the CRC/syndrome/descriptor error indications
 * into the op status. Returns the number of dequeued CBs, or a
 * not-ready indication when done == 0 (returns elided in this view).
 */
1402 dequeue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
1403 uint16_t desc_offset)
1405 union fpga_dma_desc *desc;
1407 /* Set descriptor */
1408 desc = q->ring_addr + ((q->head_free_desc + desc_offset)
1409 & q->sw_ring_wrap_mask);
1411 /* Verify done bit is set */
1412 if (desc->dec_req.done == 0)
1415 /* make sure the response is read atomically */
1418 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1419 print_dma_dec_desc_debug_info(desc);
1422 *op = desc->dec_req.op_addr;
/* HARQ loopback ops carry no decode result; handling elided here */
1424 if (check_bit((*op)->ldpc_dec.op_flags,
1425 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
1430 /* FPGA reports iterations based on round-up minus 1 */
1431 (*op)->ldpc_dec.iter_count = desc->dec_req.iter + 1;
1432 /* CRC Check criteria: CRC24B was present but did not pass */
1433 if (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))
1434 (*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
1435 /* et_pass = 0 when decoder fails (early-termination check) */
1436 (*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;
1437 /* Check the descriptor error field, return 1 on error */
1438 desc_error = check_desc_error(desc->dec_req.error);
1439 (*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
/*
 * Burst-dequeue up to @num completed LDPC encode ops.
 *
 * Walks completed descriptors starting at head_free_desc, stops at the
 * first not-yet-done descriptor, then advances head_free_desc and
 * updates stats. Returns the number of ops dequeued (elided in view).
 */
1444 fpga_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1445 struct rte_bbdev_enc_op **ops, uint16_t num)
1447 struct fpga_queue *q = q_data->queue_private;
/* Number of descriptors the HW may have completed (tail - head) */
1448 uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
1450 uint16_t dequeued_cbs = 0;
1453 for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
1454 ret = dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
1459 dequeued_cbs += ret;
1461 rte_bbdev_log_debug("dequeuing enc ops [%d/%d] | head %d | tail %d",
1462 dequeued_cbs, num, q->head_free_desc, q->tail);
/* Hand the consumed descriptors back to the free pool */
1466 q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
1467 q->sw_ring_wrap_mask;
1470 q_data->queue_stats.dequeued_count += i;
/*
 * Burst-dequeue up to @num completed LDPC decode ops.
 *
 * Mirrors fpga_dequeue_ldpc_enc(): drains completed descriptors,
 * advances head_free_desc and updates stats. Returns the number of
 * ops dequeued (elided in this view).
 */
1476 fpga_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1477 struct rte_bbdev_dec_op **ops, uint16_t num)
1479 struct fpga_queue *q = q_data->queue_private;
/* Number of descriptors the HW may have completed (tail - head) */
1480 uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
1482 uint16_t dequeued_cbs = 0;
1485 for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
1486 ret = dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
1491 dequeued_cbs += ret;
1493 rte_bbdev_log_debug("dequeuing dec ops [%d/%d] | head %d | tail %d",
1494 dequeued_cbs, num, q->head_free_desc, q->tail);
/* Hand the consumed descriptors back to the free pool */
1498 q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
1499 q->sw_ring_wrap_mask;
1502 q_data->queue_stats.dequeued_count += i;
1508 /* Initialization Function */
/*
 * Wire up the bbdev ops and datapath entry points for a newly probed
 * device, record whether it was matched by the PF or VF driver, and
 * map the register base from PCI BAR 0.
 */
1510 fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
1512 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1514 dev->dev_ops = &fpga_ops;
1515 dev->enqueue_ldpc_enc_ops = fpga_enqueue_ldpc_enc;
1516 dev->enqueue_ldpc_dec_ops = fpga_enqueue_ldpc_dec;
1517 dev->dequeue_ldpc_enc_ops = fpga_dequeue_ldpc_enc;
1518 dev->dequeue_ldpc_dec_ops = fpga_dequeue_ldpc_dec;
/* PF vs VF is decided by which driver name matched at probe time */
1520 ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
1521 !strcmp(drv->driver.name,
1522 RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
/* Register access goes through BAR 0 */
1523 ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
1524 pci_dev->mem_resource[0].addr;
1526 rte_bbdev_log_debug(
1527 "Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
1528 dev->device->driver->name, dev->data->name,
1529 (void *)pci_dev->mem_resource[0].addr,
1530 pci_dev->mem_resource[0].phys_addr);
/*
 * PCI probe callback shared by the PF and VF drivers.
 *
 * Allocates a bbdev for the device, allocates NUMA-local private
 * storage, fills in the HW-specific fields, runs device init and logs
 * the FPGA RTL version read from the version register.
 */
1534 fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
1535 struct rte_pci_device *pci_dev)
1537 struct rte_bbdev *bbdev = NULL;
1538 char dev_name[RTE_BBDEV_NAME_MAX_LEN];
1540 if (pci_dev == NULL) {
1541 rte_bbdev_log(ERR, "NULL PCI device");
1545 rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
1547 /* Allocate memory to be used privately by drivers */
1548 bbdev = rte_bbdev_allocate(pci_dev->device.name);
1552 /* allocate device private memory on the device's NUMA node */
1553 bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
1554 sizeof(struct fpga_5gnr_fec_device),
1555 RTE_CACHE_LINE_SIZE,
1556 pci_dev->device.numa_node);
1558 if (bbdev->data->dev_private == NULL) {
1560 "Allocate of %zu bytes for device \"%s\" failed",
1561 sizeof(struct fpga_5gnr_fec_device), dev_name);
/* Undo the bbdev allocation before bailing out */
1562 rte_bbdev_release(bbdev);
1566 /* Fill HW specific part of device structure */
1567 bbdev->device = &pci_dev->device;
1568 bbdev->intr_handle = &pci_dev->intr_handle;
1569 bbdev->data->socket_id = pci_dev->device.numa_node;
1571 /* Invoke FEC FPGA device initialization function */
1572 fpga_5gnr_fec_init(bbdev, pci_drv);
1574 rte_bbdev_log_debug("bbdev id = %u [%s]",
1575 bbdev->data->dev_id, dev_name);
1577 struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
1578 uint32_t version_id = fpga_reg_read_32(d->mmio_base,
1579 FPGA_5GNR_FEC_VERSION_ID);
/* Version register packs major in the high 16 bits, minor in the low */
1580 rte_bbdev_log(INFO, "FEC FPGA RTL v%u.%u",
1581 ((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
1583 #ifdef RTE_LIBRTE_BBDEV_DEBUG
/* Static registers are only dumped for the PF, which owns them */
1584 if (!strcmp(bbdev->device->driver->name,
1585 RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME)))
1586 print_static_reg_debug_info(d->mmio_base);
/*
 * PCI remove callback shared by the PF and VF drivers.
 *
 * Looks up the bbdev registered for this PCI device, frees its private
 * memory, closes it and releases it from the bbdev library, logging
 * (but not propagating details of) each failure.
 */
1592 fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
1594 struct rte_bbdev *bbdev;
1598 if (pci_dev == NULL)
1602 bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
1603 if (bbdev == NULL) {
1605 "Couldn't find HW dev \"%s\" to uninitialise it",
1606 pci_dev->device.name);
1609 dev_id = bbdev->data->dev_id;
1611 /* free device private memory before close */
1612 rte_free(bbdev->data->dev_private);
1615 ret = rte_bbdev_close(dev_id);
1618 "Device %i failed to close during uninit: %i",
1621 /* release bbdev from library */
1622 ret = rte_bbdev_release(bbdev);
1624 rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id,
1627 rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
1632 /* FPGA 5GNR FEC PCI PF address map: vendor/device IDs matched by the
 * PF driver (table is NULL-terminated; terminator elided in this view).
 */
1633 static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
1635 RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
1636 FPGA_5GNR_FEC_PF_DEVICE_ID)
/* PF driver instance: shared probe/remove callbacks, PF ID table,
 * and BAR mapping requested from the PCI bus.
 */
1641 static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
1642 .probe = fpga_5gnr_fec_probe,
1643 .remove = fpga_5gnr_fec_remove,
1644 .id_table = pci_id_fpga_5gnr_fec_pf_map,
1645 .drv_flags = RTE_PCI_DRV_NEED_MAPPING
1648 /* FPGA 5GNR FEC PCI VF address map: vendor/device IDs matched by the
 * VF driver (table is NULL-terminated; terminator elided in this view).
 */
1649 static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
1651 RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
1652 FPGA_5GNR_FEC_VF_DEVICE_ID)
/* VF driver instance: same callbacks as the PF driver, VF ID table. */
1657 static struct rte_pci_driver fpga_5gnr_fec_pci_vf_driver = {
1658 .probe = fpga_5gnr_fec_probe,
1659 .remove = fpga_5gnr_fec_remove,
1660 .id_table = pci_id_fpga_5gnr_fec_vf_map,
1661 .drv_flags = RTE_PCI_DRV_NEED_MAPPING
/* Register both PF and VF drivers and their PCI ID tables with the
 * EAL so they participate in bus scanning.
 */
1665 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_PF_DRIVER_NAME, fpga_5gnr_fec_pci_pf_driver);
1666 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME,
1667 pci_id_fpga_5gnr_fec_pf_map);
1668 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_VF_DRIVER_NAME, fpga_5gnr_fec_pci_vf_driver);
1669 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME,
1670 pci_id_fpga_5gnr_fec_vf_map);
/* Constructor: register the PMD log type; default to DEBUG level in
 * debug builds, NOTICE otherwise (the #else/#endif of the conditional
 * are elided in this view).
 */
1672 RTE_INIT(fpga_5gnr_fec_init_log)
1674 fpga_5gnr_fec_logtype = rte_log_register("pmd.bb.fpga_5gnr_fec");
1675 if (fpga_5gnr_fec_logtype >= 0)
1676 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1677 rte_log_set_level(fpga_5gnr_fec_logtype, RTE_LOG_DEBUG);
1679 rte_log_set_level(fpga_5gnr_fec_logtype, RTE_LOG_NOTICE);