1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
7 #include <rte_common.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_byteorder.h>
13 #include <rte_errno.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_hexdump.h>
17 #include <rte_bus_pci.h>
18 #ifdef RTE_BBDEV_OFFLOAD_COST
19 #include <rte_cycles.h>
22 #include <rte_bbdev.h>
23 #include <rte_bbdev_pmd.h>
24 #include "rte_acc100_pmd.h"
26 #ifdef RTE_LIBRTE_BBDEV_DEBUG
27 RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, DEBUG);
28 #else
29 RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, NOTICE);
30 #endif
32 /* Write to MMIO register address */
34 mmio_write(void *addr, uint32_t value)
36 *((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
39 /* Write a register of an ACC100 device */
41 acc100_reg_write(struct acc100_device *d, uint32_t offset, uint32_t payload)
43 void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
44 mmio_write(reg_addr, payload);
45 usleep(ACC100_LONG_WAIT);
48 /* Read a register of an ACC100 device */
49 static inline uint32_t
50 acc100_reg_read(struct acc100_device *d, uint32_t offset)
53 void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
54 uint32_t ret = *((volatile uint32_t *)(reg_addr));
55 return rte_le_to_cpu_32(ret);
58 /* Basic implementation of log2 for an exact power of two (2^N) */
59 static inline uint32_t
60 log2_basic(uint32_t value)
62 return (value == 0) ? 0 : rte_bsf32(value);
65 /* Calculate memory alignment offset assuming alignment is 2^N */
66 static inline uint32_t
67 calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)
69 rte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);
70 return (uint32_t)(alignment -
71 (unaligned_phy_mem & (alignment-1)));
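/*
 * Illustrative example (not part of the driver logic): with
 * alignment = 0x4000000 (64MB) and an IOVA of 0x12345678, the masked
 * remainder is 0x12345678 & 0x3FFFFFF = 0x2345678, so the function
 * returns 0x4000000 - 0x2345678 = 0x1CBA988; adding that offset to the
 * IOVA gives the next 64MB-aligned address. Note that an already
 * aligned address yields `alignment` (not 0) as the offset.
 */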
74 /* Calculate the offset of the enqueue register */
75 static inline uint32_t
76 queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)
79 return ((vf_id << 12) + (qgrp_id << 7) + (aq_id << 3) +
82 return ((qgrp_id << 7) + (aq_id << 3) +
86 enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, NUM_ACC};
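/*
 * Rough sketch of the queue_offset() layout implied above (the base
 * register offset is elided from this listing): aq_id occupies bits
 * [6:3], qgrp_id starts at bit 7 and, on the PF path only, vf_id
 * starts at bit 12. For example, vf_id = 1, qgrp_id = 2, aq_id = 3 on
 * a PF contributes (1 << 12) + (2 << 7) + (3 << 3) = 0x1118 to the
 * register offset.
 */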
88 /* Return the queue topology for a given accelerator enum */
90 qtopFromAcc(struct rte_acc100_queue_topology **qtop, int acc_enum,
91 struct rte_acc100_conf *acc100_conf)
93 struct rte_acc100_queue_topology *p_qtop;
97 p_qtop = &(acc100_conf->q_ul_4g);
100 p_qtop = &(acc100_conf->q_ul_5g);
103 p_qtop = &(acc100_conf->q_dl_4g);
106 p_qtop = &(acc100_conf->q_dl_5g);
110 rte_bbdev_log(ERR, "Unexpected error evaluating qtopFromAcc");
117 initQTop(struct rte_acc100_conf *acc100_conf)
119 acc100_conf->q_ul_4g.num_aqs_per_groups = 0;
120 acc100_conf->q_ul_4g.num_qgroups = 0;
121 acc100_conf->q_ul_4g.first_qgroup_index = -1;
122 acc100_conf->q_ul_5g.num_aqs_per_groups = 0;
123 acc100_conf->q_ul_5g.num_qgroups = 0;
124 acc100_conf->q_ul_5g.first_qgroup_index = -1;
125 acc100_conf->q_dl_4g.num_aqs_per_groups = 0;
126 acc100_conf->q_dl_4g.num_qgroups = 0;
127 acc100_conf->q_dl_4g.first_qgroup_index = -1;
128 acc100_conf->q_dl_5g.num_aqs_per_groups = 0;
129 acc100_conf->q_dl_5g.num_qgroups = 0;
130 acc100_conf->q_dl_5g.first_qgroup_index = -1;
134 updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,
135 struct acc100_device *d) {
137 struct rte_acc100_queue_topology *q_top = NULL;
138 qtopFromAcc(&q_top, acc, acc100_conf);
139 if (unlikely(q_top == NULL))
142 q_top->num_qgroups++;
143 if (q_top->first_qgroup_index == -1) {
144 q_top->first_qgroup_index = qg;
145 /* Can be optimized to assume all are enabled by default */
146 reg = acc100_reg_read(d, queue_offset(d->pf_device,
147 0, qg, ACC100_NUM_AQS - 1));
148 if (reg & ACC100_QUEUE_ENABLE) {
149 q_top->num_aqs_per_groups = ACC100_NUM_AQS;
152 q_top->num_aqs_per_groups = 0;
153 for (aq = 0; aq < ACC100_NUM_AQS; aq++) {
154 reg = acc100_reg_read(d, queue_offset(d->pf_device,
156 if (reg & ACC100_QUEUE_ENABLE)
157 q_top->num_aqs_per_groups++;
162 /* Fetch configuration enabled for the PF/VF using MMIO Read (slow) */
164 fetch_acc100_config(struct rte_bbdev *dev)
166 struct acc100_device *d = dev->data->dev_private;
167 struct rte_acc100_conf *acc100_conf = &d->acc100_conf;
168 const struct acc100_registry_addr *reg_addr;
170 uint32_t reg, reg_aq, reg_len0, reg_len1;
173 /* No need to retrieve the configuration if it is already done */
177 /* Choose correct registry addresses for the device type */
179 reg_addr = &pf_reg_addr;
181 reg_addr = &vf_reg_addr;
183 d->ddr_size = (1 + acc100_reg_read(d, reg_addr->ddr_range)) << 10;
185 /* Single VF bundle per VF */
186 acc100_conf->num_vf_bundles = 1;
187 initQTop(acc100_conf);
189 struct rte_acc100_queue_topology *q_top = NULL;
190 int qman_func_id[ACC100_NUM_ACCS] = {ACC100_ACCMAP_0, ACC100_ACCMAP_1,
191 ACC100_ACCMAP_2, ACC100_ACCMAP_3, ACC100_ACCMAP_4};
192 reg = acc100_reg_read(d, reg_addr->qman_group_func);
193 for (qg = 0; qg < ACC100_NUM_QGRPS_PER_WORD; qg++) {
194 reg_aq = acc100_reg_read(d,
195 queue_offset(d->pf_device, 0, qg, 0));
196 if (reg_aq & ACC100_QUEUE_ENABLE) {
197 uint32_t idx = (reg >> (qg * 4)) & 0x7;
198 if (idx < ACC100_NUM_ACCS) {
199 acc = qman_func_id[idx];
200 updateQtop(acc, qg, acc100_conf, d);
205 /* Check the depth of the AQs */
206 reg_len0 = acc100_reg_read(d, reg_addr->depth_log0_offset);
207 reg_len1 = acc100_reg_read(d, reg_addr->depth_log1_offset);
208 for (acc = 0; acc < NUM_ACC; acc++) {
209 qtopFromAcc(&q_top, acc, acc100_conf);
210 if (q_top->first_qgroup_index < ACC100_NUM_QGRPS_PER_WORD)
211 q_top->aq_depth_log2 = (reg_len0 >>
212 (q_top->first_qgroup_index * 4))
215 q_top->aq_depth_log2 = (reg_len1 >>
216 ((q_top->first_qgroup_index -
217 ACC100_NUM_QGRPS_PER_WORD) * 4))
223 reg_mode = acc100_reg_read(d, HWPfHiPfMode);
224 acc100_conf->pf_mode_en = (reg_mode == ACC100_PF_VAL) ? 1 : 0;
228 "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u\n",
229 (d->pf_device) ? "PF" : "VF",
230 (acc100_conf->input_pos_llr_1_bit) ? "POS" : "NEG",
231 (acc100_conf->output_pos_llr_1_bit) ? "POS" : "NEG",
232 acc100_conf->q_ul_4g.num_qgroups,
233 acc100_conf->q_dl_4g.num_qgroups,
234 acc100_conf->q_ul_5g.num_qgroups,
235 acc100_conf->q_dl_5g.num_qgroups,
236 acc100_conf->q_ul_4g.num_aqs_per_groups,
237 acc100_conf->q_dl_4g.num_aqs_per_groups,
238 acc100_conf->q_ul_5g.num_aqs_per_groups,
239 acc100_conf->q_dl_5g.num_aqs_per_groups,
240 acc100_conf->q_ul_4g.aq_depth_log2,
241 acc100_conf->q_dl_4g.aq_depth_log2,
242 acc100_conf->q_ul_5g.aq_depth_log2,
243 acc100_conf->q_dl_5g.aq_depth_log2);
247 free_base_addresses(void **base_addrs, int size)
250 for (i = 0; i < size; i++)
251 rte_free(base_addrs[i]);
254 static inline uint32_t
257 return sizeof(union acc100_dma_desc);
260 /* Allocate the 2 * 64MB block for the sw rings */
262 alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc100_device *d,
265 uint32_t sw_ring_size = ACC100_SIZE_64MBYTE;
266 d->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,
267 2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);
268 if (d->sw_rings_base == NULL) {
269 rte_bbdev_log(ERR, "Failed to allocate memory for %s:%u",
270 dev->device->driver->name,
274 uint32_t next_64mb_align_offset = calc_mem_alignment_offset(
275 d->sw_rings_base, ACC100_SIZE_64MBYTE);
276 d->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);
277 d->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +
278 next_64mb_align_offset;
279 d->sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
280 d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
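/*
 * Design note: requesting 2 * 64MB guarantees that a fully
 * 64MB-aligned 64MB window exists somewhere inside the allocation,
 * whatever its start address; that window is what the alignment
 * offset computation above selects.
 */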
285 /* Attempt to allocate minimised memory space for sw rings */
287 alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc100_device *d,
288 uint16_t num_queues, int socket)
290 rte_iova_t sw_rings_base_iova, next_64mb_align_addr_iova;
291 uint32_t next_64mb_align_offset;
292 rte_iova_t sw_ring_iova_end_addr;
293 void *base_addrs[ACC100_SW_RING_MEM_ALLOC_ATTEMPTS];
296 uint32_t q_sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
297 uint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;
299 /* Find an aligned block of memory to store sw rings */
300 while (i < ACC100_SW_RING_MEM_ALLOC_ATTEMPTS) {
302 * The sw_ring allocated memory is guaranteed to be aligned to
303 * q_sw_ring_size, provided that the requested size is
304 * less than the page size
306 sw_rings_base = rte_zmalloc_socket(
307 dev->device->driver->name,
308 dev_sw_ring_size, q_sw_ring_size, socket);
310 if (sw_rings_base == NULL) {
312 "Failed to allocate memory for %s:%u",
313 dev->device->driver->name,
318 sw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);
319 next_64mb_align_offset = calc_mem_alignment_offset(
320 sw_rings_base, ACC100_SIZE_64MBYTE);
321 next_64mb_align_addr_iova = sw_rings_base_iova +
322 next_64mb_align_offset;
323 sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size;
325 /* Check if the end of the sw ring memory block is before the
326 * start of the next 64MB-aligned memory address
328 if (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {
329 d->sw_rings_iova = sw_rings_base_iova;
330 d->sw_rings = sw_rings_base;
331 d->sw_rings_base = sw_rings_base;
332 d->sw_ring_size = q_sw_ring_size;
333 d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
336 /* Store the address of the unaligned mem block */
337 base_addrs[i] = sw_rings_base;
341 /* Free all unaligned blocks of mem allocated in the loop */
342 free_base_addresses(base_addrs, i);
346 /* Allocate 64MB memory used for all software rings */
348 acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
350 uint32_t phys_low, phys_high, payload;
351 struct acc100_device *d = dev->data->dev_private;
352 const struct acc100_registry_addr *reg_addr;
354 if (d->pf_device && !d->acc100_conf.pf_mode_en) {
355 rte_bbdev_log(NOTICE,
356 "%s has PF mode disabled. This PF can't be used.",
361 alloc_sw_rings_min_mem(dev, d, num_queues, socket_id);
363 /* If minimal memory space approach failed, then allocate
364 * the 2 * 64MB block for the sw rings
366 if (d->sw_rings == NULL)
367 alloc_2x64mb_sw_rings_mem(dev, d, socket_id);
369 if (d->sw_rings == NULL) {
370 rte_bbdev_log(NOTICE,
371 "Failure allocating sw_rings memory");
375 /* Configure ACC100 with the base address for DMA descriptor rings.
376 * The same descriptor rings are used for the UL and DL DMA engines.
377 * Note: assuming only the VF0 bundle is used for PF mode.
379 phys_high = (uint32_t)(d->sw_rings_iova >> 32);
380 phys_low = (uint32_t)(d->sw_rings_iova & ~(ACC100_SIZE_64MBYTE-1));
382 /* Choose correct registry addresses for the device type */
384 reg_addr = &pf_reg_addr;
386 reg_addr = &vf_reg_addr;
388 /* Read the populated cfg from ACC100 registers */
389 fetch_acc100_config(dev);
391 /* Release AXI from PF */
393 acc100_reg_write(d, HWPfDmaAxiControl, 1);
395 acc100_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);
396 acc100_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);
397 acc100_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);
398 acc100_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);
399 acc100_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);
400 acc100_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);
401 acc100_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);
402 acc100_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);
405 * Configure Ring Size to the max queue ring size
406 * (used for wrapping purposes)
408 payload = log2_basic(d->sw_ring_size / 64);
409 acc100_reg_write(d, reg_addr->ring_size, payload);
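/*
 * Illustrative computation, assuming ACC100_MAX_QUEUE_DEPTH == 1024
 * and 256B descriptors (consistent with the desc_idx << 8 addressing
 * used elsewhere in this file): sw_ring_size = 1024 * 256 = 256KB, so
 * the payload written is log2(262144 / 64) = 12.
 */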
411 /* Configure tail pointer for use when SDONE enabled */
412 d->tail_ptrs = rte_zmalloc_socket(
413 dev->device->driver->name,
414 ACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t),
415 RTE_CACHE_LINE_SIZE, socket_id);
416 if (d->tail_ptrs == NULL) {
417 rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u",
418 dev->device->driver->name,
420 rte_free(d->sw_rings);
423 d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs);
425 phys_high = (uint32_t)(d->tail_ptr_iova >> 32);
426 phys_low = (uint32_t)(d->tail_ptr_iova);
427 acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);
428 acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);
429 acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);
430 acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);
431 acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);
432 acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);
433 acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);
434 acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);
436 d->harq_layout = rte_zmalloc_socket("HARQ Layout",
437 ACC100_HARQ_LAYOUT * sizeof(*d->harq_layout),
438 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
439 if (d->harq_layout == NULL) {
440 rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u",
441 dev->device->driver->name,
443 rte_free(d->sw_rings);
447 /* Mark as configured properly */
448 d->configured = true;
451 "ACC100 (%s) configured sw_rings = %p, sw_rings_iova = %#"
452 PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova);
457 /* Free memory used for software rings */
459 acc100_dev_close(struct rte_bbdev *dev)
461 struct acc100_device *d = dev->data->dev_private;
462 if (d->sw_rings_base != NULL) {
463 rte_free(d->tail_ptrs);
464 rte_free(d->sw_rings_base);
465 d->sw_rings_base = NULL;
467 /* Ensure all in-flight HW transactions are completed */
468 usleep(ACC100_LONG_WAIT);
473 * Report an ACC100 queue index which is free.
474 * Return 0 to 16k for a valid queue_idx or -1 when no queue is available.
475 * Note: only supporting the VF0 bundle for PF mode.
478 acc100_find_free_queue_idx(struct rte_bbdev *dev,
479 const struct rte_bbdev_queue_conf *conf)
481 struct acc100_device *d = dev->data->dev_private;
482 int op_2_acc[5] = {0, UL_4G, DL_4G, UL_5G, DL_5G};
483 int acc = op_2_acc[conf->op_type];
484 struct rte_acc100_queue_topology *qtop = NULL;
486 qtopFromAcc(&qtop, acc, &(d->acc100_conf));
489 /* Identify the matching QGroup index; qgroups are sorted in priority order */
490 uint16_t group_idx = qtop->first_qgroup_index;
491 group_idx += conf->priority;
492 if (group_idx >= ACC100_NUM_QGRPS ||
493 conf->priority >= qtop->num_qgroups) {
494 rte_bbdev_log(INFO, "Invalid Priority on %s, priority %u",
495 dev->data->name, conf->priority);
498 /* Find a free AQ_idx */
500 for (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) {
501 if (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {
502 /* Mark the Queue as assigned */
503 d->q_assigned_bit_map[group_idx] |= (1 << aq_idx);
504 /* Report the AQ Index */
505 return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx;
508 rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u",
509 dev->data->name, conf->priority);
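/*
 * Hypothetical decoding of the returned q_idx, assuming
 * ACC100_GRP_ID_SHIFT == 10 (value not shown in this listing):
 * group_idx = 2 and aq_idx = 5 encode as (2 << 10) + 5 = 0x805, which
 * acc100_queue_setup() unpacks back into qgrp_id and aq_id.
 */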
513 /* Setup ACC100 queue */
515 acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
516 const struct rte_bbdev_queue_conf *conf)
518 struct acc100_device *d = dev->data->dev_private;
519 struct acc100_queue *q;
522 /* Allocate the queue data structure. */
523 q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
524 RTE_CACHE_LINE_SIZE, conf->socket);
526 rte_bbdev_log(ERR, "Failed to allocate queue memory");
530 rte_bbdev_log(ERR, "Undefined device");
535 q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
536 q->ring_addr_iova = d->sw_rings_iova + (d->sw_ring_size * queue_id);
538 /* Prepare the Ring with default descriptor format */
539 union acc100_dma_desc *desc = NULL;
540 unsigned int desc_idx, b_idx;
541 int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
542 ACC100_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
543 ACC100_FCW_TD_BLEN : ACC100_FCW_LD_BLEN));
545 for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
546 desc = q->ring_addr + desc_idx;
547 desc->req.word0 = ACC100_DMA_DESC_TYPE;
548 desc->req.word1 = 0; /**< Timestamp */
551 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
552 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
553 desc->req.data_ptrs[0].blen = fcw_len;
554 desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
555 desc->req.data_ptrs[0].last = 0;
556 desc->req.data_ptrs[0].dma_ext = 0;
557 for (b_idx = 1; b_idx < ACC100_DMA_MAX_NUM_POINTERS - 1;
559 desc->req.data_ptrs[b_idx].blkid = ACC100_DMA_BLKID_IN;
560 desc->req.data_ptrs[b_idx].last = 1;
561 desc->req.data_ptrs[b_idx].dma_ext = 0;
563 desc->req.data_ptrs[b_idx].blkid =
564 ACC100_DMA_BLKID_OUT_ENC;
565 desc->req.data_ptrs[b_idx].last = 1;
566 desc->req.data_ptrs[b_idx].dma_ext = 0;
568 /* Preset some fields of LDPC FCW */
569 desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
570 desc->req.fcw_ld.gain_i = 1;
571 desc->req.fcw_ld.gain_h = 1;
574 q->lb_in = rte_zmalloc_socket(dev->device->driver->name,
576 RTE_CACHE_LINE_SIZE, conf->socket);
577 if (q->lb_in == NULL) {
578 rte_bbdev_log(ERR, "Failed to allocate lb_in memory");
582 q->lb_in_addr_iova = rte_malloc_virt2iova(q->lb_in);
583 q->lb_out = rte_zmalloc_socket(dev->device->driver->name,
585 RTE_CACHE_LINE_SIZE, conf->socket);
586 if (q->lb_out == NULL) {
587 rte_bbdev_log(ERR, "Failed to allocate lb_out memory");
592 q->lb_out_addr_iova = rte_malloc_virt2iova(q->lb_out);
595 * Software queue ring wraps synchronously with the HW when it reaches
596 * the boundary of the maximum allocated queue size, no matter what the
597 * sw queue size is. This wrapping is guarded by setting the wrap_mask
598 * to represent the maximum queue size as allocated at the time when
599 * the device has been setup (in configure()).
601 * The queue depth is set to the queue size value (conf->queue_size).
602 * This limits the occupancy of the queue at any point of time, so that
603 * the queue does not get swamped with enqueue requests.
605 q->sw_ring_depth = conf->queue_size;
606 q->sw_ring_wrap_mask = d->sw_ring_max_depth - 1;
608 q->op_type = conf->op_type;
610 q_idx = acc100_find_free_queue_idx(dev, conf);
618 q->qgrp_id = (q_idx >> ACC100_GRP_ID_SHIFT) & 0xF;
619 q->vf_id = (q_idx >> ACC100_VF_ID_SHIFT) & 0x3F;
620 q->aq_id = q_idx & 0xF;
621 q->aq_depth = (conf->op_type == RTE_BBDEV_OP_TURBO_DEC) ?
622 (1 << d->acc100_conf.q_ul_4g.aq_depth_log2) :
623 (1 << d->acc100_conf.q_dl_4g.aq_depth_log2);
625 q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
626 queue_offset(d->pf_device,
627 q->vf_id, q->qgrp_id, q->aq_id));
630 "Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p",
631 dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,
632 q->aq_id, q->aq_depth, q->mmio_reg_enqueue);
634 dev->data->queues[queue_id].queue_private = q;
638 /* Release ACC100 queue */
640 acc100_queue_release(struct rte_bbdev *dev, uint16_t q_id)
642 struct acc100_device *d = dev->data->dev_private;
643 struct acc100_queue *q = dev->data->queues[q_id].queue_private;
646 /* Mark the Queue as un-assigned */
647 d->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -
652 dev->data->queues[q_id].queue_private = NULL;
658 /* Get ACC100 device info */
660 acc100_dev_info_get(struct rte_bbdev *dev,
661 struct rte_bbdev_driver_info *dev_info)
663 struct acc100_device *d = dev->data->dev_private;
665 static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
667 .type = RTE_BBDEV_OP_LDPC_ENC,
670 RTE_BBDEV_LDPC_RATE_MATCH |
671 RTE_BBDEV_LDPC_CRC_24B_ATTACH |
672 RTE_BBDEV_LDPC_INTERLEAVER_BYPASS,
674 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
676 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
680 .type = RTE_BBDEV_OP_LDPC_DEC,
683 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
684 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
685 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
686 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
687 #ifdef ACC100_EXT_MEM
688 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
689 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
691 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
692 RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS |
693 RTE_BBDEV_LDPC_DECODE_BYPASS |
694 RTE_BBDEV_LDPC_DEC_SCATTER_GATHER |
695 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION |
696 RTE_BBDEV_LDPC_LLR_COMPRESSION,
700 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
701 .num_buffers_hard_out =
702 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
703 .num_buffers_soft_out = 0,
706 RTE_BBDEV_END_OF_CAPABILITIES_LIST()
709 static struct rte_bbdev_queue_conf default_queue_conf;
710 default_queue_conf.socket = dev->data->socket_id;
711 default_queue_conf.queue_size = ACC100_MAX_QUEUE_DEPTH;
713 dev_info->driver_name = dev->device->driver->name;
715 /* Read and save the populated config from ACC100 registers */
716 fetch_acc100_config(dev);
718 /* This isn't ideal because it reports the maximum number of queues but
719 * does not provide info on how many can be uplink/downlink or different
720 * priorities.
721 */
722 dev_info->max_num_queues =
723 d->acc100_conf.q_dl_5g.num_aqs_per_groups *
724 d->acc100_conf.q_dl_5g.num_qgroups +
725 d->acc100_conf.q_ul_5g.num_aqs_per_groups *
726 d->acc100_conf.q_ul_5g.num_qgroups +
727 d->acc100_conf.q_dl_4g.num_aqs_per_groups *
728 d->acc100_conf.q_dl_4g.num_qgroups +
729 d->acc100_conf.q_ul_4g.num_aqs_per_groups *
730 d->acc100_conf.q_ul_4g.num_qgroups;
731 dev_info->queue_size_lim = ACC100_MAX_QUEUE_DEPTH;
732 dev_info->hardware_accelerated = true;
733 dev_info->max_dl_queue_priority =
734 d->acc100_conf.q_dl_4g.num_qgroups - 1;
735 dev_info->max_ul_queue_priority =
736 d->acc100_conf.q_ul_4g.num_qgroups - 1;
737 dev_info->default_queue_conf = default_queue_conf;
738 dev_info->cpu_flag_reqs = NULL;
739 dev_info->min_alignment = 64;
740 dev_info->capabilities = bbdev_capabilities;
741 #ifdef ACC100_EXT_MEM
742 dev_info->harq_buffer_size = d->ddr_size;
744 dev_info->harq_buffer_size = 0;
749 static const struct rte_bbdev_ops acc100_bbdev_ops = {
750 .setup_queues = acc100_setup_queues,
751 .close = acc100_dev_close,
752 .info_get = acc100_dev_info_get,
753 .queue_setup = acc100_queue_setup,
754 .queue_release = acc100_queue_release,
757 /* ACC100 PCI PF address map */
758 static struct rte_pci_id pci_id_acc100_pf_map[] = {
760 RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_PF_DEVICE_ID)
765 /* ACC100 PCI VF address map */
766 static struct rte_pci_id pci_id_acc100_vf_map[] = {
768 RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_VF_DEVICE_ID)
773 /* Read flag value 0/1 from bitmap */
775 check_bit(uint32_t bitmap, uint32_t bitmask)
777 return bitmap & bitmask;
781 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
783 if (unlikely(len > rte_pktmbuf_tailroom(m)))
786 char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
787 m->data_len = (uint16_t)(m->data_len + len);
788 m_head->pkt_len = (m_head->pkt_len + len);
792 /* Compute value of k0.
793 * Based on 3GPP TS 38.212 Table 5.4.2.1-2:
794 * starting position of the different redundancy versions, k0.
796 static inline uint16_t
797 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
801 uint16_t n = (bg == 1 ? ACC100_N_ZC_1 : ACC100_N_ZC_2) * z_c;
804 return (bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * z_c;
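/*
 * Worked example (illustrative only, assuming ACC100_K0_1_1 == 17 as
 * per the 38.212 table): for BG1 with z_c = 128 and a full circular
 * buffer (n_cb == n == 66 * 128 = 8448), rv_index 1 returns
 * 17 * 128 = 2176, i.e. floor(17 * Ncb / (66 * Zc)) * Zc with Ncb == N.
 */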
805 else if (rv_index == 2)
806 return (bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * z_c;
808 return (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c;
810 /* LBRM case - includes a division by N */
812 return (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb)
814 else if (rv_index == 2)
815 return (((bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * n_cb)
818 return (((bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * n_cb)
822 /* Fill in a frame control word for LDPC encoding. */
824 acc100_fcw_le_fill(const struct rte_bbdev_enc_op *op,
825 struct acc100_fcw_le *fcw, int num_cb)
827 fcw->qm = op->ldpc_enc.q_m;
828 fcw->nfiller = op->ldpc_enc.n_filler;
829 fcw->BG = (op->ldpc_enc.basegraph - 1);
830 fcw->Zc = op->ldpc_enc.z_c;
831 fcw->ncb = op->ldpc_enc.n_cb;
832 fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,
833 op->ldpc_enc.rv_index);
834 fcw->rm_e = op->ldpc_enc.cb_params.e;
835 fcw->crc_select = check_bit(op->ldpc_enc.op_flags,
836 RTE_BBDEV_LDPC_CRC_24B_ATTACH);
837 fcw->bypass_intlv = check_bit(op->ldpc_enc.op_flags,
838 RTE_BBDEV_LDPC_INTERLEAVER_BYPASS);
839 fcw->mcb_count = num_cb;
842 /* Fill in a frame control word for LDPC decoding. */
844 acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,
845 union acc100_harq_layout_data *harq_layout)
847 uint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;
850 bool harq_prun = false;
852 fcw->qm = op->ldpc_dec.q_m;
853 fcw->nfiller = op->ldpc_dec.n_filler;
854 fcw->BG = (op->ldpc_dec.basegraph - 1);
855 fcw->Zc = op->ldpc_dec.z_c;
856 fcw->ncb = op->ldpc_dec.n_cb;
857 fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_dec.basegraph,
858 op->ldpc_dec.rv_index);
859 if (op->ldpc_dec.code_block_mode == 1)
860 fcw->rm_e = op->ldpc_dec.cb_params.e;
862 fcw->rm_e = (op->ldpc_dec.tb_params.r <
863 op->ldpc_dec.tb_params.cab) ?
864 op->ldpc_dec.tb_params.ea :
865 op->ldpc_dec.tb_params.eb;
867 fcw->hcin_en = check_bit(op->ldpc_dec.op_flags,
868 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
869 fcw->hcout_en = check_bit(op->ldpc_dec.op_flags,
870 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
871 fcw->crc_select = check_bit(op->ldpc_dec.op_flags,
872 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
873 fcw->bypass_dec = check_bit(op->ldpc_dec.op_flags,
874 RTE_BBDEV_LDPC_DECODE_BYPASS);
875 fcw->bypass_intlv = check_bit(op->ldpc_dec.op_flags,
876 RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS);
877 if (op->ldpc_dec.q_m == 1) {
878 fcw->bypass_intlv = 1;
881 fcw->hcin_decomp_mode = check_bit(op->ldpc_dec.op_flags,
882 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
883 fcw->hcout_comp_mode = check_bit(op->ldpc_dec.op_flags,
884 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
885 fcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,
886 RTE_BBDEV_LDPC_LLR_COMPRESSION);
887 harq_index = op->ldpc_dec.harq_combined_output.offset /
889 #ifdef ACC100_EXT_MEM
890 /* Limit cases when HARQ pruning is valid */
891 harq_prun = ((op->ldpc_dec.harq_combined_output.offset %
892 ACC100_HARQ_OFFSET) == 0) &&
893 (op->ldpc_dec.harq_combined_output.offset <= UINT16_MAX
894 * ACC100_HARQ_OFFSET);
896 if (fcw->hcin_en > 0) {
897 harq_in_length = op->ldpc_dec.harq_combined_input.length;
898 if (fcw->hcin_decomp_mode > 0)
899 harq_in_length = harq_in_length * 8 / 6;
900 harq_in_length = RTE_ALIGN(harq_in_length, 64);
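/* E.g. (illustrative): a 768B compressed HARQ input expands by 8/6
 * to 1024B, which is already a 64B multiple, so the alignment above
 * leaves it unchanged.
 */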
901 if ((harq_layout[harq_index].offset > 0) && harq_prun) {
902 rte_bbdev_log_debug("HARQ IN offset unexpected for now\n");
903 fcw->hcin_size0 = harq_layout[harq_index].size0;
904 fcw->hcin_offset = harq_layout[harq_index].offset;
905 fcw->hcin_size1 = harq_in_length -
906 harq_layout[harq_index].offset;
908 fcw->hcin_size0 = harq_in_length;
909 fcw->hcin_offset = 0;
914 fcw->hcin_offset = 0;
918 fcw->itmax = op->ldpc_dec.iter_max;
919 fcw->itstop = check_bit(op->ldpc_dec.op_flags,
920 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
921 fcw->synd_precoder = fcw->itstop;
923 * These are all implicitly set
924 * fcw->synd_post = 0;
926 * fcw->so_bypass_rm = 0;
927 * fcw->so_bypass_intlv = 0;
928 * fcw->dec_convllr = 0;
929 * fcw->hcout_convllr = 0;
930 * fcw->hcout_size1 = 0;
932 * fcw->hcout_offset = 0;
933 * fcw->negstop_th = 0;
934 * fcw->negstop_it = 0;
935 * fcw->negstop_en = 0;
939 if (fcw->hcout_en > 0) {
940 parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)
941 * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
942 k0_p = (fcw->k0 > parity_offset) ?
943 fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;
944 ncb_p = fcw->ncb - op->ldpc_dec.n_filler;
945 l = k0_p + fcw->rm_e;
946 harq_out_length = (uint16_t) fcw->hcin_size0;
947 harq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);
948 harq_out_length = (harq_out_length + 0x3F) & 0xFFC0;
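/* The masking above rounds harq_out_length up to a multiple of 64
 * (~0x3F == 0xFFC0 in 16 bits), e.g. an illustrative 1000 becomes 1024.
 */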
949 if ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) &&
951 fcw->hcout_size0 = (uint16_t) fcw->hcin_size0;
952 fcw->hcout_offset = k0_p & 0xFFC0;
953 fcw->hcout_size1 = harq_out_length - fcw->hcout_offset;
955 fcw->hcout_size0 = harq_out_length;
956 fcw->hcout_size1 = 0;
957 fcw->hcout_offset = 0;
959 harq_layout[harq_index].offset = fcw->hcout_offset;
960 harq_layout[harq_index].size0 = fcw->hcout_size0;
962 fcw->hcout_size0 = 0;
963 fcw->hcout_size1 = 0;
964 fcw->hcout_offset = 0;
969 * Fills descriptor with data pointers of one block type.
972 * Pointer to DMA descriptor.
974 * Pointer to a pointer to the input data to be encoded. It may be updated
975 * to point to the next segment in the scatter-gather case.
977 * Input offset in the rte_mbuf structure. It is used for calculating the
978 * point where the data starts.
980 * Length of currently processed Code Block
981 * @param seg_total_left
982 * It indicates how many bytes are still left in the segment (mbuf) for further
985 * Store information about device capabilities
986 * @param next_triplet
987 * Index for ACC100 DMA Descriptor triplet
990 * Returns index of next triplet on success, a negative value if lengths of
991 * pkt and processed cb do not match.
995 acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,
996 struct rte_mbuf **input, uint32_t *offset, uint32_t cb_len,
997 uint32_t *seg_total_left, int next_triplet)
1000 struct rte_mbuf *m = *input;
1002 part_len = (*seg_total_left < cb_len) ? *seg_total_left : cb_len;
1004 *seg_total_left -= part_len;
1006 desc->data_ptrs[next_triplet].address =
1007 rte_pktmbuf_iova_offset(m, *offset);
1008 desc->data_ptrs[next_triplet].blen = part_len;
1009 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
1010 desc->data_ptrs[next_triplet].last = 0;
1011 desc->data_ptrs[next_triplet].dma_ext = 0;
1012 *offset += part_len;
1015 while (cb_len > 0) {
1016 if (next_triplet < ACC100_DMA_MAX_NUM_POINTERS &&
1020 *seg_total_left = rte_pktmbuf_data_len(m);
1021 part_len = (*seg_total_left < cb_len) ?
1024 desc->data_ptrs[next_triplet].address =
1025 rte_pktmbuf_iova_offset(m, 0);
1026 desc->data_ptrs[next_triplet].blen = part_len;
1027 desc->data_ptrs[next_triplet].blkid =
1028 ACC100_DMA_BLKID_IN;
1029 desc->data_ptrs[next_triplet].last = 0;
1030 desc->data_ptrs[next_triplet].dma_ext = 0;
1032 *seg_total_left -= part_len;
1033 /* Initializing offset for next segment (mbuf) */
1038 "Some data still left for processing: "
1039 "data_left: %u, next_triplet: %u, next_mbuf: %p",
1040 cb_len, next_triplet, m->next);
1044 /* Storing new mbuf as it could be changed in scatter-gather case */
1047 return next_triplet;
1050 /* Fills descriptor with data pointers of one block type.
1051 * Returns index of next triplet on success, other value if lengths of
1052 * output data and processed mbuf do not match.
1055 acc100_dma_fill_blk_type_out(struct acc100_dma_req_desc *desc,
1056 struct rte_mbuf *output, uint32_t out_offset,
1057 uint32_t output_len, int next_triplet, int blk_id)
1059 desc->data_ptrs[next_triplet].address =
1060 rte_pktmbuf_iova_offset(output, out_offset);
1061 desc->data_ptrs[next_triplet].blen = output_len;
1062 desc->data_ptrs[next_triplet].blkid = blk_id;
1063 desc->data_ptrs[next_triplet].last = 0;
1064 desc->data_ptrs[next_triplet].dma_ext = 0;
1067 return next_triplet;
1071 acc100_header_init(struct acc100_dma_req_desc *desc)
1073 desc->word0 = ACC100_DMA_DESC_TYPE;
1074 desc->word1 = 0; /**< Timestamp could be disabled */
1080 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1081 /* Check if any input data is unexpectedly left for processing */
1083 check_mbuf_total_left(uint32_t mbuf_total_left)
1085 if (mbuf_total_left == 0)
1088 "Some data still left for processing: mbuf_total_left = %u",
1095 acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,
1096 struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1097 struct rte_mbuf *output, uint32_t *in_offset,
1098 uint32_t *out_offset, uint32_t *out_length,
1099 uint32_t *mbuf_total_left, uint32_t *seg_total_left)
1101 int next_triplet = 1; /* FCW already done */
1102 uint16_t K, in_length_in_bits, in_length_in_bytes;
1103 struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1105 acc100_header_init(desc);
1107 K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1108 in_length_in_bits = K - enc->n_filler;
1109 if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) ||
1110 (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH))
1111 in_length_in_bits -= 24;
1112 in_length_in_bytes = in_length_in_bits >> 3;
1114 if (unlikely((*mbuf_total_left == 0) ||
1115 (*mbuf_total_left < in_length_in_bytes))) {
1117 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1118 *mbuf_total_left, in_length_in_bytes);
1122 next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
1124 seg_total_left, next_triplet);
1125 if (unlikely(next_triplet < 0)) {
1127 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1131 desc->data_ptrs[next_triplet - 1].last = 1;
1132 desc->m2dlen = next_triplet;
1133 *mbuf_total_left -= in_length_in_bytes;
1135 /* Set output length */
1136 /* Integer round up division by 8 */
1137 *out_length = (enc->cb_params.e + 7) >> 3;
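/* E.g. (illustrative): e = 3844 rate-matched bits pack into
 * (3844 + 7) >> 3 = 481 output bytes.
 */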
1139 next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
1140 *out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
1141 op->ldpc_enc.output.length += *out_length;
1142 *out_offset += *out_length;
1143 desc->data_ptrs[next_triplet - 1].last = 1;
1144 desc->data_ptrs[next_triplet - 1].dma_ext = 0;
1145 desc->d2mlen = next_triplet - desc->m2dlen;
1153 acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
1154 struct acc100_dma_req_desc *desc,
1155 struct rte_mbuf **input, struct rte_mbuf *h_output,
1156 uint32_t *in_offset, uint32_t *h_out_offset,
1157 uint32_t *h_out_length, uint32_t *mbuf_total_left,
1158 uint32_t *seg_total_left,
1159 struct acc100_fcw_ld *fcw)
1161 struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1162 int next_triplet = 1; /* FCW already done */
1163 uint32_t input_length;
1164 uint16_t output_length, crc24_overlap = 0;
1165 uint16_t sys_cols, K, h_p_size, h_np_size;
1166 bool h_comp = check_bit(dec->op_flags,
1167 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1169 acc100_header_init(desc);
1171 if (check_bit(op->ldpc_dec.op_flags,
1172 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
1175 /* Compute some LDPC BG lengths */
1176 input_length = dec->cb_params.e;
1177 if (check_bit(op->ldpc_dec.op_flags,
1178 RTE_BBDEV_LDPC_LLR_COMPRESSION))
1179 input_length = (input_length * 3 + 3) / 4;
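/* E.g. (illustrative): with LLR compression, four 6-bit LLRs pack into
 * 3 bytes, so an input_length of 1000 becomes (1000 * 3 + 3) / 4 = 750.
 */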
1180 sys_cols = (dec->basegraph == 1) ? 22 : 10;
1181 K = sys_cols * dec->z_c;
1182 output_length = K - dec->n_filler - crc24_overlap;
1184 if (unlikely((*mbuf_total_left == 0) ||
1185 (*mbuf_total_left < input_length))) {
1187 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1188 *mbuf_total_left, input_length);
1192 next_triplet = acc100_dma_fill_blk_type_in(desc, input,
1193 in_offset, input_length,
1194 seg_total_left, next_triplet);
1196 if (unlikely(next_triplet < 0)) {
1198 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1203 if (check_bit(op->ldpc_dec.op_flags,
1204 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1205 h_p_size = fcw->hcin_size0 + fcw->hcin_size1;
1207 h_p_size = (h_p_size * 3 + 3) / 4;
1208 desc->data_ptrs[next_triplet].address =
1209 dec->harq_combined_input.offset;
1210 desc->data_ptrs[next_triplet].blen = h_p_size;
1211 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN_HARQ;
1212 desc->data_ptrs[next_triplet].dma_ext = 1;
1213 #ifndef ACC100_EXT_MEM
1214 acc100_dma_fill_blk_type_out(
1216 op->ldpc_dec.harq_combined_input.data,
1217 op->ldpc_dec.harq_combined_input.offset,
1220 ACC100_DMA_BLKID_IN_HARQ);
1225 desc->data_ptrs[next_triplet - 1].last = 1;
1226 desc->m2dlen = next_triplet;
1227 *mbuf_total_left -= input_length;
1229 next_triplet = acc100_dma_fill_blk_type_out(desc, h_output,
1230 *h_out_offset, output_length >> 3, next_triplet,
1231 ACC100_DMA_BLKID_OUT_HARD);
1233 if (check_bit(op->ldpc_dec.op_flags,
1234 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1235 /* Pruned size of the HARQ */
1236 h_p_size = fcw->hcout_size0 + fcw->hcout_size1;
1237 /* Non-Pruned size of the HARQ */
1238 h_np_size = fcw->hcout_offset > 0 ?
1239 fcw->hcout_offset + fcw->hcout_size1 :
1242 h_np_size = (h_np_size * 3 + 3) / 4;
1243 h_p_size = (h_p_size * 3 + 3) / 4;
1245 dec->harq_combined_output.length = h_np_size;
1246 desc->data_ptrs[next_triplet].address =
1247 dec->harq_combined_output.offset;
1248 desc->data_ptrs[next_triplet].blen = h_p_size;
1249 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARQ;
1250 desc->data_ptrs[next_triplet].dma_ext = 1;
1251 #ifndef ACC100_EXT_MEM
1252 acc100_dma_fill_blk_type_out(
1254 dec->harq_combined_output.data,
1255 dec->harq_combined_output.offset,
1258 ACC100_DMA_BLKID_OUT_HARQ);
1263 *h_out_length = output_length >> 3;
1264 dec->hard_output.length += *h_out_length;
1265 *h_out_offset += *h_out_length;
1266 desc->data_ptrs[next_triplet - 1].last = 1;
1267 desc->d2mlen = next_triplet - desc->m2dlen;
1275 acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,
1276 struct acc100_dma_req_desc *desc,
1277 struct rte_mbuf *input, struct rte_mbuf *h_output,
1278 uint32_t *in_offset, uint32_t *h_out_offset,
1279 uint32_t *h_out_length,
1280 union acc100_harq_layout_data *harq_layout)
1282 int next_triplet = 1; /* FCW already done */
1283 desc->data_ptrs[next_triplet].address =
1284 rte_pktmbuf_iova_offset(input, *in_offset);
1287 if (check_bit(op->ldpc_dec.op_flags,
1288 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1289 struct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;
1290 desc->data_ptrs[next_triplet].address = hi.offset;
1291 #ifndef ACC100_EXT_MEM
1292 desc->data_ptrs[next_triplet].address =
1293 rte_pktmbuf_iova_offset(hi.data, hi.offset);
1298 desc->data_ptrs[next_triplet].address =
1299 rte_pktmbuf_iova_offset(h_output, *h_out_offset);
1300 *h_out_length = desc->data_ptrs[next_triplet].blen;
1303 if (check_bit(op->ldpc_dec.op_flags,
1304 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1305 desc->data_ptrs[next_triplet].address =
1306 op->ldpc_dec.harq_combined_output.offset;
1307 /* Adjust based on previous operation */
1308 struct rte_bbdev_dec_op *prev_op = desc->op_addr;
1309 op->ldpc_dec.harq_combined_output.length =
1310 prev_op->ldpc_dec.harq_combined_output.length;
1311 int16_t hq_idx = op->ldpc_dec.harq_combined_output.offset /
1313 int16_t prev_hq_idx =
1314 prev_op->ldpc_dec.harq_combined_output.offset
1315 / ACC100_HARQ_OFFSET;
1316 harq_layout[hq_idx].val = harq_layout[prev_hq_idx].val;
1317 #ifndef ACC100_EXT_MEM
1318 struct rte_bbdev_op_data ho =
1319 op->ldpc_dec.harq_combined_output;
1320 desc->data_ptrs[next_triplet].address =
1321 rte_pktmbuf_iova_offset(ho.data, ho.offset);
1326 op->ldpc_dec.hard_output.length += *h_out_length;
1331 /* Enqueue a number of operations to HW and update software rings */
1333 acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,
1334 struct rte_bbdev_stats *queue_stats)
1336 union acc100_enqueue_reg_fmt enq_req;
1337 #ifdef RTE_BBDEV_OFFLOAD_COST
1338 uint64_t start_time = 0;
1339 queue_stats->acc_offload_cycles = 0;
1341 RTE_SET_USED(queue_stats);
1345 /* Setting offset, 100b for 256 DMA Desc */
1346 enq_req.addr_offset = ACC100_DESC_OFFSET;
1348 /* Split ops into batches */
1350 union acc100_dma_desc *desc;
1351 uint16_t enq_batch_size;
1353 rte_iova_t req_elem_addr;
1355 enq_batch_size = RTE_MIN(n, MAX_ENQ_BATCH_SIZE);
1357 /* Set flag on last descriptor in a batch */
1358 desc = q->ring_addr + ((q->sw_ring_head + enq_batch_size - 1) &
1359 q->sw_ring_wrap_mask);
1360 desc->req.last_desc_in_batch = 1;
1362 /* Calculate the 1st descriptor's address */
1363 offset = ((q->sw_ring_head & q->sw_ring_wrap_mask) *
1364 sizeof(union acc100_dma_desc));
1365 req_elem_addr = q->ring_addr_iova + offset;
1367 /* Fill enqueue struct */
1368 enq_req.num_elem = enq_batch_size;
1369 /* low 6 bits are not needed */
1370 enq_req.req_elem_addr = (uint32_t)(req_elem_addr >> 6);
1372 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1373 rte_memdump(stderr, "Req sdone", desc, sizeof(*desc));
1375 rte_bbdev_log_debug(
1376 "Enqueue %u reqs (phys %#"PRIx64") to reg %p",
1379 (void *)q->mmio_reg_enqueue);
1383 #ifdef RTE_BBDEV_OFFLOAD_COST
1384 /* Start time measurement for enqueue function offload. */
1385 start_time = rte_rdtsc_precise();
1387 rte_bbdev_log(DEBUG, "Debug: MMIO Enqueue");
1388 mmio_write(q->mmio_reg_enqueue, enq_req.val);
1390 #ifdef RTE_BBDEV_OFFLOAD_COST
1391 queue_stats->acc_offload_cycles +=
1392 rte_rdtsc_precise() - start_time;
1396 q->sw_ring_head += enq_batch_size;
1397 n -= enq_batch_size;
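/* Illustrative walk-through, assuming MAX_ENQ_BATCH_SIZE == 255:
 * enqueueing n = 300 descriptors issues one MMIO doorbell write for a
 * batch of 255 and another for the remaining 45, setting
 * last_desc_in_batch on the final descriptor of each batch.
 */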
1404 /* Enqueue a set of encode operations for ACC100 device in CB mode */
1406 enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,
1407 uint16_t total_enqueued_cbs, int16_t num)
1409 union acc100_dma_desc *desc = NULL;
1410 uint32_t out_length;
1411 struct rte_mbuf *output_head, *output;
1412 int i, next_triplet;
1413 uint16_t in_length_in_bytes;
1414 struct rte_bbdev_op_ldpc_enc *enc = &ops[0]->ldpc_enc;
1416 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1417 & q->sw_ring_wrap_mask);
1418 desc = q->ring_addr + desc_idx;
1419 acc100_fcw_le_fill(ops[0], &desc->req.fcw_le, num);
1421 /* This could be done at polling time */
1422 desc->req.word0 = ACC100_DMA_DESC_TYPE;
1423 desc->req.word1 = 0; /**< Timestamp could be disabled */
1424 desc->req.word2 = 0;
1425 desc->req.word3 = 0;
1426 desc->req.numCBs = num;
1428 in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;
1429 out_length = (enc->cb_params.e + 7) >> 3;
1430 desc->req.m2dlen = 1 + num;
1431 desc->req.d2mlen = num;
1434 for (i = 0; i < num; i++) {
1435 desc->req.data_ptrs[next_triplet].address =
1436 rte_pktmbuf_iova_offset(ops[i]->ldpc_enc.input.data, 0);
1437 desc->req.data_ptrs[next_triplet].blen = in_length_in_bytes;
1439 desc->req.data_ptrs[next_triplet].address =
1440 rte_pktmbuf_iova_offset(
1441 ops[i]->ldpc_enc.output.data, 0);
1442 desc->req.data_ptrs[next_triplet].blen = out_length;
1444 ops[i]->ldpc_enc.output.length = out_length;
1445 output_head = output = ops[i]->ldpc_enc.output.data;
1446 mbuf_append(output_head, output, out_length);
1447 output->data_len = out_length;
1450 desc->req.op_addr = ops[0];
1452 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1453 rte_memdump(stderr, "FCW", &desc->req.fcw_le,
1454 sizeof(desc->req.fcw_le) - 8);
1455 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1458 /* Multiple CBs (ops) were successfully prepared to enqueue */
1462 /* Enqueue one encode operation for ACC100 device in CB mode */
1464 enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
1465 uint16_t total_enqueued_cbs)
1467 union acc100_dma_desc *desc = NULL;
1469 uint32_t in_offset, out_offset, out_length, mbuf_total_left,
1471 struct rte_mbuf *input, *output_head, *output;
1473 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1474 & q->sw_ring_wrap_mask);
1475 desc = q->ring_addr + desc_idx;
1476 acc100_fcw_le_fill(op, &desc->req.fcw_le, 1);
1478 input = op->ldpc_enc.input.data;
1479 output_head = output = op->ldpc_enc.output.data;
1480 in_offset = op->ldpc_enc.input.offset;
1481 out_offset = op->ldpc_enc.output.offset;
1483 mbuf_total_left = op->ldpc_enc.input.length;
1484 seg_total_left = rte_pktmbuf_data_len(op->ldpc_enc.input.data)
1487 ret = acc100_dma_desc_le_fill(op, &desc->req, &input, output,
1488 &in_offset, &out_offset, &out_length, &mbuf_total_left,
1491 if (unlikely(ret < 0))
1494 mbuf_append(output_head, output, out_length);
1496 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1497 rte_memdump(stderr, "FCW", &desc->req.fcw_le,
1498 sizeof(desc->req.fcw_le) - 8);
1499 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1501 if (check_mbuf_total_left(mbuf_total_left) != 0)
1504 /* One CB (one op) was successfully prepared to enqueue */
1508 /* Enqueue one decode operation for ACC100 device in CB mode */
1510 enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
1511 uint16_t total_enqueued_cbs, bool same_op)
1515 union acc100_dma_desc *desc;
1516 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1517 & q->sw_ring_wrap_mask);
1518 desc = q->ring_addr + desc_idx;
1519 struct rte_mbuf *input, *h_output_head, *h_output;
1520 uint32_t in_offset, h_out_offset, mbuf_total_left, h_out_length = 0;
1521 input = op->ldpc_dec.input.data;
1522 h_output_head = h_output = op->ldpc_dec.hard_output.data;
1523 in_offset = op->ldpc_dec.input.offset;
1524 h_out_offset = op->ldpc_dec.hard_output.offset;
1525 mbuf_total_left = op->ldpc_dec.input.length;
1526 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1527 if (unlikely(input == NULL)) {
1528 rte_bbdev_log(ERR, "Invalid mbuf pointer");
1532 union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
1535 union acc100_dma_desc *prev_desc;
1536 desc_idx = ((q->sw_ring_head + total_enqueued_cbs - 1)
1537 & q->sw_ring_wrap_mask);
1538 prev_desc = q->ring_addr + desc_idx;
1539 uint8_t *prev_ptr = (uint8_t *) prev_desc;
1540 uint8_t *new_ptr = (uint8_t *) desc;
1541 /* Copy first 4 words and BDESCs */
1542 rte_memcpy(new_ptr, prev_ptr, ACC100_5GUL_SIZE_0);
1543 rte_memcpy(new_ptr + ACC100_5GUL_OFFSET_0,
1544 prev_ptr + ACC100_5GUL_OFFSET_0,
1545 ACC100_5GUL_SIZE_1);
1546 desc->req.op_addr = prev_desc->req.op_addr;
1548 rte_memcpy(new_ptr + ACC100_DESC_FCW_OFFSET,
1549 prev_ptr + ACC100_DESC_FCW_OFFSET,
1550 ACC100_FCW_LD_BLEN);
1551 acc100_dma_desc_ld_update(op, &desc->req, input, h_output,
1552 &in_offset, &h_out_offset,
1553 &h_out_length, harq_layout);
1555 struct acc100_fcw_ld *fcw;
1556 uint32_t seg_total_left;
1557 fcw = &desc->req.fcw_ld;
1558 acc100_fcw_ld_fill(op, fcw, harq_layout);
1560 /* Special handling when the requested data exceeds the mbuf size */
1561 if (fcw->rm_e < ACC100_MAX_E_MBUF)
1562 seg_total_left = rte_pktmbuf_data_len(input)
1565 seg_total_left = fcw->rm_e;
1567 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input, h_output,
1568 &in_offset, &h_out_offset,
1569 &h_out_length, &mbuf_total_left,
1570 &seg_total_left, fcw);
1571 if (unlikely(ret < 0))
1576 mbuf_append(h_output_head, h_output, h_out_length);
1577 #ifndef ACC100_EXT_MEM
1578 if (op->ldpc_dec.harq_combined_output.length > 0) {
1579 /* Push the HARQ output into host memory */
1580 struct rte_mbuf *hq_output_head, *hq_output;
1581 hq_output_head = op->ldpc_dec.harq_combined_output.data;
1582 hq_output = op->ldpc_dec.harq_combined_output.data;
1583 mbuf_append(hq_output_head, hq_output,
1584 op->ldpc_dec.harq_combined_output.length);
1588 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1589 rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
1590 sizeof(desc->req.fcw_ld) - 8);
1591 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1594 /* One CB (one op) was successfully prepared to enqueue */
1599 /* Enqueue one decode operation for ACC100 device in TB mode */
1601 enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
1602 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
1604 union acc100_dma_desc *desc = NULL;
1607 uint32_t in_offset, h_out_offset,
1608 h_out_length, mbuf_total_left, seg_total_left;
1609 struct rte_mbuf *input, *h_output_head, *h_output;
1610 uint16_t current_enqueued_cbs = 0;
1612 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1613 & q->sw_ring_wrap_mask);
1614 desc = q->ring_addr + desc_idx;
1615 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
1616 union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
1617 acc100_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout);
1619 input = op->ldpc_dec.input.data;
1620 h_output_head = h_output = op->ldpc_dec.hard_output.data;
1621 in_offset = op->ldpc_dec.input.offset;
1622 h_out_offset = op->ldpc_dec.hard_output.offset;
1624 mbuf_total_left = op->ldpc_dec.input.length;
1625 c = op->ldpc_dec.tb_params.c;
1626 r = op->ldpc_dec.tb_params.r;
1628 while (mbuf_total_left > 0 && r < c) {
1630 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
1632 /* Set up DMA descriptor */
1633 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
1634 & q->sw_ring_wrap_mask);
1635 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
1636 desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
1637 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input,
1638 h_output, &in_offset, &h_out_offset,
1640 &mbuf_total_left, &seg_total_left,
1643 if (unlikely(ret < 0))
1647 mbuf_append(h_output_head, h_output, h_out_length);
1649 /* Set total number of CBs in TB */
1650 desc->req.cbs_in_tb = cbs_in_tb;
1651 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1652 rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
1653 sizeof(desc->req.fcw_ld) - 8);
1654 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1657 if (seg_total_left == 0) {
1658 /* Go to the next mbuf */
1659 input = input->next;
1661 h_output = h_output->next;
1664 total_enqueued_cbs++;
1665 current_enqueued_cbs++;
1669 if (unlikely(desc == NULL))
1670 return current_enqueued_cbs;
1672 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1673 if (check_mbuf_total_left(mbuf_total_left) != 0)
1676 /* Set SDone on last CB descriptor for TB mode */
1677 desc->req.sdone_enable = 1;
1678 desc->req.irq_enable = q->irq_enable;
1680 return current_enqueued_cbs;
1684 /* Calculates number of CBs in processed encoder TB based on 'r' and input
1687 static inline uint8_t
1688 get_num_cbs_in_tb_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)
1690 uint8_t c, c_neg, r, crc24_bits = 0;
1691 uint16_t k, k_neg, k_pos;
1692 uint8_t cbs_in_tb = 0;
1695 length = turbo_enc->input.length;
1696 r = turbo_enc->tb_params.r;
1697 c = turbo_enc->tb_params.c;
1698 c_neg = turbo_enc->tb_params.c_neg;
1699 k_neg = turbo_enc->tb_params.k_neg;
1700 k_pos = turbo_enc->tb_params.k_pos;
1702 if (check_bit(turbo_enc->op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
1704 while (length > 0 && r < c) {
1705 k = (r < c_neg) ? k_neg : k_pos;
1706 length -= (k - crc24_bits) >> 3;
1714 /* Calculates number of CBs in processed decoder TB based on 'r' and input
1717 static inline uint16_t
1718 get_num_cbs_in_tb_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)
1720 uint8_t c, c_neg, r = 0;
1721 uint16_t kw, k, k_neg, k_pos, cbs_in_tb = 0;
1724 length = turbo_dec->input.length;
1725 r = turbo_dec->tb_params.r;
1726 c = turbo_dec->tb_params.c;
1727 c_neg = turbo_dec->tb_params.c_neg;
1728 k_neg = turbo_dec->tb_params.k_neg;
1729 k_pos = turbo_dec->tb_params.k_pos;
1730 while (length > 0 && r < c) {
1731 k = (r < c_neg) ? k_neg : k_pos;
1732 kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
1741 /* Calculates number of CBs in processed decoder TB based on 'r' and input
1744 static inline uint16_t
1745 get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)
1747 uint16_t r, cbs_in_tb = 0;
1748 int32_t length = ldpc_dec->input.length;
1749 r = ldpc_dec->tb_params.r;
1750 while (length > 0 && r < ldpc_dec->tb_params.c) {
1751 length -= (r < ldpc_dec->tb_params.cab) ?
1752 ldpc_dec->tb_params.ea :
1753 ldpc_dec->tb_params.eb;
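/*
 * Illustrative example (assuming the elided loop tail increments r and
 * cbs_in_tb): c = 3, cab = 1, ea = 1000, eb = 1200 and
 * input.length = 3400 consume 1000 + 1200 + 1200 bytes, so the
 * function reports cbs_in_tb = 3.
 */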
1760 /* Check we can mux encode operations with common FCW */
1762 check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {
1766 for (i = 1; i < num; ++i) {
1767 /* Only mux compatible code blocks */
1768 if (memcmp((uint8_t *)(&ops[i]->ldpc_enc) + ACC100_ENC_OFFSET,
1769 (uint8_t *)(&ops[0]->ldpc_enc) +
1771 ACC100_CMP_ENC_SIZE) != 0)
1777 /** Enqueue encode operations for ACC100 device in CB mode. */
1778 static inline uint16_t
1779 acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
1780 struct rte_bbdev_enc_op **ops, uint16_t num)
1782 struct acc100_queue *q = q_data->queue_private;
1783 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
1785 union acc100_dma_desc *desc;
1786 int ret, desc_idx = 0;
1787 int16_t enq, left = num;
1790 if (unlikely(avail < 1))
1793 enq = RTE_MIN(left, ACC100_MUX_5GDL_DESC);
1794 if (check_mux(&ops[i], enq)) {
1795 ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],
1801 ret = enqueue_ldpc_enc_one_op_cb(q, ops[i], desc_idx);
1810 if (unlikely(i == 0))
1811 return 0; /* Nothing to enqueue */
1813 /* Set SDone in last CB in enqueued ops for CB mode */
1814 desc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1)
1815 & q->sw_ring_wrap_mask);
1816 desc->req.sdone_enable = 1;
1817 desc->req.irq_enable = q->irq_enable;
1819 acc100_dma_enqueue(q, desc_idx, &q_data->queue_stats);
1822 q_data->queue_stats.enqueued_count += i;
1823 q_data->queue_stats.enqueue_err_count += num - i;
1828 /* Enqueue encode operations for ACC100 device. */
1830 acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1831 struct rte_bbdev_enc_op **ops, uint16_t num)
1833 if (unlikely(num == 0))
1835 return acc100_enqueue_ldpc_enc_cb(q_data, ops, num);
1838 /* Check we can mux decode operations with common FCW */
1840 cmp_ldpc_dec_op(struct rte_bbdev_dec_op **ops) {
1841 /* Only mux compatible code blocks */
1842 if (memcmp((uint8_t *)(&ops[0]->ldpc_dec) + ACC100_DEC_OFFSET,
1843 (uint8_t *)(&ops[1]->ldpc_dec) +
1844 ACC100_DEC_OFFSET, ACC100_CMP_DEC_SIZE) != 0) {
1851 /* Enqueue decode operations for ACC100 device in TB mode */
1853 acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,
1854 struct rte_bbdev_dec_op **ops, uint16_t num)
1856 struct acc100_queue *q = q_data->queue_private;
1857 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
1858 uint16_t i, enqueued_cbs = 0;
1862 for (i = 0; i < num; ++i) {
1863 cbs_in_tb = get_num_cbs_in_tb_ldpc_dec(&ops[i]->ldpc_dec);
1864 /* Check if there is available space for further processing */
1865 if (unlikely(avail - cbs_in_tb < 0))
1869 ret = enqueue_ldpc_dec_one_op_tb(q, ops[i],
1870 enqueued_cbs, cbs_in_tb);
1873 enqueued_cbs += ret;
1876 acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
1879 q_data->queue_stats.enqueued_count += i;
1880 q_data->queue_stats.enqueue_err_count += num - i;
1884 /* Enqueue decode operations for ACC100 device in CB mode */
1886 acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
1887 struct rte_bbdev_dec_op **ops, uint16_t num)
1889 struct acc100_queue *q = q_data->queue_private;
1890 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
1892 union acc100_dma_desc *desc;
1894 bool same_op = false;
1895 for (i = 0; i < num; ++i) {
1896 /* Check if there is available space for further processing */
1897 if (unlikely(avail < 1))
1902 same_op = cmp_ldpc_dec_op(&ops[i-1]);
1903 rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d",
1904 i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index,
1905 ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count,
1906 ops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c,
1907 ops[i]->ldpc_dec.n_cb, ops[i]->ldpc_dec.q_m,
1908 ops[i]->ldpc_dec.n_filler, ops[i]->ldpc_dec.cb_params.e,
1910 ret = enqueue_ldpc_dec_one_op_cb(q, ops[i], i, same_op);
1915 if (unlikely(i == 0))
1916 return 0; /* Nothing to enqueue */
1918 /* Set SDone in last CB in enqueued ops for CB mode */
1919 desc = q->ring_addr + ((q->sw_ring_head + i - 1)
1920 & q->sw_ring_wrap_mask);
1922 desc->req.sdone_enable = 1;
1923 desc->req.irq_enable = q->irq_enable;
1925 acc100_dma_enqueue(q, i, &q_data->queue_stats);
1928 q_data->queue_stats.enqueued_count += i;
1929 q_data->queue_stats.enqueue_err_count += num - i;
1933 /* Enqueue decode operations for ACC100 device. */
1935 acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1936 struct rte_bbdev_dec_op **ops, uint16_t num)
1938 struct acc100_queue *q = q_data->queue_private;
1939 int32_t aq_avail = q->aq_depth +
1940 (q->aq_dequeued - q->aq_enqueued) / 128;
1942 if (unlikely((aq_avail == 0) || (num == 0)))
1945 if (ops[0]->ldpc_dec.code_block_mode == 0)
1946 return acc100_enqueue_ldpc_dec_tb(q_data, ops, num);
1948 return acc100_enqueue_ldpc_dec_cb(q_data, ops, num);
1952 /* Dequeue one encode operation from ACC100 device in CB mode */
1954 dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
1955 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
1957 union acc100_dma_desc *desc, atom_desc;
1958 union acc100_dma_rsp_desc rsp;
1959 struct rte_bbdev_enc_op *op;
1962 desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
1963 & q->sw_ring_wrap_mask);
1964 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
1967 /* Check fdone bit */
1968 if (!(atom_desc.rsp.val & ACC100_FDONE))
1971 rsp.val = atom_desc.rsp.val;
1972 rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
1975 op = desc->req.op_addr;
1977 /* Clearing status, it will be set based on response */
1980 op->status |= ((rsp.input_err)
1981 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
1982 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
1983 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
1985 if (desc->req.last_desc_in_batch) {
1987 desc->req.last_desc_in_batch = 0;
1989 desc->rsp.val = ACC100_DMA_DESC_TYPE;
1990 desc->rsp.add_info_0 = 0; /* Reserved bits */
1991 desc->rsp.add_info_1 = 0; /* Reserved bits */
1993 /* Flag that the muxing causes loss of opaque data */
1994 op->opaque_data = (void *)-1;
1995 for (i = 0; i < desc->req.numCBs; i++)
1998 /* One descriptor (possibly muxing several CBs) was successfully dequeued */
1999 return desc->req.numCBs;
2002 /* Dequeue one encode operation from ACC100 device in TB mode */
2004 dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
2005 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
2007 union acc100_dma_desc *desc, *last_desc, atom_desc;
2008 union acc100_dma_rsp_desc rsp;
2009 struct rte_bbdev_enc_op *op;
2011 uint16_t current_dequeued_cbs = 0, cbs_in_tb;
2013 desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
2014 & q->sw_ring_wrap_mask);
2015 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2018 /* Check fdone bit */
2019 if (!(atom_desc.rsp.val & ACC100_FDONE))
2022 /* Get number of CBs in dequeued TB */
2023 cbs_in_tb = desc->req.cbs_in_tb;
2025 last_desc = q->ring_addr + ((q->sw_ring_tail
2026 + total_dequeued_cbs + cbs_in_tb - 1)
2027 & q->sw_ring_wrap_mask);
2028 /* Check if last CB in TB is ready to dequeue (and thus
2029 * the whole TB) - checking sdone bit. If not return.
2031 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
2033 if (!(atom_desc.rsp.val & ACC100_SDONE))
2037 op = desc->req.op_addr;
2039 /* Clearing status, it will be set based on response */
2042 while (i < cbs_in_tb) {
2043 desc = q->ring_addr + ((q->sw_ring_tail
2044 + total_dequeued_cbs)
2045 & q->sw_ring_wrap_mask);
2046 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2048 rsp.val = atom_desc.rsp.val;
2049 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
2052 op->status |= ((rsp.input_err)
2053 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
2054 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2055 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2057 if (desc->req.last_desc_in_batch) {
2059 desc->req.last_desc_in_batch = 0;
2061 desc->rsp.val = ACC100_DMA_DESC_TYPE;
2062 desc->rsp.add_info_0 = 0;
2063 desc->rsp.add_info_1 = 0;
2064 total_dequeued_cbs++;
2065 current_dequeued_cbs++;
2071 return current_dequeued_cbs;
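/*
 * Worked example for the TB accounting above (illustrative): a transport
 * block encoded as cbs_in_tb = 3 code blocks occupies three consecutive
 * descriptors. With sw_ring_tail = 100 and total_dequeued_cbs = 0, the
 * loop reads descriptors 100, 101 and 102, ORs each CB's error bits into
 * the single op status, and returns current_dequeued_cbs = 3.
 */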
/* Dequeue one decode operation from ACC100 device in CB mode */
static inline int
dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
		struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
		uint16_t dequeued_cbs, uint32_t *aq_dequeued)
{
	union acc100_dma_desc *desc, atom_desc;
	union acc100_dma_rsp_desc rsp;
	struct rte_bbdev_dec_op *op;

	desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
			& q->sw_ring_wrap_mask);
	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
			__ATOMIC_RELAXED);

	/* Check fdone bit */
	if (!(atom_desc.rsp.val & ACC100_FDONE))
		return -1;

	rsp.val = atom_desc.rsp.val;
	rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);

	/* Dequeue */
	op = desc->req.op_addr;

	/* Clearing status, it will be set based on response */
	op->status = 0;
	op->status |= ((rsp.input_err)
			? (1 << RTE_BBDEV_DATA_ERROR) : 0);
	op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
	op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
	if (op->status != 0)
		q_data->queue_stats.dequeue_err_count++;

	/* CRC invalid if error exists */
	if (!op->status)
		op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
	op->turbo_dec.iter_count = (uint8_t) rsp.iter_cnt / 2;

	/* Check if this is the last desc in batch (Atomic Queue) */
	if (desc->req.last_desc_in_batch) {
		(*aq_dequeued)++;
		desc->req.last_desc_in_batch = 0;
	}
	desc->rsp.val = ACC100_DMA_DESC_TYPE;
	desc->rsp.add_info_0 = 0;
	desc->rsp.add_info_1 = 0;
	*ref_op = op;

	/* One CB (op) was successfully dequeued */
	return 1;
}
/* Dequeue one LDPC decode operation from ACC100 device in CB mode */
static inline int
dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
		struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
		uint16_t dequeued_cbs, uint32_t *aq_dequeued)
{
	union acc100_dma_desc *desc, atom_desc;
	union acc100_dma_rsp_desc rsp;
	struct rte_bbdev_dec_op *op;

	desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
			& q->sw_ring_wrap_mask);
	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
			__ATOMIC_RELAXED);

	/* Check fdone bit */
	if (!(atom_desc.rsp.val & ACC100_FDONE))
		return -1;

	rsp.val = atom_desc.rsp.val;

	/* Dequeue */
	op = desc->req.op_addr;

	/* Clearing status, it will be set based on response */
	op->status = 0;
	op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;
	op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;
	op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;
	if (op->status != 0)
		q_data->queue_stats.dequeue_err_count++;

	op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
	if (op->ldpc_dec.hard_output.length > 0 && !rsp.synd_ok)
		op->status |= 1 << RTE_BBDEV_SYNDROME_ERROR;
	op->ldpc_dec.iter_count = (uint8_t) rsp.iter_cnt;

	/* Check if this is the last desc in batch (Atomic Queue) */
	if (desc->req.last_desc_in_batch) {
		(*aq_dequeued)++;
		desc->req.last_desc_in_batch = 0;
	}
	desc->rsp.val = ACC100_DMA_DESC_TYPE;
	desc->rsp.add_info_0 = 0;
	desc->rsp.add_info_1 = 0;

	*ref_op = op;

	/* One CB (op) was successfully dequeued */
	return 1;
}
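/*
 * Status decoding sketch (illustrative): after a successful dequeue, the
 * application inspects the per-op status bits set above, e.g.:
 *
 *	if (op->status & (1 << RTE_BBDEV_CRC_ERROR))
 *		... CRC check failed for this code block ...
 *	if (op->status & (1 << RTE_BBDEV_SYNDROME_ERROR))
 *		... decoder did not converge within iter_max ...
 *
 * op->ldpc_dec.iter_count additionally reports the iterations consumed.
 */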
/* Dequeue one decode operation from ACC100 device in TB mode. */
static inline int
dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
		uint16_t dequeued_cbs, uint32_t *aq_dequeued)
{
	union acc100_dma_desc *desc, *last_desc, atom_desc;
	union acc100_dma_rsp_desc rsp;
	struct rte_bbdev_dec_op *op;
	uint8_t cbs_in_tb = 1, cb_idx = 0;

	desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
			& q->sw_ring_wrap_mask);
	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
			__ATOMIC_RELAXED);

	/* Check fdone bit */
	if (!(atom_desc.rsp.val & ACC100_FDONE))
		return -1;

	/* Dequeue */
	op = desc->req.op_addr;

	/* Get number of CBs in dequeued TB */
	cbs_in_tb = desc->req.cbs_in_tb;
	/* Get last CB */
	last_desc = q->ring_addr + ((q->sw_ring_tail
			+ dequeued_cbs + cbs_in_tb - 1)
			& q->sw_ring_wrap_mask);
	/* Check if last CB in TB is ready to dequeue (and thus
	 * the whole TB) - checking sdone bit. If not return.
	 */
	atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
			__ATOMIC_RELAXED);
	if (!(atom_desc.rsp.val & ACC100_SDONE))
		return -1;

	/* Clearing status, it will be set based on response */
	op->status = 0;

	/* Read remaining CBs if any */
	while (cb_idx < cbs_in_tb) {
		desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
				& q->sw_ring_wrap_mask);
		atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
				__ATOMIC_RELAXED);
		rsp.val = atom_desc.rsp.val;
		rte_bbdev_log_debug("Resp. desc %p: %x", desc,
				rsp.val);
		op->status |= ((rsp.input_err)
				? (1 << RTE_BBDEV_DATA_ERROR) : 0);
		op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
		op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);

		/* CRC invalid if error exists */
		if (!op->status)
			op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
		op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
				op->turbo_dec.iter_count);

		/* Check if this is the last desc in batch (Atomic Queue) */
		if (desc->req.last_desc_in_batch) {
			(*aq_dequeued)++;
			desc->req.last_desc_in_batch = 0;
		}
		desc->rsp.val = ACC100_DMA_DESC_TYPE;
		desc->rsp.add_info_0 = 0;
		desc->rsp.add_info_1 = 0;
		dequeued_cbs++;
		cb_idx++;
	}

	*ref_op = op;

	return cb_idx;
}
/* Dequeue LDPC encode operations from ACC100 device. */
static uint16_t
acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num)
{
	struct acc100_queue *q = q_data->queue_private;
	uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
	uint32_t aq_dequeued = 0;
	uint16_t dequeue_num, i, dequeued_cbs = 0, dequeued_descs = 0;
	int ret;

#ifdef RTE_LIBRTE_BBDEV_DEBUG
	if (unlikely(ops == NULL || q == NULL))
		return 0;
#endif

	dequeue_num = RTE_MIN(avail, num);

	/* Each descriptor may mux several CBs into a single op */
	for (i = 0; i < dequeue_num; i++) {
		ret = dequeue_enc_one_op_cb(q, &ops[dequeued_cbs],
				dequeued_descs, &aq_dequeued);
		if (ret < 0)
			break;
		dequeued_cbs += ret;
		dequeued_descs++;
		if (dequeued_cbs >= num)
			break;
	}

	q->aq_dequeued += aq_dequeued;
	q->sw_ring_tail += dequeued_descs;

	/* Update dequeue stats */
	q_data->queue_stats.dequeued_count += dequeued_cbs;

	return dequeued_cbs;
}
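/*
 * Polling sketch (illustrative): the matching application-side call is
 *
 *	struct rte_bbdev_enc_op *deq[32];
 *	uint16_t n = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
 *			deq, 32);
 *
 * which returns up to 32 completed ops; callers typically poll in a loop
 * until every previously enqueued op has been retrieved.
 */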
/* Dequeue decode operations from ACC100 device. */
static uint16_t
acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num)
{
	struct acc100_queue *q = q_data->queue_private;
	uint16_t dequeue_num;
	uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
	uint32_t aq_dequeued = 0;
	uint16_t i;
	uint16_t dequeued_cbs = 0;
	struct rte_bbdev_dec_op *op;
	int ret;

#ifdef RTE_LIBRTE_BBDEV_DEBUG
	if (unlikely(ops == NULL || q == NULL))
		return 0;
#endif

	dequeue_num = RTE_MIN(avail, num);

	for (i = 0; i < dequeue_num; ++i) {
		op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
			& q->sw_ring_wrap_mask))->req.op_addr;
		if (op->ldpc_dec.code_block_mode == 0)
			ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
					&aq_dequeued);
		else
			ret = dequeue_ldpc_dec_one_op_cb(
					q_data, q, &ops[i], dequeued_cbs,
					&aq_dequeued);
		if (ret < 0)
			break;
		dequeued_cbs += ret;
	}

	q->aq_dequeued += aq_dequeued;
	q->sw_ring_tail += dequeued_cbs;

	/* Update dequeue stats */
	q_data->queue_stats.dequeued_count += i;

	return i;
}
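/*
 * Polling sketch (illustrative): the application-side counterpart is
 * rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id, deq, burst), which
 * hands back completed decode ops; deq[i]->status should then be checked
 * as described above before the ops are returned to their mempool with
 * rte_bbdev_dec_op_free_bulk().
 */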
/* Initialization Function */
static void
acc100_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev->dev_ops = &acc100_bbdev_ops;
	dev->enqueue_ldpc_enc_ops = acc100_enqueue_ldpc_enc;
	dev->enqueue_ldpc_dec_ops = acc100_enqueue_ldpc_dec;
	dev->dequeue_ldpc_enc_ops = acc100_dequeue_ldpc_enc;
	dev->dequeue_ldpc_dec_ops = acc100_dequeue_ldpc_dec;

	/* Device acts as PF when matched by the PF driver name */
	((struct acc100_device *) dev->data->dev_private)->pf_device =
			!strcmp(drv->driver.name,
					RTE_STR(ACC100PF_DRIVER_NAME));
	((struct acc100_device *) dev->data->dev_private)->mmio_base =
			pci_dev->mem_resource[0].addr;

	rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"",
			drv->driver.name, dev->data->name,
			(void *)pci_dev->mem_resource[0].addr,
			pci_dev->mem_resource[0].phys_addr);
}
static int acc100_pci_probe(struct rte_pci_driver *pci_drv,
	struct rte_pci_device *pci_dev)
{
	struct rte_bbdev *bbdev = NULL;
	char dev_name[RTE_BBDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		rte_bbdev_log(ERR, "NULL PCI device");
		return -EINVAL;
	}

	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));

	/* Allocate memory to be used privately by drivers */
	bbdev = rte_bbdev_allocate(pci_dev->device.name);
	if (bbdev == NULL)
		return -ENODEV;

	/* Allocate device private memory */
	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
			sizeof(struct acc100_device), RTE_CACHE_LINE_SIZE,
			pci_dev->device.numa_node);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_log(CRIT,
				"Allocate of %zu bytes for device \"%s\" failed",
				sizeof(struct acc100_device), dev_name);
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	/* Fill HW specific part of device structure */
	bbdev->device = &pci_dev->device;
	bbdev->intr_handle = &pci_dev->intr_handle;
	bbdev->data->socket_id = pci_dev->device.numa_node;

	/* Invoke ACC100 device initialization function */
	acc100_bbdev_init(bbdev, pci_drv);

	rte_bbdev_log_debug("Initialised bbdev %s (id = %u)",
			dev_name, bbdev->data->dev_id);
	return 0;
}
static int acc100_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_bbdev *bbdev;
	int ret;
	uint8_t dev_id;

	if (pci_dev == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
	if (bbdev == NULL) {
		rte_bbdev_log(ERR,
				"Couldn't find HW dev \"%s\" to uninitialise it",
				pci_dev->device.name);
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free device private memory before close */
	rte_free(bbdev->data->dev_private);
	ret = rte_bbdev_close(dev_id);
	if (ret < 0)
		rte_bbdev_log(ERR,
				"Device %i failed to close during uninit: %i",
				dev_id, ret);

	/* release bbdev from library */
	rte_bbdev_release(bbdev);
	rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
	return 0;
}
static struct rte_pci_driver acc100_pci_pf_driver = {
	.probe = acc100_pci_probe,
	.remove = acc100_pci_remove,
	.id_table = pci_id_acc100_pf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
};

static struct rte_pci_driver acc100_pci_vf_driver = {
	.probe = acc100_pci_probe,
	.remove = acc100_pci_remove,
	.id_table = pci_id_acc100_vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
};

RTE_PMD_REGISTER_PCI(ACC100PF_DRIVER_NAME, acc100_pci_pf_driver);
RTE_PMD_REGISTER_PCI_TABLE(ACC100PF_DRIVER_NAME, pci_id_acc100_pf_map);
RTE_PMD_REGISTER_PCI(ACC100VF_DRIVER_NAME, acc100_pci_vf_driver);
RTE_PMD_REGISTER_PCI_TABLE(ACC100VF_DRIVER_NAME, pci_id_acc100_vf_map);
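/*
 * Deployment note (informational): RTE_PMD_REGISTER_PCI() registers the
 * PF and VF drivers with the EAL PCI bus under the names above, and
 * RTE_PMD_REGISTER_PCI_TABLE() exports their id tables so tooling such
 * as dpdk-devbind.py can match the device. A typical flow is to bind the
 * ACC100 PCI address to vfio-pci and let rte_eal_init() probe it, at
 * which point acc100_pci_probe() above is invoked.
 */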