1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
7 #include <rte_common.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_byteorder.h>
13 #include <rte_errno.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_hexdump.h>
17 #include <rte_bus_pci.h>
18 #ifdef RTE_BBDEV_OFFLOAD_COST
19 #include <rte_cycles.h>
20 #endif
22 #include <rte_bbdev.h>
23 #include <rte_bbdev_pmd.h>
24 #include "rte_acc100_pmd.h"
26 #ifdef RTE_LIBRTE_BBDEV_DEBUG
27 RTE_LOG_REGISTER_DEFAULT(acc100_logtype, DEBUG);
28 #else
29 RTE_LOG_REGISTER_DEFAULT(acc100_logtype, NOTICE);
30 #endif
32 /* Write to MMIO register address */
34 mmio_write(void *addr, uint32_t value)
36 *((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
39 /* Write a register of an ACC100 device */
41 acc100_reg_write(struct acc100_device *d, uint32_t offset, uint32_t value)
43 void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
44 mmio_write(reg_addr, value);
45 usleep(ACC100_LONG_WAIT);
48 /* Read a register of an ACC100 device */
49 static inline uint32_t
50 acc100_reg_read(struct acc100_device *d, uint32_t offset)
53 void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
54 uint32_t ret = *((volatile uint32_t *)(reg_addr));
55 return rte_le_to_cpu_32(ret);
58 /* Basic implementation of log2, valid only for an exact power of two (2^N) */
59 static inline uint32_t
60 log2_basic(uint32_t value)
62 return (value == 0) ? 0 : rte_bsf32(value);
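/*
 * Illustrative note (an addition, not from the original source): rte_bsf32()
 * returns the index of the least-significant set bit, which equals
 * log2(value) only for exact powers of two, e.g. log2_basic(64) = 6 and
 * log2_basic(1024) = 10; log2_basic(0) returns 0 by convention here.
 */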
65 /* Calculate memory alignment offset assuming alignment is 2^N */
66 static inline uint32_t
67 calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)
69 rte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);
70 return (uint32_t)(alignment -
71 (unaligned_phy_mem & (alignment-1)));
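/*
 * Worked example (illustrative addition): with alignment = 0x4000000 (64MB)
 * and an IOVA of 0x12345678,
 *   0x12345678 & 0x3FFFFFF = 0x2345678
 *   offset = 0x4000000 - 0x2345678 = 0x1CBA988
 * so IOVA + offset = 0x14000000, the next 64MB boundary. Note that an
 * already-aligned IOVA yields offset == alignment (the next boundary), not 0.
 */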
74 /* Calculate the offset of the enqueue register */
75 static inline uint32_t
76 queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)
77 {
78 if (pf_device)
79 return ((vf_id << 12) + (qgrp_id << 7) + (aq_id << 3) +
80 HWPfQmgrIngressAq);
81 else
82 return ((qgrp_id << 7) + (aq_id << 3) +
83 HWVfQmgrIngressAq);
84 }
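/*
 * Illustrative example (addition): the offset packs {vf_id, qgrp_id, aq_id}
 * as (vf_id << 12) + (qgrp_id << 7) + (aq_id << 3) on top of the ingress-AQ
 * base register. For instance vf_id = 2, qgrp_id = 3, aq_id = 4 gives
 * 0x2000 + 0x180 + 0x20 = 0x21A0 relative to that base.
 */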
86 enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, NUM_ACC};
88 /* Return the accelerator enum for a Queue Group Index */
90 accFromQgid(int qg_idx, const struct rte_acc100_conf *acc100_conf)
92 int accQg[ACC100_NUM_QGRPS];
93 int NumQGroupsPerFn[NUM_ACC];
94 int acc, qgIdx, qgIndex = 0;
95 for (qgIdx = 0; qgIdx < ACC100_NUM_QGRPS; qgIdx++)
96 accQg[qgIdx] = 0;
97 NumQGroupsPerFn[UL_4G] = acc100_conf->q_ul_4g.num_qgroups;
98 NumQGroupsPerFn[UL_5G] = acc100_conf->q_ul_5g.num_qgroups;
99 NumQGroupsPerFn[DL_4G] = acc100_conf->q_dl_4g.num_qgroups;
100 NumQGroupsPerFn[DL_5G] = acc100_conf->q_dl_5g.num_qgroups;
101 for (acc = UL_4G; acc < NUM_ACC; acc++)
102 for (qgIdx = 0; qgIdx < NumQGroupsPerFn[acc]; qgIdx++)
103 accQg[qgIndex++] = acc;
104 return accQg[qg_idx];
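/*
 * Illustrative example (addition): with 2 queue groups configured per
 * operation type, the accQg[] lookup built above is
 *   {UL_4G, UL_4G, UL_5G, UL_5G, DL_4G, DL_4G, DL_5G, DL_5G, 0, ...}
 * so qg_idx = 3 maps to UL_5G. Queue groups are laid out contiguously in
 * the fixed order UL_4G, UL_5G, DL_4G, DL_5G.
 */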
108 /* Return the queue topology for a Queue Group Index */
110 qtopFromAcc(struct rte_acc100_queue_topology **qtop, int acc_enum,
111 struct rte_acc100_conf *acc100_conf)
113 struct rte_acc100_queue_topology *p_qtop;
117 p_qtop = &(acc100_conf->q_ul_4g);
120 p_qtop = &(acc100_conf->q_ul_5g);
123 p_qtop = &(acc100_conf->q_dl_4g);
126 p_qtop = &(acc100_conf->q_dl_5g);
130 rte_bbdev_log(ERR, "Unexpected error evaluating qtopFromAcc");
136 /* Return the AQ depth for a Queue Group Index */
138 aqDepth(int qg_idx, struct rte_acc100_conf *acc100_conf)
140 struct rte_acc100_queue_topology *q_top = NULL;
141 int acc_enum = accFromQgid(qg_idx, acc100_conf);
142 qtopFromAcc(&q_top, acc_enum, acc100_conf);
143 if (unlikely(q_top == NULL))
145 return q_top->aq_depth_log2;
148 /* Return the number of AQs for a Queue Group Index */
150 aqNum(int qg_idx, struct rte_acc100_conf *acc100_conf)
152 struct rte_acc100_queue_topology *q_top = NULL;
153 int acc_enum = accFromQgid(qg_idx, acc100_conf);
154 qtopFromAcc(&q_top, acc_enum, acc100_conf);
155 if (unlikely(q_top == NULL))
157 return q_top->num_aqs_per_groups;
161 initQTop(struct rte_acc100_conf *acc100_conf)
163 acc100_conf->q_ul_4g.num_aqs_per_groups = 0;
164 acc100_conf->q_ul_4g.num_qgroups = 0;
165 acc100_conf->q_ul_4g.first_qgroup_index = -1;
166 acc100_conf->q_ul_5g.num_aqs_per_groups = 0;
167 acc100_conf->q_ul_5g.num_qgroups = 0;
168 acc100_conf->q_ul_5g.first_qgroup_index = -1;
169 acc100_conf->q_dl_4g.num_aqs_per_groups = 0;
170 acc100_conf->q_dl_4g.num_qgroups = 0;
171 acc100_conf->q_dl_4g.first_qgroup_index = -1;
172 acc100_conf->q_dl_5g.num_aqs_per_groups = 0;
173 acc100_conf->q_dl_5g.num_qgroups = 0;
174 acc100_conf->q_dl_5g.first_qgroup_index = -1;
178 updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,
179 struct acc100_device *d) {
181 struct rte_acc100_queue_topology *q_top = NULL;
182 qtopFromAcc(&q_top, acc, acc100_conf);
183 if (unlikely(q_top == NULL))
186 q_top->num_qgroups++;
187 if (q_top->first_qgroup_index == -1) {
188 q_top->first_qgroup_index = qg;
189 /* Can be optimized to assume all are enabled by default */
190 reg = acc100_reg_read(d, queue_offset(d->pf_device,
191 0, qg, ACC100_NUM_AQS - 1));
192 if (reg & ACC100_QUEUE_ENABLE) {
193 q_top->num_aqs_per_groups = ACC100_NUM_AQS;
196 q_top->num_aqs_per_groups = 0;
197 for (aq = 0; aq < ACC100_NUM_AQS; aq++) {
198 reg = acc100_reg_read(d, queue_offset(d->pf_device,
199 0, qg, aq));
200 if (reg & ACC100_QUEUE_ENABLE)
201 q_top->num_aqs_per_groups++;
206 /* Fetch configuration enabled for the PF/VF using MMIO Read (slow) */
208 fetch_acc100_config(struct rte_bbdev *dev)
210 struct acc100_device *d = dev->data->dev_private;
211 struct rte_acc100_conf *acc100_conf = &d->acc100_conf;
212 const struct acc100_registry_addr *reg_addr;
214 uint32_t reg, reg_aq, reg_len0, reg_len1;
217 /* No need to retrieve the configuration if it is already done */
221 /* Choose correct registry addresses for the device type */
223 reg_addr = &pf_reg_addr;
225 reg_addr = &vf_reg_addr;
227 d->ddr_size = (1 + acc100_reg_read(d, reg_addr->ddr_range)) << 10;
229 /* Single VF Bundle by VF */
230 acc100_conf->num_vf_bundles = 1;
231 initQTop(acc100_conf);
233 struct rte_acc100_queue_topology *q_top = NULL;
234 int qman_func_id[ACC100_NUM_ACCS] = {ACC100_ACCMAP_0, ACC100_ACCMAP_1,
235 ACC100_ACCMAP_2, ACC100_ACCMAP_3, ACC100_ACCMAP_4};
236 reg = acc100_reg_read(d, reg_addr->qman_group_func);
237 for (qg = 0; qg < ACC100_NUM_QGRPS_PER_WORD; qg++) {
238 reg_aq = acc100_reg_read(d,
239 queue_offset(d->pf_device, 0, qg, 0));
240 if (reg_aq & ACC100_QUEUE_ENABLE) {
241 uint32_t idx = (reg >> (qg * 4)) & 0x7;
242 if (idx < ACC100_NUM_ACCS) {
243 acc = qman_func_id[idx];
244 updateQtop(acc, qg, acc100_conf, d);
249 /* Check the depth of the AQs */
250 reg_len0 = acc100_reg_read(d, reg_addr->depth_log0_offset);
251 reg_len1 = acc100_reg_read(d, reg_addr->depth_log1_offset);
252 for (acc = 0; acc < NUM_ACC; acc++) {
253 qtopFromAcc(&q_top, acc, acc100_conf);
254 if (q_top->first_qgroup_index < ACC100_NUM_QGRPS_PER_WORD)
255 q_top->aq_depth_log2 = (reg_len0 >>
256 (q_top->first_qgroup_index * 4))
259 q_top->aq_depth_log2 = (reg_len1 >>
260 ((q_top->first_qgroup_index -
261 ACC100_NUM_QGRPS_PER_WORD) * 4))
267 reg_mode = acc100_reg_read(d, HWPfHiPfMode);
268 acc100_conf->pf_mode_en = (reg_mode == ACC100_PF_VAL) ? 1 : 0;
272 "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u\n",
273 (d->pf_device) ? "PF" : "VF",
274 (acc100_conf->input_pos_llr_1_bit) ? "POS" : "NEG",
275 (acc100_conf->output_pos_llr_1_bit) ? "POS" : "NEG",
276 acc100_conf->q_ul_4g.num_qgroups,
277 acc100_conf->q_dl_4g.num_qgroups,
278 acc100_conf->q_ul_5g.num_qgroups,
279 acc100_conf->q_dl_5g.num_qgroups,
280 acc100_conf->q_ul_4g.num_aqs_per_groups,
281 acc100_conf->q_dl_4g.num_aqs_per_groups,
282 acc100_conf->q_ul_5g.num_aqs_per_groups,
283 acc100_conf->q_dl_5g.num_aqs_per_groups,
284 acc100_conf->q_ul_4g.aq_depth_log2,
285 acc100_conf->q_dl_4g.aq_depth_log2,
286 acc100_conf->q_ul_5g.aq_depth_log2,
287 acc100_conf->q_dl_5g.aq_depth_log2);
291 free_base_addresses(void **base_addrs, int size)
294 for (i = 0; i < size; i++)
295 rte_free(base_addrs[i]);
298 static inline uint32_t
301 return sizeof(union acc100_dma_desc);
304 /* Allocate the 2 * 64MB block for the sw rings */
306 alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc100_device *d,
309 uint32_t sw_ring_size = ACC100_SIZE_64MBYTE;
310 d->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,
311 2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);
312 if (d->sw_rings_base == NULL) {
313 rte_bbdev_log(ERR, "Failed to allocate memory for %s:%u",
314 dev->device->driver->name,
318 uint32_t next_64mb_align_offset = calc_mem_alignment_offset(
319 d->sw_rings_base, ACC100_SIZE_64MBYTE);
320 d->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);
321 d->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +
322 next_64mb_align_offset;
323 d->sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
324 d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
329 /* Attempt to allocate minimised memory space for sw rings */
331 alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc100_device *d,
332 uint16_t num_queues, int socket)
334 rte_iova_t sw_rings_base_iova, next_64mb_align_addr_iova;
335 uint32_t next_64mb_align_offset;
336 rte_iova_t sw_ring_iova_end_addr;
337 void *base_addrs[ACC100_SW_RING_MEM_ALLOC_ATTEMPTS];
340 uint32_t q_sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
341 uint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;
343 /* Find an aligned block of memory to store sw rings */
344 while (i < ACC100_SW_RING_MEM_ALLOC_ATTEMPTS) {
346 * sw_ring allocated memory is guaranteed to be aligned to
347 * q_sw_ring_size, provided that the requested size is
348 * less than the page size
350 sw_rings_base = rte_zmalloc_socket(
351 dev->device->driver->name,
352 dev_sw_ring_size, q_sw_ring_size, socket);
354 if (sw_rings_base == NULL) {
356 "Failed to allocate memory for %s:%u",
357 dev->device->driver->name,
362 sw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);
363 next_64mb_align_offset = calc_mem_alignment_offset(
364 sw_rings_base, ACC100_SIZE_64MBYTE);
365 next_64mb_align_addr_iova = sw_rings_base_iova +
366 next_64mb_align_offset;
367 sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size;
369 /* Check if the end of the sw ring memory block is before the
370 * start of next 64MB aligned mem address
372 if (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {
373 d->sw_rings_iova = sw_rings_base_iova;
374 d->sw_rings = sw_rings_base;
375 d->sw_rings_base = sw_rings_base;
376 d->sw_ring_size = q_sw_ring_size;
377 d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
380 /* Store the address of the unaligned mem block */
381 base_addrs[i] = sw_rings_base;
385 /* Free all unaligned blocks of mem allocated in the loop */
386 free_base_addresses(base_addrs, i);
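/*
 * Illustrative sizing (addition, assuming ACC100_MAX_QUEUE_DEPTH is 1024 and
 * a 256-byte descriptor, consistent with the (desc_idx << 8) addressing used
 * at ring init): q_sw_ring_size = 256KB, so num_queues = 16 requests a 4MB
 * block aligned on 256KB. The block is kept only if it does not straddle the
 * next 64MB boundary; otherwise it is stashed in base_addrs[] and the
 * allocation is retried, with all rejected blocks freed in one pass above.
 */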
390 * Find queue_id of a device queue based on details from the Info Ring.
391 * If a queue isn't found, UINT16_MAX is returned.
393 static inline uint16_t
394 get_queue_id_from_ring_info(struct rte_bbdev_data *data,
395 const union acc100_info_ring_data ring_data)
399 for (queue_id = 0; queue_id < data->num_queues; ++queue_id) {
400 struct acc100_queue *acc100_q =
401 data->queues[queue_id].queue_private;
402 if (acc100_q != NULL && acc100_q->aq_id == ring_data.aq_id &&
403 acc100_q->qgrp_id == ring_data.qg_id &&
404 acc100_q->vf_id == ring_data.vf_id)
411 /* Checks PF Info Ring to find the interrupt cause and handles it accordingly */
413 acc100_check_ir(struct acc100_device *acc100_dev)
415 volatile union acc100_info_ring_data *ring_data;
416 uint16_t info_ring_head = acc100_dev->info_ring_head;
417 if (acc100_dev->info_ring == NULL)
420 ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
421 ACC100_INFO_RING_MASK);
423 while (ring_data->valid) {
424 if ((ring_data->int_nb < ACC100_PF_INT_DMA_DL_DESC_IRQ) || (
426 ACC100_PF_INT_DMA_DL5G_DESC_IRQ))
427 rte_bbdev_log(WARNING, "InfoRing: ITR:%d Info:0x%x",
428 ring_data->int_nb, ring_data->detailed_info);
429 /* Initialize Info Ring entry and move forward */
432 ring_data = acc100_dev->info_ring +
433 (info_ring_head & ACC100_INFO_RING_MASK);
437 /* Checks PF Info Ring to find the interrupt cause and handles it accordingly */
439 acc100_pf_interrupt_handler(struct rte_bbdev *dev)
441 struct acc100_device *acc100_dev = dev->data->dev_private;
442 volatile union acc100_info_ring_data *ring_data;
443 struct acc100_deq_intr_details deq_intr_det;
445 ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
446 ACC100_INFO_RING_MASK);
448 while (ring_data->valid) {
451 "ACC100 PF Interrupt received, Info Ring data: 0x%x",
454 switch (ring_data->int_nb) {
455 case ACC100_PF_INT_DMA_DL_DESC_IRQ:
456 case ACC100_PF_INT_DMA_UL_DESC_IRQ:
457 case ACC100_PF_INT_DMA_UL5G_DESC_IRQ:
458 case ACC100_PF_INT_DMA_DL5G_DESC_IRQ:
459 deq_intr_det.queue_id = get_queue_id_from_ring_info(
460 dev->data, *ring_data);
461 if (deq_intr_det.queue_id == UINT16_MAX) {
463 "Couldn't find queue: aq_id: %u, qg_id: %u, vf_id: %u",
469 rte_bbdev_pmd_callback_process(dev,
470 RTE_BBDEV_EVENT_DEQUEUE, &deq_intr_det);
473 rte_bbdev_pmd_callback_process(dev,
474 RTE_BBDEV_EVENT_ERROR, NULL);
478 /* Initialize Info Ring entry and move forward */
480 ++acc100_dev->info_ring_head;
481 ring_data = acc100_dev->info_ring +
482 (acc100_dev->info_ring_head &
483 ACC100_INFO_RING_MASK);
487 /* Checks VF Info Ring to find the interrupt cause and handles it accordingly */
489 acc100_vf_interrupt_handler(struct rte_bbdev *dev)
491 struct acc100_device *acc100_dev = dev->data->dev_private;
492 volatile union acc100_info_ring_data *ring_data;
493 struct acc100_deq_intr_details deq_intr_det;
495 ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
496 ACC100_INFO_RING_MASK);
498 while (ring_data->valid) {
501 "ACC100 VF Interrupt received, Info Ring data: 0x%x",
504 switch (ring_data->int_nb) {
505 case ACC100_VF_INT_DMA_DL_DESC_IRQ:
506 case ACC100_VF_INT_DMA_UL_DESC_IRQ:
507 case ACC100_VF_INT_DMA_UL5G_DESC_IRQ:
508 case ACC100_VF_INT_DMA_DL5G_DESC_IRQ:
509 /* VFs are not aware of their vf_id - it's set to 0 in
510 * the queue structures, so zero it here to match.
511 */
512 ring_data->vf_id = 0;
513 deq_intr_det.queue_id = get_queue_id_from_ring_info(
514 dev->data, *ring_data);
515 if (deq_intr_det.queue_id == UINT16_MAX) {
517 "Couldn't find queue: aq_id: %u, qg_id: %u",
522 rte_bbdev_pmd_callback_process(dev,
523 RTE_BBDEV_EVENT_DEQUEUE, &deq_intr_det);
526 rte_bbdev_pmd_callback_process(dev,
527 RTE_BBDEV_EVENT_ERROR, NULL);
531 /* Initialize Info Ring entry and move forward */
532 ring_data->valid = 0;
533 ++acc100_dev->info_ring_head;
534 ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head
535 & ACC100_INFO_RING_MASK);
539 /* Interrupt handler triggered by ACC100 dev for handling specific interrupt */
541 acc100_dev_interrupt_handler(void *cb_arg)
543 struct rte_bbdev *dev = cb_arg;
544 struct acc100_device *acc100_dev = dev->data->dev_private;
547 if (acc100_dev->pf_device)
548 acc100_pf_interrupt_handler(dev);
550 acc100_vf_interrupt_handler(dev);
553 /* Allocate and set up the Info Ring */
555 allocate_info_ring(struct rte_bbdev *dev)
557 struct acc100_device *d = dev->data->dev_private;
558 const struct acc100_registry_addr *reg_addr;
559 rte_iova_t info_ring_iova;
560 uint32_t phys_low, phys_high;
562 if (d->info_ring != NULL)
563 return 0; /* Already configured */
565 /* Choose correct registry addresses for the device type */
567 reg_addr = &pf_reg_addr;
569 reg_addr = &vf_reg_addr;
570 /* Allocate InfoRing */
571 d->info_ring = rte_zmalloc_socket("Info Ring",
572 ACC100_INFO_RING_NUM_ENTRIES *
573 sizeof(*d->info_ring), RTE_CACHE_LINE_SIZE,
574 dev->data->socket_id);
575 if (d->info_ring == NULL) {
577 "Failed to allocate Info Ring for %s:%u",
578 dev->device->driver->name,
582 info_ring_iova = rte_malloc_virt2iova(d->info_ring);
584 /* Setup Info Ring */
585 phys_high = (uint32_t)(info_ring_iova >> 32);
586 phys_low = (uint32_t)(info_ring_iova);
587 acc100_reg_write(d, reg_addr->info_ring_hi, phys_high);
588 acc100_reg_write(d, reg_addr->info_ring_lo, phys_low);
589 acc100_reg_write(d, reg_addr->info_ring_en, ACC100_REG_IRQ_EN_ALL);
590 d->info_ring_head = (acc100_reg_read(d, reg_addr->info_ring_ptr) &
591 0xFFF) / sizeof(union acc100_info_ring_data);
596 /* Allocate 64MB memory used for all software rings */
598 acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
600 uint32_t phys_low, phys_high, value;
601 struct acc100_device *d = dev->data->dev_private;
602 const struct acc100_registry_addr *reg_addr;
605 if (d->pf_device && !d->acc100_conf.pf_mode_en) {
606 rte_bbdev_log(NOTICE,
607 "%s has PF mode disabled. This PF can't be used.",
612 alloc_sw_rings_min_mem(dev, d, num_queues, socket_id);
614 /* If the minimal memory space approach failed, then allocate
615 * the 2 * 64MB block for the sw rings
617 if (d->sw_rings == NULL)
618 alloc_2x64mb_sw_rings_mem(dev, d, socket_id);
620 if (d->sw_rings == NULL) {
621 rte_bbdev_log(NOTICE,
622 "Failure allocating sw_rings memory");
626 /* Configure ACC100 with the base address for DMA descriptor rings
627 * Same descriptor rings used for UL and DL DMA Engines
628 * Note : Assuming only VF0 bundle is used for PF mode
630 phys_high = (uint32_t)(d->sw_rings_iova >> 32);
631 phys_low = (uint32_t)(d->sw_rings_iova & ~(ACC100_SIZE_64MBYTE-1));
633 /* Choose correct registry addresses for the device type */
635 reg_addr = &pf_reg_addr;
637 reg_addr = &vf_reg_addr;
639 /* Read the populated cfg from ACC100 registers */
640 fetch_acc100_config(dev);
642 /* Release AXI from PF */
644 acc100_reg_write(d, HWPfDmaAxiControl, 1);
646 acc100_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);
647 acc100_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);
648 acc100_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);
649 acc100_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);
650 acc100_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);
651 acc100_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);
652 acc100_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);
653 acc100_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);
656 * Configure Ring Size to the max queue ring size
657 * (used for wrapping purpose)
659 value = log2_basic(d->sw_ring_size / 64);
660 acc100_reg_write(d, reg_addr->ring_size, value);
662 /* Configure tail pointer for use when SDONE enabled */
663 d->tail_ptrs = rte_zmalloc_socket(
664 dev->device->driver->name,
665 ACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t),
666 RTE_CACHE_LINE_SIZE, socket_id);
667 if (d->tail_ptrs == NULL) {
668 rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u",
669 dev->device->driver->name,
671 rte_free(d->sw_rings);
674 d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs);
676 phys_high = (uint32_t)(d->tail_ptr_iova >> 32);
677 phys_low = (uint32_t)(d->tail_ptr_iova);
678 acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);
679 acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);
680 acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);
681 acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);
682 acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);
683 acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);
684 acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);
685 acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);
687 ret = allocate_info_ring(dev);
689 rte_bbdev_log(ERR, "Failed to allocate info_ring for %s:%u",
690 dev->device->driver->name,
695 d->harq_layout = rte_zmalloc_socket("HARQ Layout",
696 ACC100_HARQ_LAYOUT * sizeof(*d->harq_layout),
697 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
698 if (d->harq_layout == NULL) {
699 rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u",
700 dev->device->driver->name,
702 rte_free(d->sw_rings);
706 /* Mark as configured properly */
707 d->configured = true;
710 "ACC100 (%s) configured sw_rings = %p, sw_rings_iova = %#"
711 PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova);
717 acc100_intr_enable(struct rte_bbdev *dev)
720 struct acc100_device *d = dev->data->dev_private;
722 /* Only MSI interrupts are currently supported */
723 if (rte_intr_type_get(dev->intr_handle) == RTE_INTR_HANDLE_VFIO_MSI ||
724 rte_intr_type_get(dev->intr_handle) == RTE_INTR_HANDLE_UIO) {
726 ret = allocate_info_ring(dev);
729 "Couldn't allocate info ring for device: %s",
734 ret = rte_intr_enable(dev->intr_handle);
737 "Couldn't enable interrupts for device: %s",
739 rte_free(d->info_ring);
742 ret = rte_intr_callback_register(dev->intr_handle,
743 acc100_dev_interrupt_handler, dev);
746 "Couldn't register interrupt callback for device: %s",
748 rte_free(d->info_ring);
755 rte_bbdev_log(ERR, "ACC100 (%s) supports only VFIO MSI interrupts",
760 /* Free memory used for software rings */
762 acc100_dev_close(struct rte_bbdev *dev)
764 struct acc100_device *d = dev->data->dev_private;
766 if (d->sw_rings_base != NULL) {
767 rte_free(d->tail_ptrs);
768 rte_free(d->info_ring);
769 rte_free(d->sw_rings_base);
770 d->sw_rings_base = NULL;
772 /* Ensure all in-flight HW transactions are completed */
773 usleep(ACC100_LONG_WAIT);
778 * Report an ACC100 queue index which is free
779 * Return 0 to 16k for a valid queue_idx or -1 when no queue is available
780 * Note : Only supporting VF0 Bundle for PF mode
783 acc100_find_free_queue_idx(struct rte_bbdev *dev,
784 const struct rte_bbdev_queue_conf *conf)
786 struct acc100_device *d = dev->data->dev_private;
787 int op_2_acc[5] = {0, UL_4G, DL_4G, UL_5G, DL_5G};
788 int acc = op_2_acc[conf->op_type];
789 struct rte_acc100_queue_topology *qtop = NULL;
791 qtopFromAcc(&qtop, acc, &(d->acc100_conf));
794 /* Identify matching QGroup Index which are sorted in priority order */
795 uint16_t group_idx = qtop->first_qgroup_index;
796 group_idx += conf->priority;
797 if (group_idx >= ACC100_NUM_QGRPS ||
798 conf->priority >= qtop->num_qgroups) {
799 rte_bbdev_log(INFO, "Invalid Priority on %s, priority %u",
800 dev->data->name, conf->priority);
803 /* Find a free AQ_idx */
805 for (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) {
806 if (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {
807 /* Mark the Queue as assigned */
808 d->q_assigned_bit_map[group_idx] |= (1 << aq_idx);
809 /* Report the AQ Index */
810 return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx;
813 rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u",
814 dev->data->name, conf->priority);
818 /* Setup ACC100 queue */
820 acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
821 const struct rte_bbdev_queue_conf *conf)
823 struct acc100_device *d = dev->data->dev_private;
824 struct acc100_queue *q;
827 /* Allocate the queue data structure. */
828 q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
829 RTE_CACHE_LINE_SIZE, conf->socket);
831 rte_bbdev_log(ERR, "Failed to allocate queue memory");
835 rte_bbdev_log(ERR, "Undefined device");
840 q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
841 q->ring_addr_iova = d->sw_rings_iova + (d->sw_ring_size * queue_id);
843 /* Prepare the Ring with default descriptor format */
844 union acc100_dma_desc *desc = NULL;
845 unsigned int desc_idx, b_idx;
846 int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
847 ACC100_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
848 ACC100_FCW_TD_BLEN : ACC100_FCW_LD_BLEN));
850 for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
851 desc = q->ring_addr + desc_idx;
852 desc->req.word0 = ACC100_DMA_DESC_TYPE;
853 desc->req.word1 = 0; /**< Timestamp */
856 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
857 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
858 desc->req.data_ptrs[0].blen = fcw_len;
859 desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
860 desc->req.data_ptrs[0].last = 0;
861 desc->req.data_ptrs[0].dma_ext = 0;
862 for (b_idx = 1; b_idx < ACC100_DMA_MAX_NUM_POINTERS - 1;
864 desc->req.data_ptrs[b_idx].blkid = ACC100_DMA_BLKID_IN;
865 desc->req.data_ptrs[b_idx].last = 1;
866 desc->req.data_ptrs[b_idx].dma_ext = 0;
868 desc->req.data_ptrs[b_idx].blkid =
869 ACC100_DMA_BLKID_OUT_ENC;
870 desc->req.data_ptrs[b_idx].last = 1;
871 desc->req.data_ptrs[b_idx].dma_ext = 0;
873 /* Preset some fields of LDPC FCW */
874 desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
875 desc->req.fcw_ld.gain_i = 1;
876 desc->req.fcw_ld.gain_h = 1;
879 q->lb_in = rte_zmalloc_socket(dev->device->driver->name,
881 RTE_CACHE_LINE_SIZE, conf->socket);
882 if (q->lb_in == NULL) {
883 rte_bbdev_log(ERR, "Failed to allocate lb_in memory");
887 q->lb_in_addr_iova = rte_malloc_virt2iova(q->lb_in);
888 q->lb_out = rte_zmalloc_socket(dev->device->driver->name,
890 RTE_CACHE_LINE_SIZE, conf->socket);
891 if (q->lb_out == NULL) {
892 rte_bbdev_log(ERR, "Failed to allocate lb_out memory");
897 q->lb_out_addr_iova = rte_malloc_virt2iova(q->lb_out);
900 * Software queue ring wraps synchronously with the HW when it reaches
901 * the boundary of the maximum allocated queue size, no matter what the
902 * sw queue size is. This wrapping is guarded by setting the wrap_mask
903 * to represent the maximum queue size as allocated at the time when
904 * the device has been setup (in configure()).
906 * The queue depth is set to the queue size value (conf->queue_size).
907 * This limits the occupancy of the queue at any point of time, so that
908 * the queue does not get swamped with enqueue requests.
910 q->sw_ring_depth = conf->queue_size;
911 q->sw_ring_wrap_mask = d->sw_ring_max_depth - 1;
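/*
 * Illustrative example (addition): if the rings were allocated with
 * sw_ring_max_depth = 1024 and this queue is created with
 * conf->queue_size = 128, indices wrap with mask 0x3FF (1023) so that
 * software and hardware wrap at the same 1024-entry boundary, while
 * sw_ring_depth caps the in-flight occupancy at 128.
 */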
913 q->op_type = conf->op_type;
915 q_idx = acc100_find_free_queue_idx(dev, conf);
923 q->qgrp_id = (q_idx >> ACC100_GRP_ID_SHIFT) & 0xF;
924 q->vf_id = (q_idx >> ACC100_VF_ID_SHIFT) & 0x3F;
925 q->aq_id = q_idx & 0xF;
926 q->aq_depth = (conf->op_type == RTE_BBDEV_OP_TURBO_DEC) ?
927 (1 << d->acc100_conf.q_ul_4g.aq_depth_log2) :
928 (1 << d->acc100_conf.q_dl_4g.aq_depth_log2);
930 q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
931 queue_offset(d->pf_device,
932 q->vf_id, q->qgrp_id, q->aq_id));
935 "Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p",
936 dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,
937 q->aq_id, q->aq_depth, q->mmio_reg_enqueue);
939 dev->data->queues[queue_id].queue_private = q;
943 /* Release ACC100 queue */
945 acc100_queue_release(struct rte_bbdev *dev, uint16_t q_id)
947 struct acc100_device *d = dev->data->dev_private;
948 struct acc100_queue *q = dev->data->queues[q_id].queue_private;
951 /* Mark the Queue as un-assigned */
952 d->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -
957 dev->data->queues[q_id].queue_private = NULL;
963 /* Get ACC100 device info */
965 acc100_dev_info_get(struct rte_bbdev *dev,
966 struct rte_bbdev_driver_info *dev_info)
968 struct acc100_device *d = dev->data->dev_private;
970 static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
972 .type = RTE_BBDEV_OP_TURBO_DEC,
975 RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
976 RTE_BBDEV_TURBO_CRC_TYPE_24B |
977 RTE_BBDEV_TURBO_HALF_ITERATION_EVEN |
978 RTE_BBDEV_TURBO_EARLY_TERMINATION |
979 RTE_BBDEV_TURBO_DEC_INTERRUPTS |
980 RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
981 RTE_BBDEV_TURBO_MAP_DEC |
982 RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
983 RTE_BBDEV_TURBO_DEC_CRC_24B_DROP |
984 RTE_BBDEV_TURBO_DEC_SCATTER_GATHER,
985 .max_llr_modulus = INT8_MAX,
987 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
988 .num_buffers_hard_out =
989 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
990 .num_buffers_soft_out =
991 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
995 .type = RTE_BBDEV_OP_TURBO_ENC,
998 RTE_BBDEV_TURBO_CRC_24B_ATTACH |
999 RTE_BBDEV_TURBO_RV_INDEX_BYPASS |
1000 RTE_BBDEV_TURBO_RATE_MATCH |
1001 RTE_BBDEV_TURBO_ENC_INTERRUPTS |
1002 RTE_BBDEV_TURBO_ENC_SCATTER_GATHER,
1004 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
1006 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
1010 .type = RTE_BBDEV_OP_LDPC_ENC,
1013 RTE_BBDEV_LDPC_RATE_MATCH |
1014 RTE_BBDEV_LDPC_CRC_24B_ATTACH |
1015 RTE_BBDEV_LDPC_INTERLEAVER_BYPASS |
1016 RTE_BBDEV_LDPC_ENC_INTERRUPTS,
1018 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
1020 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
1024 .type = RTE_BBDEV_OP_LDPC_DEC,
1027 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
1028 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
1029 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
1030 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
1031 #ifdef ACC100_EXT_MEM
1032 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
1033 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
1034 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
1036 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
1037 RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS |
1038 RTE_BBDEV_LDPC_DECODE_BYPASS |
1039 RTE_BBDEV_LDPC_DEC_SCATTER_GATHER |
1040 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION |
1041 RTE_BBDEV_LDPC_LLR_COMPRESSION |
1042 RTE_BBDEV_LDPC_DEC_INTERRUPTS,
1046 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
1047 .num_buffers_hard_out =
1048 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
1049 .num_buffers_soft_out = 0,
1052 RTE_BBDEV_END_OF_CAPABILITIES_LIST()
1055 static struct rte_bbdev_queue_conf default_queue_conf;
1056 default_queue_conf.socket = dev->data->socket_id;
1057 default_queue_conf.queue_size = ACC100_MAX_QUEUE_DEPTH;
1059 dev_info->driver_name = dev->device->driver->name;
1061 /* Read and save the populated config from ACC100 registers */
1062 fetch_acc100_config(dev);
1064 /* This isn't ideal because it reports the maximum number of queues but
1065 * does not provide info on how many can be uplink/downlink or different
1068 dev_info->max_num_queues =
1069 d->acc100_conf.q_dl_5g.num_aqs_per_groups *
1070 d->acc100_conf.q_dl_5g.num_qgroups +
1071 d->acc100_conf.q_ul_5g.num_aqs_per_groups *
1072 d->acc100_conf.q_ul_5g.num_qgroups +
1073 d->acc100_conf.q_dl_4g.num_aqs_per_groups *
1074 d->acc100_conf.q_dl_4g.num_qgroups +
1075 d->acc100_conf.q_ul_4g.num_aqs_per_groups *
1076 d->acc100_conf.q_ul_4g.num_qgroups;
1077 dev_info->queue_size_lim = ACC100_MAX_QUEUE_DEPTH;
1078 dev_info->hardware_accelerated = true;
1079 dev_info->max_dl_queue_priority =
1080 d->acc100_conf.q_dl_4g.num_qgroups - 1;
1081 dev_info->max_ul_queue_priority =
1082 d->acc100_conf.q_ul_4g.num_qgroups - 1;
1083 dev_info->default_queue_conf = default_queue_conf;
1084 dev_info->cpu_flag_reqs = NULL;
1085 dev_info->min_alignment = 64;
1086 dev_info->capabilities = bbdev_capabilities;
1087 #ifdef ACC100_EXT_MEM
1088 dev_info->harq_buffer_size = d->ddr_size;
1090 dev_info->harq_buffer_size = 0;
1092 dev_info->data_endianness = RTE_LITTLE_ENDIAN;
1097 acc100_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
1099 struct acc100_queue *q = dev->data->queues[queue_id].queue_private;
1101 if (rte_intr_type_get(dev->intr_handle) != RTE_INTR_HANDLE_VFIO_MSI &&
1102 rte_intr_type_get(dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1110 acc100_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
1112 struct acc100_queue *q = dev->data->queues[queue_id].queue_private;
1114 if (rte_intr_type_get(dev->intr_handle) != RTE_INTR_HANDLE_VFIO_MSI &&
1115 rte_intr_type_get(dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1122 static const struct rte_bbdev_ops acc100_bbdev_ops = {
1123 .setup_queues = acc100_setup_queues,
1124 .intr_enable = acc100_intr_enable,
1125 .close = acc100_dev_close,
1126 .info_get = acc100_dev_info_get,
1127 .queue_setup = acc100_queue_setup,
1128 .queue_release = acc100_queue_release,
1129 .queue_intr_enable = acc100_queue_intr_enable,
1130 .queue_intr_disable = acc100_queue_intr_disable
1133 /* ACC100 PCI PF address map */
1134 static struct rte_pci_id pci_id_acc100_pf_map[] = {
1136 RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_PF_DEVICE_ID)
1141 /* ACC100 PCI VF address map */
1142 static struct rte_pci_id pci_id_acc100_vf_map[] = {
1144 RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_VF_DEVICE_ID)
1149 /* Read flag value 0/1 from bitmap */
1151 check_bit(uint32_t bitmap, uint32_t bitmask)
1153 return bitmap & bitmask;
1156 static inline char *
1157 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
1159 if (unlikely(len > rte_pktmbuf_tailroom(m)))
1162 char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
1163 m->data_len = (uint16_t)(m->data_len + len);
1164 m_head->pkt_len = (m_head->pkt_len + len);
1168 /* Fill in a frame control word for turbo encoding. */
1170 acc100_fcw_te_fill(const struct rte_bbdev_enc_op *op, struct acc100_fcw_te *fcw)
1172 fcw->code_block_mode = op->turbo_enc.code_block_mode;
1173 if (fcw->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1174 fcw->k_neg = op->turbo_enc.tb_params.k_neg;
1175 fcw->k_pos = op->turbo_enc.tb_params.k_pos;
1176 fcw->c_neg = op->turbo_enc.tb_params.c_neg;
1177 fcw->c = op->turbo_enc.tb_params.c;
1178 fcw->ncb_neg = op->turbo_enc.tb_params.ncb_neg;
1179 fcw->ncb_pos = op->turbo_enc.tb_params.ncb_pos;
1181 if (check_bit(op->turbo_enc.op_flags,
1182 RTE_BBDEV_TURBO_RATE_MATCH)) {
1184 fcw->cab = op->turbo_enc.tb_params.cab;
1185 fcw->ea = op->turbo_enc.tb_params.ea;
1186 fcw->eb = op->turbo_enc.tb_params.eb;
1188 /* E is set to the encoding output size when RM is
1192 fcw->cab = fcw->c_neg;
1193 fcw->ea = 3 * fcw->k_neg + 12;
1194 fcw->eb = 3 * fcw->k_pos + 12;
1196 } else { /* For CB mode */
1197 fcw->k_pos = op->turbo_enc.cb_params.k;
1198 fcw->ncb_pos = op->turbo_enc.cb_params.ncb;
1200 if (check_bit(op->turbo_enc.op_flags,
1201 RTE_BBDEV_TURBO_RATE_MATCH)) {
1203 fcw->eb = op->turbo_enc.cb_params.e;
1205 /* E is set to the encoding output size when RM is
1209 fcw->eb = 3 * fcw->k_pos + 12;
1213 fcw->bypass_rv_idx1 = check_bit(op->turbo_enc.op_flags,
1214 RTE_BBDEV_TURBO_RV_INDEX_BYPASS);
1215 fcw->code_block_crc = check_bit(op->turbo_enc.op_flags,
1216 RTE_BBDEV_TURBO_CRC_24B_ATTACH);
1217 fcw->rv_idx1 = op->turbo_enc.rv_index;
1220 /* Compute value of k0.
1221 * Based on 3GPP 38.212 Table 5.4.2.1-2
1222 * Starting position of different redundancy versions, k0
1224 static inline uint16_t
1225 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
1229 uint16_t n = (bg == 1 ? ACC100_N_ZC_1 : ACC100_N_ZC_2) * z_c;
1232 return (bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * z_c;
1233 else if (rv_index == 2)
1234 return (bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * z_c;
1236 return (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c;
1238 /* LBRM case - includes a division by N */
1240 return (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb)
1242 else if (rv_index == 2)
1243 return (((bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * n_cb)
1246 return (((bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * n_cb)
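/*
 * Worked example (addition, assuming the ACC100_K0_* constants carry the
 * 3GPP 38.212 Table 5.4.2.1-2 numerators, e.g. 17/33/56 for BG1 and
 * 13/25/43 for BG2): for BG1 with Zc = 224 and a full circular buffer
 * (n_cb == N = 66 * Zc), rv_index = 2 yields k0 = 33 * 224 = 7392.
 */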
1250 /* Fill in a frame control word for LDPC encoding. */
1252 acc100_fcw_le_fill(const struct rte_bbdev_enc_op *op,
1253 struct acc100_fcw_le *fcw, int num_cb)
1255 fcw->qm = op->ldpc_enc.q_m;
1256 fcw->nfiller = op->ldpc_enc.n_filler;
1257 fcw->BG = (op->ldpc_enc.basegraph - 1);
1258 fcw->Zc = op->ldpc_enc.z_c;
1259 fcw->ncb = op->ldpc_enc.n_cb;
1260 fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,
1261 op->ldpc_enc.rv_index);
1262 fcw->rm_e = op->ldpc_enc.cb_params.e;
1263 fcw->crc_select = check_bit(op->ldpc_enc.op_flags,
1264 RTE_BBDEV_LDPC_CRC_24B_ATTACH);
1265 fcw->bypass_intlv = check_bit(op->ldpc_enc.op_flags,
1266 RTE_BBDEV_LDPC_INTERLEAVER_BYPASS);
1267 fcw->mcb_count = num_cb;
1270 /* Fill in a frame control word for turbo decoding. */
1272 acc100_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_td *fcw)
1274 /* Note : Early termination is always enabled for 4GUL */
1276 if (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
1277 fcw->k_pos = op->turbo_dec.tb_params.k_pos;
1279 fcw->k_pos = op->turbo_dec.cb_params.k;
1280 fcw->turbo_crc_type = check_bit(op->turbo_dec.op_flags,
1281 RTE_BBDEV_TURBO_CRC_TYPE_24B);
1282 fcw->bypass_sb_deint = 0;
1283 fcw->raw_decoder_input_on = 0;
1284 fcw->max_iter = op->turbo_dec.iter_max;
1285 fcw->half_iter_on = !check_bit(op->turbo_dec.op_flags,
1286 RTE_BBDEV_TURBO_HALF_ITERATION_EVEN);
1289 /* Fill in a frame control word for LDPC decoding. */
1291 acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,
1292 union acc100_harq_layout_data *harq_layout)
1294 uint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;
1295 uint16_t harq_index;
1297 bool harq_prun = false;
1299 fcw->qm = op->ldpc_dec.q_m;
1300 fcw->nfiller = op->ldpc_dec.n_filler;
1301 fcw->BG = (op->ldpc_dec.basegraph - 1);
1302 fcw->Zc = op->ldpc_dec.z_c;
1303 fcw->ncb = op->ldpc_dec.n_cb;
1304 fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_dec.basegraph,
1305 op->ldpc_dec.rv_index);
1306 if (op->ldpc_dec.code_block_mode == RTE_BBDEV_CODE_BLOCK)
1307 fcw->rm_e = op->ldpc_dec.cb_params.e;
1309 fcw->rm_e = (op->ldpc_dec.tb_params.r <
1310 op->ldpc_dec.tb_params.cab) ?
1311 op->ldpc_dec.tb_params.ea :
1312 op->ldpc_dec.tb_params.eb;
1314 fcw->hcin_en = check_bit(op->ldpc_dec.op_flags,
1315 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
1316 fcw->hcout_en = check_bit(op->ldpc_dec.op_flags,
1317 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
1318 fcw->crc_select = check_bit(op->ldpc_dec.op_flags,
1319 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
1320 fcw->bypass_dec = check_bit(op->ldpc_dec.op_flags,
1321 RTE_BBDEV_LDPC_DECODE_BYPASS);
1322 fcw->bypass_intlv = check_bit(op->ldpc_dec.op_flags,
1323 RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS);
1324 if (op->ldpc_dec.q_m == 1) {
1325 fcw->bypass_intlv = 1;
1328 fcw->hcin_decomp_mode = check_bit(op->ldpc_dec.op_flags,
1329 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1330 fcw->hcout_comp_mode = check_bit(op->ldpc_dec.op_flags,
1331 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1332 fcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,
1333 RTE_BBDEV_LDPC_LLR_COMPRESSION);
1334 harq_index = op->ldpc_dec.harq_combined_output.offset /
1336 #ifdef ACC100_EXT_MEM
1337 /* Limit cases when HARQ pruning is valid */
1338 harq_prun = ((op->ldpc_dec.harq_combined_output.offset %
1339 ACC100_HARQ_OFFSET) == 0) &&
1340 (op->ldpc_dec.harq_combined_output.offset <= UINT16_MAX
1341 * ACC100_HARQ_OFFSET);
1343 if (fcw->hcin_en > 0) {
1344 harq_in_length = op->ldpc_dec.harq_combined_input.length;
1345 if (fcw->hcin_decomp_mode > 0)
1346 harq_in_length = harq_in_length * 8 / 6;
1347 harq_in_length = RTE_ALIGN(harq_in_length, 64);
1348 if ((harq_layout[harq_index].offset > 0) & harq_prun) {
1349 rte_bbdev_log_debug("HARQ IN offset unexpected for now\n");
1350 fcw->hcin_size0 = harq_layout[harq_index].size0;
1351 fcw->hcin_offset = harq_layout[harq_index].offset;
1352 fcw->hcin_size1 = harq_in_length -
1353 harq_layout[harq_index].offset;
1355 fcw->hcin_size0 = harq_in_length;
1356 fcw->hcin_offset = 0;
1357 fcw->hcin_size1 = 0;
1360 fcw->hcin_size0 = 0;
1361 fcw->hcin_offset = 0;
1362 fcw->hcin_size1 = 0;
1365 fcw->itmax = op->ldpc_dec.iter_max;
1366 fcw->itstop = check_bit(op->ldpc_dec.op_flags,
1367 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
1368 fcw->synd_precoder = fcw->itstop;
1370 * These are all implicitly set
1371 * fcw->synd_post = 0;
1373 * fcw->so_bypass_rm = 0;
1374 * fcw->so_bypass_intlv = 0;
1375 * fcw->dec_convllr = 0;
1376 * fcw->hcout_convllr = 0;
1377 * fcw->hcout_size1 = 0;
1379 * fcw->hcout_offset = 0;
1380 * fcw->negstop_th = 0;
1381 * fcw->negstop_it = 0;
1382 * fcw->negstop_en = 0;
1386 if (fcw->hcout_en > 0) {
1387 parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)
1388 * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
1389 k0_p = (fcw->k0 > parity_offset) ?
1390 fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;
1391 ncb_p = fcw->ncb - op->ldpc_dec.n_filler;
1392 l = k0_p + fcw->rm_e;
1393 harq_out_length = (uint16_t) fcw->hcin_size0;
1394 harq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);
1395 harq_out_length = (harq_out_length + 0x3F) & 0xFFC0;
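/* The line above rounds harq_out_length up to the next multiple of 64:
 * adding 0x3F then masking with 0xFFC0 clears the low 6 bits.
 */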
1396 if ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) &&
1398 fcw->hcout_size0 = (uint16_t) fcw->hcin_size0;
1399 fcw->hcout_offset = k0_p & 0xFFC0;
1400 fcw->hcout_size1 = harq_out_length - fcw->hcout_offset;
1402 fcw->hcout_size0 = harq_out_length;
1403 fcw->hcout_size1 = 0;
1404 fcw->hcout_offset = 0;
1406 harq_layout[harq_index].offset = fcw->hcout_offset;
1407 harq_layout[harq_index].size0 = fcw->hcout_size0;
1409 fcw->hcout_size0 = 0;
1410 fcw->hcout_size1 = 0;
1411 fcw->hcout_offset = 0;
1416 * Fills descriptor with data pointers of one block type.
1419 * Pointer to DMA descriptor.
1421 * Pointer to pointer to input data which will be encoded. It can be changed
1422 * and points to next segment in scatter-gather case.
1424 * Input offset in rte_mbuf structure. It is used for calculating the point
1425 * where data is starting.
1427 * Length of currently processed Code Block
1428 * @param seg_total_left
1429 * It indicates how many bytes are still left in the segment (mbuf) for further
1432 * Store information about device capabilities
1433 * @param next_triplet
1434 * Index for ACC100 DMA Descriptor triplet
1437 * Returns index of next triplet on success, other value if lengths of
1438 * pkt and processed cb do not match.
1442 acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,
1443 struct rte_mbuf **input, uint32_t *offset, uint32_t cb_len,
1444 uint32_t *seg_total_left, int next_triplet)
1447 struct rte_mbuf *m = *input;
1449 part_len = (*seg_total_left < cb_len) ? *seg_total_left : cb_len;
1451 *seg_total_left -= part_len;
1453 desc->data_ptrs[next_triplet].address =
1454 rte_pktmbuf_iova_offset(m, *offset);
1455 desc->data_ptrs[next_triplet].blen = part_len;
1456 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
1457 desc->data_ptrs[next_triplet].last = 0;
1458 desc->data_ptrs[next_triplet].dma_ext = 0;
1459 *offset += part_len;
1462 while (cb_len > 0) {
1463 if (next_triplet < ACC100_DMA_MAX_NUM_POINTERS &&
1467 *seg_total_left = rte_pktmbuf_data_len(m);
1468 part_len = (*seg_total_left < cb_len) ?
1471 desc->data_ptrs[next_triplet].address =
1472 rte_pktmbuf_iova_offset(m, 0);
1473 desc->data_ptrs[next_triplet].blen = part_len;
1474 desc->data_ptrs[next_triplet].blkid =
1475 ACC100_DMA_BLKID_IN;
1476 desc->data_ptrs[next_triplet].last = 0;
1477 desc->data_ptrs[next_triplet].dma_ext = 0;
1479 *seg_total_left -= part_len;
1480 /* Initializing offset for next segment (mbuf) */
1485 "Some data still left for processing: "
1486 "data_left: %u, next_triplet: %u, next_mbuf: %p",
1487 cb_len, next_triplet, m->next);
1491 /* Storing new mbuf as it could be changed in scatter-gather case */
1494 return next_triplet;
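/*
 * Illustrative walk-through (addition): for a 1000-byte code block starting
 * at *offset in a segment with 600 bytes left, the fast path above emits one
 * 600-byte triplet, then the loop hops to m->next and emits a second
 * 400-byte triplet, returning next_triplet advanced by two. *input is
 * updated so TB-mode callers resume from the last segment touched.
 */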
1497 /* Fills descriptor with data pointers of one block type.
1498 * Returns index of next triplet on success, other value if lengths of
1499 * output data and processed mbuf do not match.
1502 acc100_dma_fill_blk_type_out(struct acc100_dma_req_desc *desc,
1503 struct rte_mbuf *output, uint32_t out_offset,
1504 uint32_t output_len, int next_triplet, int blk_id)
1506 desc->data_ptrs[next_triplet].address =
1507 rte_pktmbuf_iova_offset(output, out_offset);
1508 desc->data_ptrs[next_triplet].blen = output_len;
1509 desc->data_ptrs[next_triplet].blkid = blk_id;
1510 desc->data_ptrs[next_triplet].last = 0;
1511 desc->data_ptrs[next_triplet].dma_ext = 0;
1514 return next_triplet;
1518 acc100_header_init(struct acc100_dma_req_desc *desc)
1520 desc->word0 = ACC100_DMA_DESC_TYPE;
1521 desc->word1 = 0; /**< Timestamp could be disabled */
1527 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1528 /* Check if any input data is unexpectedly left for processing */
1530 check_mbuf_total_left(uint32_t mbuf_total_left)
1532 if (mbuf_total_left == 0)
1535 "Some date still left for processing: mbuf_total_left = %u",
1542 acc100_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
1543 struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1544 struct rte_mbuf *output, uint32_t *in_offset,
1545 uint32_t *out_offset, uint32_t *out_length,
1546 uint32_t *mbuf_total_left, uint32_t *seg_total_left, uint8_t r)
1548 int next_triplet = 1; /* FCW already done */
1549 uint32_t e, ea, eb, length;
1550 uint16_t k, k_neg, k_pos;
1553 desc->word0 = ACC100_DMA_DESC_TYPE;
1554 desc->word1 = 0; /**< Timestamp could be disabled */
1559 if (op->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1560 ea = op->turbo_enc.tb_params.ea;
1561 eb = op->turbo_enc.tb_params.eb;
1562 cab = op->turbo_enc.tb_params.cab;
1563 k_neg = op->turbo_enc.tb_params.k_neg;
1564 k_pos = op->turbo_enc.tb_params.k_pos;
1565 c_neg = op->turbo_enc.tb_params.c_neg;
1566 e = (r < cab) ? ea : eb;
1567 k = (r < c_neg) ? k_neg : k_pos;
1569 e = op->turbo_enc.cb_params.e;
1570 k = op->turbo_enc.cb_params.k;
1573 if (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
1574 length = (k - 24) >> 3;
1578 if (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < length))) {
1580 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1581 *mbuf_total_left, length);
1585 next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
1586 length, seg_total_left, next_triplet);
1587 if (unlikely(next_triplet < 0)) {
1589 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1593 desc->data_ptrs[next_triplet - 1].last = 1;
1594 desc->m2dlen = next_triplet;
1595 *mbuf_total_left -= length;
1597 /* Set output length */
1598 if (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_RATE_MATCH))
1599 /* Integer round up division by 8 */
1600 *out_length = (e + 7) >> 3;
1602 *out_length = (k >> 3) * 3 + 2;
1604 next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
1605 *out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
1606 if (unlikely(next_triplet < 0)) {
1608 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1612 op->turbo_enc.output.length += *out_length;
1613 *out_offset += *out_length;
1614 desc->data_ptrs[next_triplet - 1].last = 1;
1615 desc->d2mlen = next_triplet - desc->m2dlen;
1623 acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,
1624 struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1625 struct rte_mbuf *output, uint32_t *in_offset,
1626 uint32_t *out_offset, uint32_t *out_length,
1627 uint32_t *mbuf_total_left, uint32_t *seg_total_left)
1629 int next_triplet = 1; /* FCW already done */
1630 uint16_t K, in_length_in_bits, in_length_in_bytes;
1631 struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1633 acc100_header_init(desc);
1635 K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1636 in_length_in_bits = K - enc->n_filler;
1637 if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) ||
1638 (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH))
1639 in_length_in_bits -= 24;
1640 in_length_in_bytes = in_length_in_bits >> 3;
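/*
 * Worked example (addition): for BG1 with Zc = 384, K = 22 * 384 = 8448
 * bits; with n_filler = 0 and CRC24B attached by the device, the payload
 * read from the mbuf is (8448 - 24) / 8 = 1053 bytes.
 */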
1642 if (unlikely((*mbuf_total_left == 0) ||
1643 (*mbuf_total_left < in_length_in_bytes))) {
1645 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1646 *mbuf_total_left, in_length_in_bytes);
1650 next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
1652 seg_total_left, next_triplet);
1653 if (unlikely(next_triplet < 0)) {
1655 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1659 desc->data_ptrs[next_triplet - 1].last = 1;
1660 desc->m2dlen = next_triplet;
1661 *mbuf_total_left -= in_length_in_bytes;
1663 /* Set output length */
1664 /* Integer round up division by 8 */
1665 *out_length = (enc->cb_params.e + 7) >> 3;
1667 next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
1668 *out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
1669 op->ldpc_enc.output.length += *out_length;
1670 *out_offset += *out_length;
1671 desc->data_ptrs[next_triplet - 1].last = 1;
1672 desc->data_ptrs[next_triplet - 1].dma_ext = 0;
1673 desc->d2mlen = next_triplet - desc->m2dlen;
1681 acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
1682 struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1683 struct rte_mbuf *h_output, struct rte_mbuf *s_output,
1684 uint32_t *in_offset, uint32_t *h_out_offset,
1685 uint32_t *s_out_offset, uint32_t *h_out_length,
1686 uint32_t *s_out_length, uint32_t *mbuf_total_left,
1687 uint32_t *seg_total_left, uint8_t r)
1689 int next_triplet = 1; /* FCW already done */
1691 uint16_t crc24_overlap = 0;
1694 desc->word0 = ACC100_DMA_DESC_TYPE;
1695 desc->word1 = 0; /**< Timestamp could be disabled */
1700 if (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1701 k = (r < op->turbo_dec.tb_params.c_neg)
1702 ? op->turbo_dec.tb_params.k_neg
1703 : op->turbo_dec.tb_params.k_pos;
1704 e = (r < op->turbo_dec.tb_params.cab)
1705 ? op->turbo_dec.tb_params.ea
1706 : op->turbo_dec.tb_params.eb;
1708 k = op->turbo_dec.cb_params.k;
1709 e = op->turbo_dec.cb_params.e;
1712 if ((op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
1713 && !check_bit(op->turbo_dec.op_flags,
1714 RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))
1716 if ((op->turbo_dec.code_block_mode == RTE_BBDEV_CODE_BLOCK)
1717 && check_bit(op->turbo_dec.op_flags,
1718 RTE_BBDEV_TURBO_DEC_CRC_24B_DROP))
1721 /* Calculates circular buffer size.
1722 * According to 3gpp 36.212 section 5.1.4.2
1723 *   Kw = 3 * Kpi,
1724 * where:
1725 *   Kpi = nCol * nRow
1726 * where nCol is 32 and nRow can be calculated from:
1727 *   D <= nCol * nRow
1728 * where D is the size of each output from turbo encoder block (k + 4).
1730 kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
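/* Worked example (addition): k = 6144 gives D = 6148,
 * RTE_ALIGN_CEIL(6148, 32) = 6176, and kw = 3 * 6176 = 18528, the number
 * of input LLR bytes checked against *mbuf_total_left below.
 */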
1732 if (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < kw))) {
1734 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1735 *mbuf_total_left, kw);
1739 next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset, kw,
1740 seg_total_left, next_triplet);
1741 if (unlikely(next_triplet < 0)) {
1743 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1747 desc->data_ptrs[next_triplet - 1].last = 1;
1748 desc->m2dlen = next_triplet;
1749 *mbuf_total_left -= kw;
1751 next_triplet = acc100_dma_fill_blk_type_out(
1752 desc, h_output, *h_out_offset,
1753 (k - crc24_overlap) >> 3, next_triplet,
1754 ACC100_DMA_BLKID_OUT_HARD);
1755 if (unlikely(next_triplet < 0)) {
1757 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1762 *h_out_length = ((k - crc24_overlap) >> 3);
1763 op->turbo_dec.hard_output.length += *h_out_length;
1764 *h_out_offset += *h_out_length;
1767 if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
1768 if (check_bit(op->turbo_dec.op_flags,
1769 RTE_BBDEV_TURBO_EQUALIZER))
1772 *s_out_length = (k * 3) + 12;
1774 next_triplet = acc100_dma_fill_blk_type_out(desc, s_output,
1775 *s_out_offset, *s_out_length, next_triplet,
1776 ACC100_DMA_BLKID_OUT_SOFT);
1777 if (unlikely(next_triplet < 0)) {
1779 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1784 op->turbo_dec.soft_output.length += *s_out_length;
1785 *s_out_offset += *s_out_length;
1788 desc->data_ptrs[next_triplet - 1].last = 1;
1789 desc->d2mlen = next_triplet - desc->m2dlen;
1797 acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
1798 struct acc100_dma_req_desc *desc,
1799 struct rte_mbuf **input, struct rte_mbuf *h_output,
1800 uint32_t *in_offset, uint32_t *h_out_offset,
1801 uint32_t *h_out_length, uint32_t *mbuf_total_left,
1802 uint32_t *seg_total_left,
1803 struct acc100_fcw_ld *fcw)
1805 struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1806 int next_triplet = 1; /* FCW already done */
1807 uint32_t input_length;
1808 uint16_t output_length, crc24_overlap = 0;
1809 uint16_t sys_cols, K, h_p_size, h_np_size;
1810 bool h_comp = check_bit(dec->op_flags,
1811 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1813 acc100_header_init(desc);
1815 if (check_bit(op->ldpc_dec.op_flags,
1816 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
1819 /* Compute some LDPC BG lengths */
1820 input_length = dec->cb_params.e;
1821 if (check_bit(op->ldpc_dec.op_flags,
1822 RTE_BBDEV_LDPC_LLR_COMPRESSION))
1823 input_length = (input_length * 3 + 3) / 4;
1824 sys_cols = (dec->basegraph == 1) ? 22 : 10;
1825 K = sys_cols * dec->z_c;
1826 output_length = K - dec->n_filler - crc24_overlap;
1828 if (unlikely((*mbuf_total_left == 0) ||
1829 (*mbuf_total_left < input_length))) {
1831 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1832 *mbuf_total_left, input_length);
1836 next_triplet = acc100_dma_fill_blk_type_in(desc, input,
1837 in_offset, input_length,
1838 seg_total_left, next_triplet);
1840 if (unlikely(next_triplet < 0)) {
1842 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1847 if (check_bit(op->ldpc_dec.op_flags,
1848 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1849 h_p_size = fcw->hcin_size0 + fcw->hcin_size1;
1851 h_p_size = (h_p_size * 3 + 3) / 4;
1852 desc->data_ptrs[next_triplet].address =
1853 dec->harq_combined_input.offset;
1854 desc->data_ptrs[next_triplet].blen = h_p_size;
1855 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN_HARQ;
1856 desc->data_ptrs[next_triplet].dma_ext = 1;
1857 #ifndef ACC100_EXT_MEM
1858 acc100_dma_fill_blk_type_out(
1860 op->ldpc_dec.harq_combined_input.data,
1861 op->ldpc_dec.harq_combined_input.offset,
1864 ACC100_DMA_BLKID_IN_HARQ);
1869 desc->data_ptrs[next_triplet - 1].last = 1;
1870 desc->m2dlen = next_triplet;
1871 *mbuf_total_left -= input_length;
1873 next_triplet = acc100_dma_fill_blk_type_out(desc, h_output,
1874 *h_out_offset, output_length >> 3, next_triplet,
1875 ACC100_DMA_BLKID_OUT_HARD);
1877 if (check_bit(op->ldpc_dec.op_flags,
1878 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1879 /* Pruned size of the HARQ */
1880 h_p_size = fcw->hcout_size0 + fcw->hcout_size1;
1881 /* Non-Pruned size of the HARQ */
1882 h_np_size = fcw->hcout_offset > 0 ?
1883 fcw->hcout_offset + fcw->hcout_size1 :
1886 h_np_size = (h_np_size * 3 + 3) / 4;
1887 h_p_size = (h_p_size * 3 + 3) / 4;
1889 dec->harq_combined_output.length = h_np_size;
1890 desc->data_ptrs[next_triplet].address =
1891 dec->harq_combined_output.offset;
1892 desc->data_ptrs[next_triplet].blen = h_p_size;
1893 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARQ;
1894 desc->data_ptrs[next_triplet].dma_ext = 1;
1895 #ifndef ACC100_EXT_MEM
1896 acc100_dma_fill_blk_type_out(
1898 dec->harq_combined_output.data,
1899 dec->harq_combined_output.offset,
1902 ACC100_DMA_BLKID_OUT_HARQ);
1907 *h_out_length = output_length >> 3;
1908 dec->hard_output.length += *h_out_length;
1909 *h_out_offset += *h_out_length;
1910 desc->data_ptrs[next_triplet - 1].last = 1;
1911 desc->d2mlen = next_triplet - desc->m2dlen;
1919 acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,
1920 struct acc100_dma_req_desc *desc,
1921 struct rte_mbuf *input, struct rte_mbuf *h_output,
1922 uint32_t *in_offset, uint32_t *h_out_offset,
1923 uint32_t *h_out_length,
1924 union acc100_harq_layout_data *harq_layout)
1926 int next_triplet = 1; /* FCW already done */
1927 desc->data_ptrs[next_triplet].address =
1928 rte_pktmbuf_iova_offset(input, *in_offset);
1931 if (check_bit(op->ldpc_dec.op_flags,
1932 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1933 struct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;
1934 desc->data_ptrs[next_triplet].address = hi.offset;
1935 #ifndef ACC100_EXT_MEM
1936 desc->data_ptrs[next_triplet].address =
1937 rte_pktmbuf_iova_offset(hi.data, hi.offset);
1942 desc->data_ptrs[next_triplet].address =
1943 rte_pktmbuf_iova_offset(h_output, *h_out_offset);
1944 *h_out_length = desc->data_ptrs[next_triplet].blen;
1947 if (check_bit(op->ldpc_dec.op_flags,
1948 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1949 desc->data_ptrs[next_triplet].address =
1950 op->ldpc_dec.harq_combined_output.offset;
1951 /* Adjust based on previous operation */
1952 struct rte_bbdev_dec_op *prev_op = desc->op_addr;
1953 op->ldpc_dec.harq_combined_output.length =
1954 prev_op->ldpc_dec.harq_combined_output.length;
1955 int16_t hq_idx = op->ldpc_dec.harq_combined_output.offset /
1957 int16_t prev_hq_idx =
1958 prev_op->ldpc_dec.harq_combined_output.offset
1959 / ACC100_HARQ_OFFSET;
1960 harq_layout[hq_idx].val = harq_layout[prev_hq_idx].val;
1961 #ifndef ACC100_EXT_MEM
1962 struct rte_bbdev_op_data ho =
1963 op->ldpc_dec.harq_combined_output;
1964 desc->data_ptrs[next_triplet].address =
1965 rte_pktmbuf_iova_offset(ho.data, ho.offset);
1970 op->ldpc_dec.hard_output.length += *h_out_length;
1975 /* Enqueue a number of operations to HW and update software rings */
1977 acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,
1978 struct rte_bbdev_stats *queue_stats)
1980 union acc100_enqueue_reg_fmt enq_req;
1981 #ifdef RTE_BBDEV_OFFLOAD_COST
1982 uint64_t start_time = 0;
1983 queue_stats->acc_offload_cycles = 0;
1985 RTE_SET_USED(queue_stats);
1989 /* Set the address offset in 64-byte units: 100b (i.e. 4) for a 256-byte DMA descriptor */
1990 enq_req.addr_offset = ACC100_DESC_OFFSET;
1992 /* Split ops into batches */
1994 union acc100_dma_desc *desc;
1995 uint16_t enq_batch_size;
1997 rte_iova_t req_elem_addr;
1999 enq_batch_size = RTE_MIN(n, MAX_ENQ_BATCH_SIZE);
2001 /* Set flag on last descriptor in a batch */
2002 desc = q->ring_addr + ((q->sw_ring_head + enq_batch_size - 1) &
2003 q->sw_ring_wrap_mask);
2004 desc->req.last_desc_in_batch = 1;
2006 /* Calculate the 1st descriptor's address */
2007 offset = ((q->sw_ring_head & q->sw_ring_wrap_mask) *
2008 sizeof(union acc100_dma_desc));
2009 req_elem_addr = q->ring_addr_iova + offset;
2011 /* Fill enqueue struct */
2012 enq_req.num_elem = enq_batch_size;
2013 /* low 6 bits are not needed */
2014 enq_req.req_elem_addr = (uint32_t)(req_elem_addr >> 6);
2016 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2017 rte_memdump(stderr, "Req sdone", desc, sizeof(*desc));
2019 rte_bbdev_log_debug(
2020 "Enqueue %u reqs (phys %#"PRIx64") to reg %p",
2023 (void *)q->mmio_reg_enqueue);
2027 #ifdef RTE_BBDEV_OFFLOAD_COST
2028 /* Start time measurement for enqueue function offload. */
2029 start_time = rte_rdtsc_precise();
2031 rte_bbdev_log(DEBUG, "MMIO Enqueue");
2032 mmio_write(q->mmio_reg_enqueue, enq_req.val);
2034 #ifdef RTE_BBDEV_OFFLOAD_COST
2035 queue_stats->acc_offload_cycles +=
2036 rte_rdtsc_precise() - start_time;
2040 q->sw_ring_head += enq_batch_size;
2041 n -= enq_batch_size;
2048 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2049 /* Validates turbo encoder parameters */
2051 validate_enc_op(struct rte_bbdev_enc_op *op)
2053 struct rte_bbdev_op_turbo_enc *turbo_enc = &op->turbo_enc;
2054 struct rte_bbdev_op_enc_turbo_cb_params *cb = NULL;
2055 struct rte_bbdev_op_enc_turbo_tb_params *tb = NULL;
2056 uint16_t kw, kw_neg, kw_pos;
2058 if (op->mempool == NULL) {
2059 rte_bbdev_log(ERR, "Invalid mempool pointer");
2062 if (turbo_enc->input.data == NULL) {
2063 rte_bbdev_log(ERR, "Invalid input pointer");
2066 if (turbo_enc->output.data == NULL) {
2067 rte_bbdev_log(ERR, "Invalid output pointer");
2070 if (turbo_enc->rv_index > 3) {
2072 "rv_index (%u) is out of range 0 <= value <= 3",
2073 turbo_enc->rv_index);
2076 if (turbo_enc->code_block_mode != RTE_BBDEV_TRANSPORT_BLOCK &&
2077 turbo_enc->code_block_mode != RTE_BBDEV_CODE_BLOCK) {
2079 "code_block_mode (%u) is out of range 0 <= value <= 1",
2080 turbo_enc->code_block_mode);
2084 if (turbo_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
2085 tb = &turbo_enc->tb_params;
2086 if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE
2087 || tb->k_neg > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2090 "k_neg (%u) is out of range %u <= value <= %u",
2091 tb->k_neg, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2092 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2095 if (tb->k_pos < RTE_BBDEV_TURBO_MIN_CB_SIZE
2096 || tb->k_pos > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2098 "k_pos (%u) is out of range %u <= value <= %u",
2099 tb->k_pos, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2100 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2103 if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))
2105 "c_neg (%u) is out of range 0 <= value <= %u",
2107 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1);
2108 if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
2110 "c (%u) is out of range 1 <= value <= %u",
2111 tb->c, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
2114 if (tb->cab > tb->c) {
2116 "cab (%u) is greater than c (%u)",
2120 if ((tb->ea < RTE_BBDEV_TURBO_MIN_CB_SIZE || (tb->ea % 2))
2121 && tb->r < tb->cab) {
2123 "ea (%u) is less than %u or it is not even",
2124 tb->ea, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2127 if ((tb->eb < RTE_BBDEV_TURBO_MIN_CB_SIZE || (tb->eb % 2))
2128 && tb->c > tb->cab) {
2130 "eb (%u) is less than %u or it is not even",
2131 tb->eb, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2135 kw_neg = 3 * RTE_ALIGN_CEIL(tb->k_neg + 4,
2136 RTE_BBDEV_TURBO_C_SUBBLOCK);
2137 if (tb->ncb_neg < tb->k_neg || tb->ncb_neg > kw_neg) {
2139 "ncb_neg (%u) is out of range (%u) k_neg <= value <= (%u) kw_neg",
2140 tb->ncb_neg, tb->k_neg, kw_neg);
2144 kw_pos = 3 * RTE_ALIGN_CEIL(tb->k_pos + 4,
2145 RTE_BBDEV_TURBO_C_SUBBLOCK);
2146 if (tb->ncb_pos < tb->k_pos || tb->ncb_pos > kw_pos) {
2148 "ncb_pos (%u) is out of range (%u) k_pos <= value <= (%u) kw_pos",
2149 tb->ncb_pos, tb->k_pos, kw_pos);
2152 if (tb->r > (tb->c - 1)) {
2154 "r (%u) is greater than c - 1 (%u)",
2159 cb = &turbo_enc->cb_params;
2160 if (cb->k < RTE_BBDEV_TURBO_MIN_CB_SIZE
2161 || cb->k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2163 "k (%u) is out of range %u <= value <= %u",
2164 cb->k, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2165 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2169 if (cb->e < RTE_BBDEV_TURBO_MIN_CB_SIZE || (cb->e % 2)) {
2171 "e (%u) is less than %u or it is not even",
2172 cb->e, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2176 kw = RTE_ALIGN_CEIL(cb->k + 4, RTE_BBDEV_TURBO_C_SUBBLOCK) * 3;
2177 if (cb->ncb < cb->k || cb->ncb > kw) {
2179 "ncb (%u) is out of range (%u) k <= value <= (%u) kw",
2180 cb->ncb, cb->k, kw);
2187 /* Validates LDPC encoder parameters */
2189 validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
2191 struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;
2193 if (op->mempool == NULL) {
2194 rte_bbdev_log(ERR, "Invalid mempool pointer");
2197 if (ldpc_enc->input.data == NULL) {
2198 rte_bbdev_log(ERR, "Invalid input pointer");
2201 if (ldpc_enc->output.data == NULL) {
2202 rte_bbdev_log(ERR, "Invalid output pointer");
2205 if (ldpc_enc->input.length >
2206 RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {
2207 rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
2208 ldpc_enc->input.length,
2209 RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3);
2212 if ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {
2214 "BG (%u) is out of range 1 <= value <= 2",
2215 ldpc_enc->basegraph);
2218 if (ldpc_enc->rv_index > 3) {
2220 "rv_index (%u) is out of range 0 <= value <= 3",
2221 ldpc_enc->rv_index);
2224 if (ldpc_enc->code_block_mode > RTE_BBDEV_CODE_BLOCK) {
2226 "code_block_mode (%u) is out of range 0 <= value <= 1",
2227 ldpc_enc->code_block_mode);
2230 int K = (ldpc_enc->basegraph == 1 ? 22 : 10) * ldpc_enc->z_c;
2231 if (ldpc_enc->n_filler >= K) {
2233 "K and F are not compatible %u %u",
2234 K, ldpc_enc->n_filler);
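/*
 * Background: K follows 3GPP TS 38.212, K = 22 * Zc for base graph 1 and
 * K = 10 * Zc for base graph 2. For example, BG1 with Zc = 384 gives
 * K = 8448 bits, so n_filler must stay strictly below that.
 */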
2240 /* Validates LDPC decoder parameters */
2242 validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
2244 struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
2246 if (op->mempool == NULL) {
2247 rte_bbdev_log(ERR, "Invalid mempool pointer");
2250 if ((ldpc_dec->basegraph > 2) || (ldpc_dec->basegraph == 0)) {
2252 "BG (%u) is out of range 1 <= value <= 2",
2253 ldpc_dec->basegraph);
2256 if (ldpc_dec->iter_max == 0) {
2258 "iter_max (%u) is equal to 0",
2259 ldpc_dec->iter_max);
2262 if (ldpc_dec->rv_index > 3) {
2264 "rv_index (%u) is out of range 0 <= value <= 3",
2265 ldpc_dec->rv_index);
2268 if (ldpc_dec->code_block_mode > RTE_BBDEV_CODE_BLOCK) {
2270 "code_block_mode (%u) is out of range 0 <= value <= 1",
2271 ldpc_dec->code_block_mode);
2274 int K = (ldpc_dec->basegraph == 1 ? 22 : 10) * ldpc_dec->z_c;
2275 if (ldpc_dec->n_filler >= K) {
2277 "K and F are not compatible %u %u",
2278 K, ldpc_dec->n_filler);
2285 /* Enqueue one encode operation for the ACC100 device in CB mode */
2287 enqueue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2288 uint16_t total_enqueued_cbs)
2290 union acc100_dma_desc *desc = NULL;
2292 uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2294 struct rte_mbuf *input, *output_head, *output;
2296 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2297 /* Validate op structure */
2298 if (validate_enc_op(op) == -1) {
2299 rte_bbdev_log(ERR, "Turbo encoder validation failed");
2304 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2305 & q->sw_ring_wrap_mask);
2306 desc = q->ring_addr + desc_idx;
2307 acc100_fcw_te_fill(op, &desc->req.fcw_te);
2309 input = op->turbo_enc.input.data;
2310 output_head = output = op->turbo_enc.output.data;
2311 in_offset = op->turbo_enc.input.offset;
2312 out_offset = op->turbo_enc.output.offset;
2314 mbuf_total_left = op->turbo_enc.input.length;
2315 seg_total_left = rte_pktmbuf_data_len(op->turbo_enc.input.data)
2318 ret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,
2319 &in_offset, &out_offset, &out_length, &mbuf_total_left,
2320 &seg_total_left, 0);
2322 if (unlikely(ret < 0))
2325 mbuf_append(output_head, output, out_length);
2327 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2328 rte_memdump(stderr, "FCW", &desc->req.fcw_te,
2329 sizeof(desc->req.fcw_te) - 8);
2330 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2331 if (check_mbuf_total_left(mbuf_total_left) != 0)
2334 /* One CB (one op) was successfully prepared to enqueue */
2338 /* Enqueue a batch of LDPC encode operations for the ACC100 device in CB mode */
2340 enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,
2341 uint16_t total_enqueued_cbs, int16_t num)
2343 union acc100_dma_desc *desc = NULL;
2344 uint32_t out_length;
2345 struct rte_mbuf *output_head, *output;
2346 int i, next_triplet;
2347 uint16_t in_length_in_bytes;
2348 struct rte_bbdev_op_ldpc_enc *enc = &ops[0]->ldpc_enc;
2350 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2351 /* Validate op structure */
2352 if (validate_ldpc_enc_op(ops[0]) == -1) {
2353 rte_bbdev_log(ERR, "LDPC encoder validation failed");
2358 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2359 & q->sw_ring_wrap_mask);
2360 desc = q->ring_addr + desc_idx;
2361 acc100_fcw_le_fill(ops[0], &desc->req.fcw_le, num);
2363 /* Initialise the descriptor header; this could instead be deferred to polling */
2364 acc100_header_init(&desc->req);
2365 desc->req.numCBs = num;
2367 in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;
2368 out_length = (enc->cb_params.e + 7) >> 3;
2369 desc->req.m2dlen = 1 + num;
2370 desc->req.d2mlen = num;
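/*
 * Note: this single descriptor muxes the whole batch. m2dlen is 1 + num
 * because the first memory-to-device pointer is the FCW itself, followed
 * by one input pointer per CB; d2mlen is one output pointer per CB.
 */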
2373 for (i = 0; i < num; i++) {
2374 desc->req.data_ptrs[next_triplet].address =
2375 rte_pktmbuf_iova_offset(ops[i]->ldpc_enc.input.data, 0);
2376 desc->req.data_ptrs[next_triplet].blen = in_length_in_bytes;
2378 desc->req.data_ptrs[next_triplet].address =
2379 rte_pktmbuf_iova_offset(
2380 ops[i]->ldpc_enc.output.data, 0);
2381 desc->req.data_ptrs[next_triplet].blen = out_length;
2383 ops[i]->ldpc_enc.output.length = out_length;
2384 output_head = output = ops[i]->ldpc_enc.output.data;
2385 mbuf_append(output_head, output, out_length);
2386 output->data_len = out_length;
2389 desc->req.op_addr = ops[0];
2391 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2392 rte_memdump(stderr, "FCW", &desc->req.fcw_le,
2393 sizeof(desc->req.fcw_le) - 8);
2394 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2397 /* Multiple CBs (one per op) were successfully prepared to enqueue */
2401 /* Enqueue one LDPC encode operation for the ACC100 device in CB mode */
2403 enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2404 uint16_t total_enqueued_cbs)
2406 union acc100_dma_desc *desc = NULL;
2408 uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2410 struct rte_mbuf *input, *output_head, *output;
2412 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2413 /* Validate op structure */
2414 if (validate_ldpc_enc_op(op) == -1) {
2415 rte_bbdev_log(ERR, "LDPC encoder validation failed");
2420 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2421 & q->sw_ring_wrap_mask);
2422 desc = q->ring_addr + desc_idx;
2423 acc100_fcw_le_fill(op, &desc->req.fcw_le, 1);
2425 input = op->ldpc_enc.input.data;
2426 output_head = output = op->ldpc_enc.output.data;
2427 in_offset = op->ldpc_enc.input.offset;
2428 out_offset = op->ldpc_enc.output.offset;
2430 mbuf_total_left = op->ldpc_enc.input.length;
2431 seg_total_left = rte_pktmbuf_data_len(op->ldpc_enc.input.data)
2434 ret = acc100_dma_desc_le_fill(op, &desc->req, &input, output,
2435 &in_offset, &out_offset, &out_length, &mbuf_total_left,
2438 if (unlikely(ret < 0))
2441 mbuf_append(output_head, output, out_length);
2443 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2444 rte_memdump(stderr, "FCW", &desc->req.fcw_le,
2445 sizeof(desc->req.fcw_le) - 8);
2446 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2448 if (check_mbuf_total_left(mbuf_total_left) != 0)
2451 /* One CB (one op) was successfully prepared to enqueue */
2456 /* Enqueue one encode operation for the ACC100 device in TB mode. */
2458 enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2459 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
2461 union acc100_dma_desc *desc = NULL;
2464 uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2466 struct rte_mbuf *input, *output_head, *output;
2467 uint16_t current_enqueued_cbs = 0;
2469 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2470 /* Validate op structure */
2471 if (validate_enc_op(op) == -1) {
2472 rte_bbdev_log(ERR, "Turbo encoder validation failed");
2477 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2478 & q->sw_ring_wrap_mask);
2479 desc = q->ring_addr + desc_idx;
2480 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
2481 acc100_fcw_te_fill(op, &desc->req.fcw_te);
2483 input = op->turbo_enc.input.data;
2484 output_head = output = op->turbo_enc.output.data;
2485 in_offset = op->turbo_enc.input.offset;
2486 out_offset = op->turbo_enc.output.offset;
2488 mbuf_total_left = op->turbo_enc.input.length;
2490 c = op->turbo_enc.tb_params.c;
2491 r = op->turbo_enc.tb_params.r;
2493 while (mbuf_total_left > 0 && r < c) {
2494 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
2495 /* Set up DMA descriptor */
2496 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
2497 & q->sw_ring_wrap_mask);
2498 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
2499 desc->req.data_ptrs[0].blen = ACC100_FCW_TE_BLEN;
2501 ret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,
2502 &in_offset, &out_offset, &out_length,
2503 &mbuf_total_left, &seg_total_left, r);
2504 if (unlikely(ret < 0))
2506 mbuf_append(output_head, output, out_length);
2508 /* Set total number of CBs in TB */
2509 desc->req.cbs_in_tb = cbs_in_tb;
2510 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2511 rte_memdump(stderr, "FCW", &desc->req.fcw_te,
2512 sizeof(desc->req.fcw_te) - 8);
2513 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2516 if (seg_total_left == 0) {
2517 /* Go to the next mbuf */
2518 input = input->next;
2520 output = output->next;
2524 total_enqueued_cbs++;
2525 current_enqueued_cbs++;
2529 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2530 if (check_mbuf_total_left(mbuf_total_left) != 0)
2534 /* Set SDone on last CB descriptor for TB mode. */
2535 desc->req.sdone_enable = 1;
2536 desc->req.irq_enable = q->irq_enable;
2538 return current_enqueued_cbs;
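/*
 * Note on TB mode above: each CB in the transport block gets its own
 * descriptor, but all of them reference the same FCW stored at fcw_offset
 * next to the first descriptor. SDone and the IRQ flag are armed only on
 * the last descriptor so that a single completion covers the whole TB.
 */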
2541 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2542 /* Validates turbo decoder parameters */
2544 validate_dec_op(struct rte_bbdev_dec_op *op)
2546 struct rte_bbdev_op_turbo_dec *turbo_dec = &op->turbo_dec;
2547 struct rte_bbdev_op_dec_turbo_cb_params *cb = NULL;
2548 struct rte_bbdev_op_dec_turbo_tb_params *tb = NULL;
2550 if (op->mempool == NULL) {
2551 rte_bbdev_log(ERR, "Invalid mempool pointer");
2554 if (turbo_dec->input.data == NULL) {
2555 rte_bbdev_log(ERR, "Invalid input pointer");
2558 if (turbo_dec->hard_output.data == NULL) {
2559 rte_bbdev_log(ERR, "Invalid hard_output pointer");
2562 if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
2563 turbo_dec->soft_output.data == NULL) {
2564 rte_bbdev_log(ERR, "Invalid soft_output pointer");
2567 if (turbo_dec->rv_index > 3) {
2569 "rv_index (%u) is out of range 0 <= value <= 3",
2570 turbo_dec->rv_index);
2573 if (turbo_dec->iter_min < 1) {
2575 "iter_min (%u) is less than 1",
2576 turbo_dec->iter_min);
2579 if (turbo_dec->iter_max <= 2) {
2581 "iter_max (%u) is less than or equal to 2",
2582 turbo_dec->iter_max);
2585 if (turbo_dec->iter_min > turbo_dec->iter_max) {
2587 "iter_min (%u) is greater than iter_max (%u)",
2588 turbo_dec->iter_min, turbo_dec->iter_max);
2591 if (turbo_dec->code_block_mode != RTE_BBDEV_TRANSPORT_BLOCK &&
2592 turbo_dec->code_block_mode != RTE_BBDEV_CODE_BLOCK) {
2594 "code_block_mode (%u) is out of range 0 <= value <= 1",
2595 turbo_dec->code_block_mode);
2599 if (turbo_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
2600 tb = &turbo_dec->tb_params;
2601 if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE
2602 || tb->k_neg > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2605 "k_neg (%u) is out of range %u <= value <= %u",
2606 tb->k_neg, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2607 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2610 if ((tb->k_pos < RTE_BBDEV_TURBO_MIN_CB_SIZE
2611 || tb->k_pos > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2612 && tb->c > tb->c_neg) {
2614 "k_pos (%u) is out of range %u <= value <= %u",
2615 tb->k_pos, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2616 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2619 if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))
2621 "c_neg (%u) is out of range 0 <= value <= %u",
2623 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1);
2624 if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
2626 "c (%u) is out of range 1 <= value <= %u",
2627 tb->c, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
2630 if (tb->cab > tb->c) {
2632 "cab (%u) is greater than c (%u)",
2636 if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2637 (tb->ea < RTE_BBDEV_TURBO_MIN_CB_SIZE
2641 "ea (%u) is less than %u or it is not even",
2642 tb->ea, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2645 if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2646 (tb->eb < RTE_BBDEV_TURBO_MIN_CB_SIZE
2648 && tb->c > tb->cab) {
2650 "eb (%u) is less than %u or it is not even",
2651 tb->eb, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2654 cb = &turbo_dec->cb_params;
2655 if (cb->k < RTE_BBDEV_TURBO_MIN_CB_SIZE
2656 || cb->k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2658 "k (%u) is out of range %u <= value <= %u",
2659 cb->k, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2660 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2663 if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2664 (cb->e < RTE_BBDEV_TURBO_MIN_CB_SIZE ||
2667 "e (%u) is less than %u or it is not even",
2668 cb->e, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2677 /* Enqueue one decode operation for the ACC100 device in CB mode */
2679 enqueue_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2680 uint16_t total_enqueued_cbs)
2682 union acc100_dma_desc *desc = NULL;
2684 uint32_t in_offset, h_out_offset, s_out_offset, s_out_length,
2685 h_out_length, mbuf_total_left, seg_total_left;
2686 struct rte_mbuf *input, *h_output_head, *h_output,
2687 *s_output_head, *s_output;
2689 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2690 /* Validate op structure */
2691 if (validate_dec_op(op) == -1) {
2692 rte_bbdev_log(ERR, "Turbo decoder validation failed");
2697 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2698 & q->sw_ring_wrap_mask);
2699 desc = q->ring_addr + desc_idx;
2700 acc100_fcw_td_fill(op, &desc->req.fcw_td);
2702 input = op->turbo_dec.input.data;
2703 h_output_head = h_output = op->turbo_dec.hard_output.data;
2704 s_output_head = s_output = op->turbo_dec.soft_output.data;
2705 in_offset = op->turbo_dec.input.offset;
2706 h_out_offset = op->turbo_dec.hard_output.offset;
2707 s_out_offset = op->turbo_dec.soft_output.offset;
2708 h_out_length = s_out_length = 0;
2709 mbuf_total_left = op->turbo_dec.input.length;
2710 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
2712 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2713 if (unlikely(input == NULL)) {
2714 rte_bbdev_log(ERR, "Invalid mbuf pointer");
2719 /* Set up DMA descriptor */
2720 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
2721 & q->sw_ring_wrap_mask);
2723 ret = acc100_dma_desc_td_fill(op, &desc->req, &input, h_output,
2724 s_output, &in_offset, &h_out_offset, &s_out_offset,
2725 &h_out_length, &s_out_length, &mbuf_total_left,
2726 &seg_total_left, 0);
2728 if (unlikely(ret < 0))
2732 mbuf_append(h_output_head, h_output, h_out_length);
2735 if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT))
2736 mbuf_append(s_output_head, s_output, s_out_length);
2738 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2739 rte_memdump(stderr, "FCW", &desc->req.fcw_td,
2740 sizeof(desc->req.fcw_td) - 8);
2741 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2742 if (check_mbuf_total_left(mbuf_total_left) != 0)
2746 /* One CB (one op) was successfully prepared to enqueue */
2751 harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2752 uint16_t total_enqueued_cbs) {
2753 struct acc100_fcw_ld *fcw;
2754 union acc100_dma_desc *desc;
2755 int next_triplet = 1;
2756 struct rte_mbuf *hq_output_head, *hq_output;
2757 uint16_t harq_dma_length_in, harq_dma_length_out;
2758 uint16_t harq_in_length = op->ldpc_dec.harq_combined_input.length;
2759 if (harq_in_length == 0) {
2760 rte_bbdev_log(ERR, "Loopback of invalid null size");
2764 int h_comp = check_bit(op->ldpc_dec.op_flags,
2765 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION
2768 harq_in_length = harq_in_length * 8 / 6;
2769 harq_in_length = RTE_ALIGN(harq_in_length, 64);
2770 harq_dma_length_in = harq_in_length * 6 / 8;
2772 harq_in_length = RTE_ALIGN(harq_in_length, 64);
2773 harq_dma_length_in = harq_in_length;
2775 harq_dma_length_out = harq_dma_length_in;
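/*
 * Note on the arithmetic above: with 6-bit HARQ compression each LLR is
 * stored in 6 bits, so the nominal HARQ length is the stored length * 8 / 6,
 * rounded up to a 64-byte boundary, while the DMA length converts back
 * with * 6 / 8. E.g. 768 compressed bytes expand to 1024 nominal bytes.
 */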
2777 bool ddr_mem_in = check_bit(op->ldpc_dec.op_flags,
2778 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE);
2779 union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
2780 uint16_t harq_index = (ddr_mem_in ?
2781 op->ldpc_dec.harq_combined_input.offset :
2782 op->ldpc_dec.harq_combined_output.offset)
2783 / ACC100_HARQ_OFFSET;
2785 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2786 & q->sw_ring_wrap_mask);
2787 desc = q->ring_addr + desc_idx;
2788 fcw = &desc->req.fcw_ld;
2789 /* Set the FCW from loopback into DDR */
2790 memset(fcw, 0, sizeof(struct acc100_fcw_ld));
2791 fcw->FCWversion = ACC100_FCW_VER;
2794 if (harq_in_length < 16 * ACC100_N_ZC_1)
2796 fcw->ncb = fcw->Zc * ACC100_N_ZC_1;
2801 rte_bbdev_log(DEBUG, "Loopback IN %d Index %d offset %d length %d %d",
2802 ddr_mem_in, harq_index,
2803 harq_layout[harq_index].offset, harq_in_length,
2804 harq_dma_length_in);
2806 if (ddr_mem_in && (harq_layout[harq_index].offset > 0)) {
2807 fcw->hcin_size0 = harq_layout[harq_index].size0;
2808 fcw->hcin_offset = harq_layout[harq_index].offset;
2809 fcw->hcin_size1 = harq_in_length - fcw->hcin_offset;
2810 harq_dma_length_in = (fcw->hcin_size0 + fcw->hcin_size1);
2812 harq_dma_length_in = harq_dma_length_in * 6 / 8;
2814 fcw->hcin_size0 = harq_in_length;
2816 harq_layout[harq_index].val = 0;
2817 rte_bbdev_log(DEBUG, "Loopback FCW Config %d %d %d",
2818 fcw->hcin_size0, fcw->hcin_offset, fcw->hcin_size1);
2819 fcw->hcout_size0 = harq_in_length;
2820 fcw->hcin_decomp_mode = h_comp;
2821 fcw->hcout_comp_mode = h_comp;
2825 /* Set the descriptor header; this could instead be deferred to polling */
2826 acc100_header_init(&desc->req);
2828 /* Null LLR input for Decoder */
2829 desc->req.data_ptrs[next_triplet].address =
2831 desc->req.data_ptrs[next_triplet].blen = 2;
2832 desc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
2833 desc->req.data_ptrs[next_triplet].last = 0;
2834 desc->req.data_ptrs[next_triplet].dma_ext = 0;
2837 /* HARQ Combine input from either Memory interface */
2839 next_triplet = acc100_dma_fill_blk_type_out(&desc->req,
2840 op->ldpc_dec.harq_combined_input.data,
2841 op->ldpc_dec.harq_combined_input.offset,
2844 ACC100_DMA_BLKID_IN_HARQ);
2846 desc->req.data_ptrs[next_triplet].address =
2847 op->ldpc_dec.harq_combined_input.offset;
2848 desc->req.data_ptrs[next_triplet].blen =
2850 desc->req.data_ptrs[next_triplet].blkid =
2851 ACC100_DMA_BLKID_IN_HARQ;
2852 desc->req.data_ptrs[next_triplet].dma_ext = 1;
2855 desc->req.data_ptrs[next_triplet - 1].last = 1;
2856 desc->req.m2dlen = next_triplet;
2858 /* Dropped decoder hard output */
2859 desc->req.data_ptrs[next_triplet].address =
2860 q->lb_out_addr_iova;
2861 desc->req.data_ptrs[next_triplet].blen = ACC100_BYTES_IN_WORD;
2862 desc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARD;
2863 desc->req.data_ptrs[next_triplet].last = 0;
2864 desc->req.data_ptrs[next_triplet].dma_ext = 0;
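/*
 * Note: in loopback mode the decoder hard output is irrelevant, so it is
 * steered into a small scratch buffer (lb_out_addr_iova) rather than into
 * a caller-supplied mbuf.
 */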
2867 /* HARQ Combine output to either Memory interface */
2868 if (check_bit(op->ldpc_dec.op_flags,
2869 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE
2871 desc->req.data_ptrs[next_triplet].address =
2872 op->ldpc_dec.harq_combined_output.offset;
2873 desc->req.data_ptrs[next_triplet].blen =
2874 harq_dma_length_out;
2875 desc->req.data_ptrs[next_triplet].blkid =
2876 ACC100_DMA_BLKID_OUT_HARQ;
2877 desc->req.data_ptrs[next_triplet].dma_ext = 1;
2880 hq_output_head = op->ldpc_dec.harq_combined_output.data;
2881 hq_output = op->ldpc_dec.harq_combined_output.data;
2882 next_triplet = acc100_dma_fill_blk_type_out(
2884 op->ldpc_dec.harq_combined_output.data,
2885 op->ldpc_dec.harq_combined_output.offset,
2886 harq_dma_length_out,
2888 ACC100_DMA_BLKID_OUT_HARQ);
2890 mbuf_append(hq_output_head, hq_output, harq_dma_length_out);
2891 op->ldpc_dec.harq_combined_output.length =
2892 harq_dma_length_out;
2894 desc->req.data_ptrs[next_triplet - 1].last = 1;
2895 desc->req.d2mlen = next_triplet - desc->req.m2dlen;
2896 desc->req.op_addr = op;
2898 /* One CB (one op) was successfully prepared to enqueue */
2902 /* Enqueue one LDPC decode operation for the ACC100 device in CB mode */
2904 enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2905 uint16_t total_enqueued_cbs, bool same_op)
2908 if (unlikely(check_bit(op->ldpc_dec.op_flags,
2909 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK))) {
2910 ret = harq_loopback(q, op, total_enqueued_cbs);
2914 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2915 /* Validate op structure */
2916 if (validate_ldpc_dec_op(op) == -1) {
2917 rte_bbdev_log(ERR, "LDPC decoder validation failed");
2921 union acc100_dma_desc *desc;
2922 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2923 & q->sw_ring_wrap_mask);
2924 desc = q->ring_addr + desc_idx;
2925 struct rte_mbuf *input, *h_output_head, *h_output;
2926 uint32_t in_offset, h_out_offset, mbuf_total_left, h_out_length = 0;
2927 input = op->ldpc_dec.input.data;
2928 h_output_head = h_output = op->ldpc_dec.hard_output.data;
2929 in_offset = op->ldpc_dec.input.offset;
2930 h_out_offset = op->ldpc_dec.hard_output.offset;
2931 mbuf_total_left = op->ldpc_dec.input.length;
2932 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2933 if (unlikely(input == NULL)) {
2934 rte_bbdev_log(ERR, "Invalid mbuf pointer");
2938 union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
2941 union acc100_dma_desc *prev_desc;
2942 desc_idx = ((q->sw_ring_head + total_enqueued_cbs - 1)
2943 & q->sw_ring_wrap_mask);
2944 prev_desc = q->ring_addr + desc_idx;
2945 uint8_t *prev_ptr = (uint8_t *) prev_desc;
2946 uint8_t *new_ptr = (uint8_t *) desc;
2947 /* Copy first 4 words and BDESCs */
2948 rte_memcpy(new_ptr, prev_ptr, ACC100_5GUL_SIZE_0);
2949 rte_memcpy(new_ptr + ACC100_5GUL_OFFSET_0,
2950 prev_ptr + ACC100_5GUL_OFFSET_0,
2951 ACC100_5GUL_SIZE_1);
2952 desc->req.op_addr = prev_desc->req.op_addr;
2954 rte_memcpy(new_ptr + ACC100_DESC_FCW_OFFSET,
2955 prev_ptr + ACC100_DESC_FCW_OFFSET,
2956 ACC100_FCW_LD_BLEN);
2957 acc100_dma_desc_ld_update(op, &desc->req, input, h_output,
2958 &in_offset, &h_out_offset,
2959 &h_out_length, harq_layout);
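/*
 * Note: when consecutive ops carry identical LDPC parameters (same_op),
 * the previous descriptor words and FCW are copied wholesale and only the
 * data pointers are patched via acc100_dma_desc_ld_update(), avoiding a
 * full FCW rebuild per op.
 */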
2961 struct acc100_fcw_ld *fcw;
2962 uint32_t seg_total_left;
2963 fcw = &desc->req.fcw_ld;
2964 acc100_fcw_ld_fill(op, fcw, harq_layout);
2966 /* Special handling when the E size is too large for a single mbuf */
2967 if (fcw->rm_e < ACC100_MAX_E_MBUF)
2968 seg_total_left = rte_pktmbuf_data_len(input)
2971 seg_total_left = fcw->rm_e;
2973 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input, h_output,
2974 &in_offset, &h_out_offset,
2975 &h_out_length, &mbuf_total_left,
2976 &seg_total_left, fcw);
2977 if (unlikely(ret < 0))
2982 mbuf_append(h_output_head, h_output, h_out_length);
2983 #ifndef ACC100_EXT_MEM
2984 if (op->ldpc_dec.harq_combined_output.length > 0) {
2985 /* Push the HARQ output into host memory */
2986 struct rte_mbuf *hq_output_head, *hq_output;
2987 hq_output_head = op->ldpc_dec.harq_combined_output.data;
2988 hq_output = op->ldpc_dec.harq_combined_output.data;
2989 mbuf_append(hq_output_head, hq_output,
2990 op->ldpc_dec.harq_combined_output.length);
2994 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2995 rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
2996 sizeof(desc->req.fcw_ld) - 8);
2997 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
3000 /* One CB (one op) was successfully prepared to enqueue */
3005 /* Enqueue one LDPC decode operation for the ACC100 device in TB mode */
3007 enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
3008 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
3010 union acc100_dma_desc *desc = NULL;
3013 uint32_t in_offset, h_out_offset,
3014 h_out_length, mbuf_total_left, seg_total_left;
3015 struct rte_mbuf *input, *h_output_head, *h_output;
3016 uint16_t current_enqueued_cbs = 0;
3018 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3019 /* Validate op structure */
3020 if (validate_ldpc_dec_op(op) == -1) {
3021 rte_bbdev_log(ERR, "LDPC decoder validation failed");
3026 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
3027 & q->sw_ring_wrap_mask);
3028 desc = q->ring_addr + desc_idx;
3029 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
3030 union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
3031 acc100_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout);
3033 input = op->ldpc_dec.input.data;
3034 h_output_head = h_output = op->ldpc_dec.hard_output.data;
3035 in_offset = op->ldpc_dec.input.offset;
3036 h_out_offset = op->ldpc_dec.hard_output.offset;
3038 mbuf_total_left = op->ldpc_dec.input.length;
3039 c = op->ldpc_dec.tb_params.c;
3040 r = op->ldpc_dec.tb_params.r;
3042 while (mbuf_total_left > 0 && r < c) {
3044 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
3046 /* Set up DMA descriptor */
3047 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
3048 & q->sw_ring_wrap_mask);
3049 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
3050 desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
3051 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input,
3052 h_output, &in_offset, &h_out_offset,
3054 &mbuf_total_left, &seg_total_left,
3057 if (unlikely(ret < 0))
3061 mbuf_append(h_output_head, h_output, h_out_length);
3063 /* Set total number of CBs in TB */
3064 desc->req.cbs_in_tb = cbs_in_tb;
3065 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3066 rte_memdump(stderr, "FCW", &desc->req.fcw_td,
3067 sizeof(desc->req.fcw_td) - 8);
3068 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
3071 if (seg_total_left == 0) {
3072 /* Go to the next mbuf */
3073 input = input->next;
3075 h_output = h_output->next;
3078 total_enqueued_cbs++;
3079 current_enqueued_cbs++;
3083 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3084 if (check_mbuf_total_left(mbuf_total_left) != 0)
3087 /* Set SDone on last CB descriptor for TB mode */
3088 desc->req.sdone_enable = 1;
3089 desc->req.irq_enable = q->irq_enable;
3091 return current_enqueued_cbs;
3094 /* Enqueue one decode operation for the ACC100 device in TB mode */
3096 enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
3097 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
3099 union acc100_dma_desc *desc = NULL;
3102 uint32_t in_offset, h_out_offset, s_out_offset, s_out_length,
3103 h_out_length, mbuf_total_left, seg_total_left;
3104 struct rte_mbuf *input, *h_output_head, *h_output,
3105 *s_output_head, *s_output;
3106 uint16_t current_enqueued_cbs = 0;
3108 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3109 /* Validate op structure */
3110 if (validate_dec_op(op) == -1) {
3111 rte_bbdev_log(ERR, "Turbo decoder validation failed");
3116 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
3117 & q->sw_ring_wrap_mask);
3118 desc = q->ring_addr + desc_idx;
3119 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
3120 acc100_fcw_td_fill(op, &desc->req.fcw_td);
3122 input = op->turbo_dec.input.data;
3123 h_output_head = h_output = op->turbo_dec.hard_output.data;
3124 s_output_head = s_output = op->turbo_dec.soft_output.data;
3125 in_offset = op->turbo_dec.input.offset;
3126 h_out_offset = op->turbo_dec.hard_output.offset;
3127 s_out_offset = op->turbo_dec.soft_output.offset;
3128 h_out_length = s_out_length = 0;
3129 mbuf_total_left = op->turbo_dec.input.length;
3130 c = op->turbo_dec.tb_params.c;
3131 r = op->turbo_dec.tb_params.r;
3133 while (mbuf_total_left > 0 && r < c) {
3135 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
3137 /* Set up DMA descriptor */
3138 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
3139 & q->sw_ring_wrap_mask);
3140 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
3141 desc->req.data_ptrs[0].blen = ACC100_FCW_TD_BLEN;
3142 ret = acc100_dma_desc_td_fill(op, &desc->req, &input,
3143 h_output, s_output, &in_offset, &h_out_offset,
3144 &s_out_offset, &h_out_length, &s_out_length,
3145 &mbuf_total_left, &seg_total_left, r);
3147 if (unlikely(ret < 0))
3151 mbuf_append(h_output_head, h_output, h_out_length);
3154 if (check_bit(op->turbo_dec.op_flags,
3155 RTE_BBDEV_TURBO_SOFT_OUTPUT))
3156 mbuf_append(s_output_head, s_output, s_out_length);
3158 /* Set total number of CBs in TB */
3159 desc->req.cbs_in_tb = cbs_in_tb;
3160 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3161 rte_memdump(stderr, "FCW", &desc->req.fcw_td,
3162 sizeof(desc->req.fcw_td) - 8);
3163 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
3166 if (seg_total_left == 0) {
3167 /* Go to the next mbuf */
3168 input = input->next;
3170 h_output = h_output->next;
3173 if (check_bit(op->turbo_dec.op_flags,
3174 RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
3175 s_output = s_output->next;
3180 total_enqueued_cbs++;
3181 current_enqueued_cbs++;
3185 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3186 if (check_mbuf_total_left(mbuf_total_left) != 0)
3189 /* Set SDone on last CB descriptor for TB mode */
3190 desc->req.sdone_enable = 1;
3191 desc->req.irq_enable = q->irq_enable;
3193 return current_enqueued_cbs;
3196 /* Calculate the number of CBs in a processed encoder TB based on 'r' and the input length. */
3199 static inline uint8_t
3200 get_num_cbs_in_tb_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)
3202 uint8_t c, c_neg, r, crc24_bits = 0;
3203 uint16_t k, k_neg, k_pos;
3204 uint8_t cbs_in_tb = 0;
3207 length = turbo_enc->input.length;
3208 r = turbo_enc->tb_params.r;
3209 c = turbo_enc->tb_params.c;
3210 c_neg = turbo_enc->tb_params.c_neg;
3211 k_neg = turbo_enc->tb_params.k_neg;
3212 k_pos = turbo_enc->tb_params.k_pos;
3214 if (check_bit(turbo_enc->op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
3216 while (length > 0 && r < c) {
3217 k = (r < c_neg) ? k_neg : k_pos;
3218 length -= (k - crc24_bits) >> 3;
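/*
 * Note: with CRC24B attachment each CB contributes (K - 24) / 8 payload
 * bytes, so the loop above walks the TB input length CB by CB to count how
 * many descriptors the TB will need.
 */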
3226 /* Calculate the number of CBs in a processed decoder TB based on 'r' and the input length. */
3229 static inline uint16_t
3230 get_num_cbs_in_tb_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)
3232 uint8_t c, c_neg, r = 0;
3233 uint16_t kw, k, k_neg, k_pos, cbs_in_tb = 0;
3236 length = turbo_dec->input.length;
3237 r = turbo_dec->tb_params.r;
3238 c = turbo_dec->tb_params.c;
3239 c_neg = turbo_dec->tb_params.c_neg;
3240 k_neg = turbo_dec->tb_params.k_neg;
3241 k_pos = turbo_dec->tb_params.k_pos;
3242 while (length > 0 && r < c) {
3243 k = (r < c_neg) ? k_neg : k_pos;
3244 kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
3253 /* Calculate the number of CBs in a processed LDPC decoder TB based on 'r' and the input length. */
3256 static inline uint16_t
3257 get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)
3259 uint16_t r, cbs_in_tb = 0;
3260 int32_t length = ldpc_dec->input.length;
3261 r = ldpc_dec->tb_params.r;
3262 while (length > 0 && r < ldpc_dec->tb_params.c) {
3263 length -= (r < ldpc_dec->tb_params.cab) ?
3264 ldpc_dec->tb_params.ea :
3265 ldpc_dec->tb_params.eb;
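/*
 * Note: rate-matched sizes differ within a TB; the first cab CBs consume
 * ea bytes of LLR input each and the remaining CBs eb bytes, which is why
 * the countdown above picks ea or eb based on r.
 */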
3272 /* Enqueue encode operations for ACC100 device in CB mode. */
3274 acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
3275 struct rte_bbdev_enc_op **ops, uint16_t num)
3277 struct acc100_queue *q = q_data->queue_private;
3278 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3280 union acc100_dma_desc *desc;
3283 for (i = 0; i < num; ++i) {
3284 /* Check if there is available space for further processing */
3285 if (unlikely(avail - 1 < 0))
3289 ret = enqueue_enc_one_op_cb(q, ops[i], i);
3294 if (unlikely(i == 0))
3295 return 0; /* Nothing to enqueue */
3297 /* Set SDone in last CB in enqueued ops for CB mode */
3298 desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3299 & q->sw_ring_wrap_mask);
3300 desc->req.sdone_enable = 1;
3301 desc->req.irq_enable = q->irq_enable;
3303 acc100_dma_enqueue(q, i, &q_data->queue_stats);
3306 q_data->queue_stats.enqueued_count += i;
3307 q_data->queue_stats.enqueue_err_count += num - i;
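/*
 * Note: "avail" above is the free-descriptor count of the software ring,
 * depth + tail - head, where head and tail are free-running indices that
 * are only masked with sw_ring_wrap_mask when indexing the ring.
 */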
3311 /* Check whether we can mux encode operations with a common FCW */
3313 check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {
3317 for (i = 1; i < num; ++i) {
3318 /* Only mux compatible code blocks */
3319 if (memcmp((uint8_t *)(&ops[i]->ldpc_enc) + ACC100_ENC_OFFSET,
3320 (uint8_t *)(&ops[0]->ldpc_enc) +
3322 ACC100_CMP_ENC_SIZE) != 0)
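/*
 * Note: ops can share one muxed descriptor only when their LDPC fields
 * beyond the per-CB data pointers match byte for byte; the memcmp window
 * starting at ACC100_ENC_OFFSET compares exactly that region.
 */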
3328 /* Enqueue LDPC encode operations for the ACC100 device in CB mode. */
3329 static inline uint16_t
3330 acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
3331 struct rte_bbdev_enc_op **ops, uint16_t num)
3333 struct acc100_queue *q = q_data->queue_private;
3334 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3336 union acc100_dma_desc *desc;
3337 int ret, desc_idx = 0;
3338 int16_t enq, left = num;
3341 if (unlikely(avail < 1))
3344 enq = RTE_MIN(left, ACC100_MUX_5GDL_DESC);
3345 if (check_mux(&ops[i], enq)) {
3346 ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],
3352 ret = enqueue_ldpc_enc_one_op_cb(q, ops[i], desc_idx);
3361 if (unlikely(i == 0))
3362 return 0; /* Nothing to enqueue */
3364 /* Set SDone in last CB in enqueued ops for CB mode */
3365 desc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1)
3366 & q->sw_ring_wrap_mask);
3367 desc->req.sdone_enable = 1;
3368 desc->req.irq_enable = q->irq_enable;
3370 acc100_dma_enqueue(q, desc_idx, &q_data->queue_stats);
3373 q_data->queue_stats.enqueued_count += i;
3374 q_data->queue_stats.enqueue_err_count += num - i;
3379 /* Enqueue encode operations for ACC100 device in TB mode. */
3381 acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,
3382 struct rte_bbdev_enc_op **ops, uint16_t num)
3384 struct acc100_queue *q = q_data->queue_private;
3385 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3386 uint16_t i, enqueued_cbs = 0;
3390 for (i = 0; i < num; ++i) {
3391 cbs_in_tb = get_num_cbs_in_tb_enc(&ops[i]->turbo_enc);
3392 /* Check if there is available space for further processing */
3393 if (unlikely(avail - cbs_in_tb < 0))
3397 ret = enqueue_enc_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);
3400 enqueued_cbs += ret;
3402 if (unlikely(enqueued_cbs == 0))
3403 return 0; /* Nothing to enqueue */
3405 acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3408 q_data->queue_stats.enqueued_count += i;
3409 q_data->queue_stats.enqueue_err_count += num - i;
3414 /* Enqueue encode operations for ACC100 device. */
3416 acc100_enqueue_enc(struct rte_bbdev_queue_data *q_data,
3417 struct rte_bbdev_enc_op **ops, uint16_t num)
3419 if (unlikely(num == 0))
3421 if (ops[0]->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
3422 return acc100_enqueue_enc_tb(q_data, ops, num);
3424 return acc100_enqueue_enc_cb(q_data, ops, num);
3427 /* Enqueue LDPC encode operations for ACC100 device. */
3429 acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
3430 struct rte_bbdev_enc_op **ops, uint16_t num)
3432 if (unlikely(num == 0))
3434 if (ops[0]->ldpc_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
3435 return acc100_enqueue_enc_tb(q_data, ops, num);
3437 return acc100_enqueue_ldpc_enc_cb(q_data, ops, num);
3441 /* Enqueue decode operations for ACC100 device in CB mode */
3443 acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
3444 struct rte_bbdev_dec_op **ops, uint16_t num)
3446 struct acc100_queue *q = q_data->queue_private;
3447 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3449 union acc100_dma_desc *desc;
3452 for (i = 0; i < num; ++i) {
3453 /* Check if there is available space for further processing */
3454 if (unlikely(avail - 1 < 0))
3458 ret = enqueue_dec_one_op_cb(q, ops[i], i);
3463 if (unlikely(i == 0))
3464 return 0; /* Nothing to enqueue */
3466 /* Set SDone in last CB in enqueued ops for CB mode */
3467 desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3468 & q->sw_ring_wrap_mask);
3469 desc->req.sdone_enable = 1;
3470 desc->req.irq_enable = q->irq_enable;
3472 acc100_dma_enqueue(q, i, &q_data->queue_stats);
3475 q_data->queue_stats.enqueued_count += i;
3476 q_data->queue_stats.enqueue_err_count += num - i;
3481 /* Check whether we can mux decode operations with a common FCW */
3483 cmp_ldpc_dec_op(struct rte_bbdev_dec_op **ops) {
3484 /* Only mux compatible code blocks */
3485 if (memcmp((uint8_t *)(&ops[0]->ldpc_dec) + ACC100_DEC_OFFSET,
3486 (uint8_t *)(&ops[1]->ldpc_dec) +
3487 ACC100_DEC_OFFSET, ACC100_CMP_DEC_SIZE) != 0) {
3494 /* Enqueue LDPC decode operations for ACC100 device in TB mode */
3496 acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,
3497 struct rte_bbdev_dec_op **ops, uint16_t num)
3499 struct acc100_queue *q = q_data->queue_private;
3500 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3501 uint16_t i, enqueued_cbs = 0;
3505 for (i = 0; i < num; ++i) {
3506 cbs_in_tb = get_num_cbs_in_tb_ldpc_dec(&ops[i]->ldpc_dec);
3507 /* Check if there is available space for further processing */
3508 if (unlikely(avail - cbs_in_tb < 0))
3512 ret = enqueue_ldpc_dec_one_op_tb(q, ops[i],
3513 enqueued_cbs, cbs_in_tb);
3516 enqueued_cbs += ret;
3519 acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3522 q_data->queue_stats.enqueued_count += i;
3523 q_data->queue_stats.enqueue_err_count += num - i;
3527 /* Enqueue LDPC decode operations for ACC100 device in CB mode */
3529 acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
3530 struct rte_bbdev_dec_op **ops, uint16_t num)
3532 struct acc100_queue *q = q_data->queue_private;
3533 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3535 union acc100_dma_desc *desc;
3537 bool same_op = false;
3538 for (i = 0; i < num; ++i) {
3539 /* Check if there is available space for further processing */
3540 if (unlikely(avail < 1))
3545 same_op = cmp_ldpc_dec_op(&ops[i-1]);
3546 rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d",
3547 i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index,
3548 ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count,
3549 ops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c,
3550 ops[i]->ldpc_dec.n_cb, ops[i]->ldpc_dec.q_m,
3551 ops[i]->ldpc_dec.n_filler, ops[i]->ldpc_dec.cb_params.e,
3553 ret = enqueue_ldpc_dec_one_op_cb(q, ops[i], i, same_op);
3558 if (unlikely(i == 0))
3559 return 0; /* Nothing to enqueue */
3561 /* Set SDone in last CB in enqueued ops for CB mode */
3562 desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3563 & q->sw_ring_wrap_mask);
3565 desc->req.sdone_enable = 1;
3566 desc->req.irq_enable = q->irq_enable;
3568 acc100_dma_enqueue(q, i, &q_data->queue_stats);
3571 q_data->queue_stats.enqueued_count += i;
3572 q_data->queue_stats.enqueue_err_count += num - i;
3577 /* Enqueue decode operations for ACC100 device in TB mode */
3579 acc100_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data,
3580 struct rte_bbdev_dec_op **ops, uint16_t num)
3582 struct acc100_queue *q = q_data->queue_private;
3583 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3584 uint16_t i, enqueued_cbs = 0;
3588 for (i = 0; i < num; ++i) {
3589 cbs_in_tb = get_num_cbs_in_tb_dec(&ops[i]->turbo_dec);
3590 /* Check if there is available space for further processing */
3591 if (unlikely(avail - cbs_in_tb < 0))
3595 ret = enqueue_dec_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);
3598 enqueued_cbs += ret;
3601 acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3604 q_data->queue_stats.enqueued_count += i;
3605 q_data->queue_stats.enqueue_err_count += num - i;
3610 /* Enqueue decode operations for ACC100 device. */
3612 acc100_enqueue_dec(struct rte_bbdev_queue_data *q_data,
3613 struct rte_bbdev_dec_op **ops, uint16_t num)
3615 if (unlikely(num == 0))
3617 if (ops[0]->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
3618 return acc100_enqueue_dec_tb(q_data, ops, num);
3620 return acc100_enqueue_dec_cb(q_data, ops, num);
3623 /* Enqueue LDPC decode operations for ACC100 device. */
3625 acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
3626 struct rte_bbdev_dec_op **ops, uint16_t num)
3628 struct acc100_queue *q = q_data->queue_private;
3629 int32_t aq_avail = q->aq_depth +
3630 (q->aq_dequeued - q->aq_enqueued) / 128;
3632 if (unlikely((aq_avail == 0) || (num == 0)))
3635 if (ops[0]->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
3636 return acc100_enqueue_ldpc_dec_tb(q_data, ops, num);
3638 return acc100_enqueue_ldpc_dec_cb(q_data, ops, num);
3642 /* Dequeue one encode operation from ACC100 device in CB mode */
3644 dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
3645 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
3647 union acc100_dma_desc *desc, atom_desc;
3648 union acc100_dma_rsp_desc rsp;
3649 struct rte_bbdev_enc_op *op;
3652 desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
3653 & q->sw_ring_wrap_mask);
3654 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3657 /* Check fdone bit */
3658 if (!(atom_desc.rsp.val & ACC100_FDONE))
3661 rsp.val = atom_desc.rsp.val;
3662 rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
3665 op = desc->req.op_addr;
3667 /* Clear the status; it will be set based on the response */
3670 op->status |= ((rsp.input_err)
3671 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3672 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3673 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3675 if (desc->req.last_desc_in_batch) {
3677 desc->req.last_desc_in_batch = 0;
3679 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3680 desc->rsp.add_info_0 = 0; /* Reserved bits */
3681 desc->rsp.add_info_1 = 0; /* Reserved bits */
3683 /* Flag that the muxing causes loss of opaque data */
3684 op->opaque_data = (void *)-1;
3685 for (i = 0 ; i < desc->req.numCBs; i++)
3688 /* One CB (op) was successfully dequeued */
3689 return desc->req.numCBs;
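/*
 * Note: a muxed descriptor completes several CBs at once, so the
 * individual opaque_data of the muxed ops cannot be recovered; the driver
 * marks this with (void *)-1 and reports numCBs ops as dequeued.
 */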
3692 /* Dequeue one encode operation from ACC100 device in TB mode */
3694 dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
3695 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
3697 union acc100_dma_desc *desc, *last_desc, atom_desc;
3698 union acc100_dma_rsp_desc rsp;
3699 struct rte_bbdev_enc_op *op;
3701 uint16_t current_dequeued_cbs = 0, cbs_in_tb;
3703 desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
3704 & q->sw_ring_wrap_mask);
3705 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3708 /* Check fdone bit */
3709 if (!(atom_desc.rsp.val & ACC100_FDONE))
3712 /* Get number of CBs in dequeued TB */
3713 cbs_in_tb = desc->req.cbs_in_tb;
3715 last_desc = q->ring_addr + ((q->sw_ring_tail
3716 + total_dequeued_cbs + cbs_in_tb - 1)
3717 & q->sw_ring_wrap_mask);
3718 /* Check if last CB in TB is ready to dequeue (and thus
3719 * the whole TB) - checking sdone bit. If not return.
3721 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
3723 if (!(atom_desc.rsp.val & ACC100_SDONE))
3727 op = desc->req.op_addr;
3729 /* Clear the status; it will be set based on the response */
3732 while (i < cbs_in_tb) {
3733 desc = q->ring_addr + ((q->sw_ring_tail
3734 + total_dequeued_cbs)
3735 & q->sw_ring_wrap_mask);
3736 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3738 rsp.val = atom_desc.rsp.val;
3739 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
3742 op->status |= ((rsp.input_err)
3743 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3744 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3745 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3747 if (desc->req.last_desc_in_batch) {
3749 desc->req.last_desc_in_batch = 0;
3751 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3752 desc->rsp.add_info_0 = 0;
3753 desc->rsp.add_info_1 = 0;
3754 total_dequeued_cbs++;
3755 current_dequeued_cbs++;
3761 return current_dequeued_cbs;
3764 /* Dequeue one decode operation from ACC100 device in CB mode */
3766 dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
3767 struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3768 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3770 union acc100_dma_desc *desc, atom_desc;
3771 union acc100_dma_rsp_desc rsp;
3772 struct rte_bbdev_dec_op *op;
3774 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3775 & q->sw_ring_wrap_mask);
3776 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3779 /* Check fdone bit */
3780 if (!(atom_desc.rsp.val & ACC100_FDONE))
3783 rsp.val = atom_desc.rsp.val;
3784 rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
3787 op = desc->req.op_addr;
3789 /* Clear the status; it will be set based on the response */
3791 op->status |= ((rsp.input_err)
3792 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3793 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3794 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3795 if (op->status != 0) {
3796 q_data->queue_stats.dequeue_err_count++;
3797 acc100_check_ir(q->d);
3800 /* CRC invalid if error exists */
3802 op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3803 op->turbo_dec.iter_count = (uint8_t) rsp.iter_cnt / 2;
3804 /* Check if this is the last desc in batch (Atomic Queue) */
3805 if (desc->req.last_desc_in_batch) {
3807 desc->req.last_desc_in_batch = 0;
3809 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3810 desc->rsp.add_info_0 = 0;
3811 desc->rsp.add_info_1 = 0;
3814 /* One CB (op) was successfully dequeued */
3818 /* Dequeue one LDPC decode operation from ACC100 device in CB mode */
3820 dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
3821 struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3822 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3824 union acc100_dma_desc *desc, atom_desc;
3825 union acc100_dma_rsp_desc rsp;
3826 struct rte_bbdev_dec_op *op;
3828 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3829 & q->sw_ring_wrap_mask);
3830 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3833 /* Check fdone bit */
3834 if (!(atom_desc.rsp.val & ACC100_FDONE))
3837 rsp.val = atom_desc.rsp.val;
3840 op = desc->req.op_addr;
3842 /* Clear the status; it will be set based on the response */
3844 op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;
3845 op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;
3846 op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;
3847 if (op->status != 0)
3848 q_data->queue_stats.dequeue_err_count++;
3850 op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3851 if (op->ldpc_dec.hard_output.length > 0 && !rsp.synd_ok)
3852 op->status |= 1 << RTE_BBDEV_SYNDROME_ERROR;
3853 op->ldpc_dec.iter_count = (uint8_t) rsp.iter_cnt;
3855 if (op->status & (1 << RTE_BBDEV_DRV_ERROR))
3856 acc100_check_ir(q->d);
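/*
 * Note: unlike the turbo path, LDPC decode reports a syndrome check; a
 * non-empty hard output with synd_ok clear is surfaced as
 * RTE_BBDEV_SYNDROME_ERROR in addition to the CRC status bit.
 */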
3858 /* Check if this is the last desc in batch (Atomic Queue) */
3859 if (desc->req.last_desc_in_batch) {
3861 desc->req.last_desc_in_batch = 0;
3864 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3865 desc->rsp.add_info_0 = 0;
3866 desc->rsp.add_info_1 = 0;
3870 /* One CB (op) was successfully dequeued */
3874 /* Dequeue one decode operation from ACC100 device in TB mode. */
3876 dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3877 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3879 union acc100_dma_desc *desc, *last_desc, atom_desc;
3880 union acc100_dma_rsp_desc rsp;
3881 struct rte_bbdev_dec_op *op;
3882 uint8_t cbs_in_tb = 1, cb_idx = 0;
3884 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3885 & q->sw_ring_wrap_mask);
3886 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3889 /* Check fdone bit */
3890 if (!(atom_desc.rsp.val & ACC100_FDONE))
3894 op = desc->req.op_addr;
3896 /* Get number of CBs in dequeued TB */
3897 cbs_in_tb = desc->req.cbs_in_tb;
3899 last_desc = q->ring_addr + ((q->sw_ring_tail
3900 + dequeued_cbs + cbs_in_tb - 1)
3901 & q->sw_ring_wrap_mask);
3902 /* Check if last CB in TB is ready to dequeue (and thus
3903 * the whole TB) - checking sdone bit. If not return.
3905 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
3907 if (!(atom_desc.rsp.val & ACC100_SDONE))
3910 /* Clear the status; it will be set based on the response */
3913 /* Read remaining CBs if any exist */
3914 while (cb_idx < cbs_in_tb) {
3915 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3916 & q->sw_ring_wrap_mask);
3917 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3919 rsp.val = atom_desc.rsp.val;
3920 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
3923 op->status |= ((rsp.input_err)
3924 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3925 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3926 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3928 /* CRC invalid if error exists */
3930 op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3931 op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
3932 op->turbo_dec.iter_count);
3934 /* Check if this is the last desc in batch (Atomic Queue) */
3935 if (desc->req.last_desc_in_batch) {
3937 desc->req.last_desc_in_batch = 0;
3939 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3940 desc->rsp.add_info_0 = 0;
3941 desc->rsp.add_info_1 = 0;
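/*
 * Note: bbdev exposes a single iteration count per op, so for a TB the
 * driver reports the maximum iter_cnt across its CBs (RTE_MAX above).
 */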
3951 /* Dequeue encode operations from ACC100 device. */
3953 acc100_dequeue_enc(struct rte_bbdev_queue_data *q_data,
3954 struct rte_bbdev_enc_op **ops, uint16_t num)
3956 struct acc100_queue *q = q_data->queue_private;
3957 uint16_t dequeue_num;
3958 uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
3959 uint32_t aq_dequeued = 0;
3960 uint16_t i, dequeued_cbs = 0;
3961 struct rte_bbdev_enc_op *op;
3964 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3965 if (unlikely(ops == NULL || q == NULL)) {
3966 rte_bbdev_log_debug("Unexpected undefined pointer");
3971 dequeue_num = (avail < num) ? avail : num;
3973 for (i = 0; i < dequeue_num; ++i) {
3974 op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3975 & q->sw_ring_wrap_mask))->req.op_addr;
3976 if (op->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
3977 ret = dequeue_enc_one_op_tb(q, &ops[i], dequeued_cbs,
3980 ret = dequeue_enc_one_op_cb(q, &ops[i], dequeued_cbs,
3985 dequeued_cbs += ret;
3988 q->aq_dequeued += aq_dequeued;
3989 q->sw_ring_tail += dequeued_cbs;
3991 /* Update dequeue stats */
3992 q_data->queue_stats.dequeued_count += i;
3997 /* Dequeue LDPC encode operations from ACC100 device. */
3999 acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
4000 struct rte_bbdev_enc_op **ops, uint16_t num)
4002 struct acc100_queue *q = q_data->queue_private;
4003 uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
4004 uint32_t aq_dequeued = 0;
4005 uint16_t dequeue_num, i, dequeued_cbs = 0, dequeued_descs = 0;
4008 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4009 if (unlikely(ops == NULL || q == NULL))
4013 dequeue_num = RTE_MIN(avail, num);
4015 for (i = 0; i < dequeue_num; i++) {
4016 ret = dequeue_enc_one_op_cb(q, &ops[dequeued_cbs],
4017 dequeued_descs, &aq_dequeued);
4020 dequeued_cbs += ret;
4022 if (dequeued_cbs >= num)
4026 q->aq_dequeued += aq_dequeued;
4027 q->sw_ring_tail += dequeued_descs;
4029 /* Update dequeue stats */
4030 q_data->queue_stats.dequeued_count += dequeued_cbs;
4032 return dequeued_cbs;
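/*
 * Note: for muxed LDPC encode one ring descriptor may complete several
 * CBs, hence the separate counters above: dequeued_descs advances the ring
 * tail while dequeued_cbs counts the ops handed back to the caller.
 */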
4036 /* Dequeue decode operations from ACC100 device. */
4038 acc100_dequeue_dec(struct rte_bbdev_queue_data *q_data,
4039 struct rte_bbdev_dec_op **ops, uint16_t num)
4041 struct acc100_queue *q = q_data->queue_private;
4042 uint16_t dequeue_num;
4043 uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
4044 uint32_t aq_dequeued = 0;
4046 uint16_t dequeued_cbs = 0;
4047 struct rte_bbdev_dec_op *op;
4050 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4051 if (unlikely(ops == NULL || q == NULL))
4055 dequeue_num = (avail < num) ? avail : num;
4057 for (i = 0; i < dequeue_num; ++i) {
4058 op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
4059 & q->sw_ring_wrap_mask))->req.op_addr;
4060 if (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
4061 ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
4064 ret = dequeue_dec_one_op_cb(q_data, q, &ops[i],
4065 dequeued_cbs, &aq_dequeued);
4069 dequeued_cbs += ret;
4072 q->aq_dequeued += aq_dequeued;
4073 q->sw_ring_tail += dequeued_cbs;
4075 /* Update dequeue stats */
4076 q_data->queue_stats.dequeued_count += i;
4081 /* Dequeue LDPC decode operations from ACC100 device. */
4083 acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
4084 struct rte_bbdev_dec_op **ops, uint16_t num)
4086 struct acc100_queue *q = q_data->queue_private;
4087 uint16_t dequeue_num;
4088 uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
4089 uint32_t aq_dequeued = 0;
4091 uint16_t dequeued_cbs = 0;
4092 struct rte_bbdev_dec_op *op;
4095 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4096 if (unlikely(ops == NULL || q == NULL))
4100 dequeue_num = RTE_MIN(avail, num);
4102 for (i = 0; i < dequeue_num; ++i) {
4103 op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
4104 & q->sw_ring_wrap_mask))->req.op_addr;
4105 if (op->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
4106 ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
4109 ret = dequeue_ldpc_dec_one_op_cb(
4110 q_data, q, &ops[i], dequeued_cbs,
4115 dequeued_cbs += ret;
4118 q->aq_dequeued += aq_dequeued;
4119 q->sw_ring_tail += dequeued_cbs;
4121 /* Update dequeue stats */
4122 q_data->queue_stats.dequeued_count += i;
/* Initialization Function */
static void
acc100_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev->dev_ops = &acc100_bbdev_ops;
	dev->enqueue_enc_ops = acc100_enqueue_enc;
	dev->enqueue_dec_ops = acc100_enqueue_dec;
	dev->dequeue_enc_ops = acc100_dequeue_enc;
	dev->dequeue_dec_ops = acc100_dequeue_dec;
	dev->enqueue_ldpc_enc_ops = acc100_enqueue_ldpc_enc;
	dev->enqueue_ldpc_dec_ops = acc100_enqueue_ldpc_dec;
	dev->dequeue_ldpc_enc_ops = acc100_dequeue_ldpc_enc;
	dev->dequeue_ldpc_dec_ops = acc100_dequeue_ldpc_dec;

	/* PF vs VF is inferred from the driver name the device probed with */
	((struct acc100_device *) dev->data->dev_private)->pf_device =
			!strcmp(drv->driver.name,
					RTE_STR(ACC100PF_DRIVER_NAME));
	((struct acc100_device *) dev->data->dev_private)->mmio_base =
			pci_dev->mem_resource[0].addr;

	rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"",
			drv->driver.name, dev->data->name,
			(void *)pci_dev->mem_resource[0].addr,
			pci_dev->mem_resource[0].phys_addr);
}
static int acc100_pci_probe(struct rte_pci_driver *pci_drv,
	struct rte_pci_device *pci_dev)
{
	struct rte_bbdev *bbdev = NULL;
	char dev_name[RTE_BBDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		rte_bbdev_log(ERR, "NULL PCI device");
		return -EINVAL;
	}

	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));

	/* Allocate memory to be used privately by drivers */
	bbdev = rte_bbdev_allocate(pci_dev->device.name);
	if (bbdev == NULL)
		return -ENODEV;

	/* allocate device private memory */
	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
			sizeof(struct acc100_device), RTE_CACHE_LINE_SIZE,
			pci_dev->device.numa_node);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_log(CRIT,
				"Allocation of %zu bytes for device \"%s\" failed",
				sizeof(struct acc100_device), dev_name);
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	/* Fill HW specific part of device structure */
	bbdev->device = &pci_dev->device;
	bbdev->intr_handle = pci_dev->intr_handle;
	bbdev->data->socket_id = pci_dev->device.numa_node;

	/* Invoke ACC100 device initialization function */
	acc100_bbdev_init(bbdev, pci_drv);

	rte_bbdev_log_debug("Initialised bbdev %s (id = %u)",
			dev_name, bbdev->data->dev_id);

	return 0;
}
static int acc100_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_bbdev *bbdev;
	int ret;
	uint8_t dev_id;

	if (pci_dev == NULL)
		return -EINVAL;

	/* Find device */
	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
	if (bbdev == NULL) {
		rte_bbdev_log(ERR,
				"Couldn't find HW dev \"%s\" to uninitialise it",
				pci_dev->device.name);
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free device private memory before close */
	rte_free(bbdev->data->dev_private);

	/* Close device */
	ret = rte_bbdev_close(dev_id);
	if (ret < 0)
		rte_bbdev_log(ERR,
				"Device %i failed to close during uninit: %i",
				dev_id, ret);

	/* release bbdev from library */
	rte_bbdev_release(bbdev);

	rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);

	return 0;
}
static struct rte_pci_driver acc100_pci_pf_driver = {
		.probe = acc100_pci_probe,
		.remove = acc100_pci_remove,
		.id_table = pci_id_acc100_pf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING
};

static struct rte_pci_driver acc100_pci_vf_driver = {
		.probe = acc100_pci_probe,
		.remove = acc100_pci_remove,
		.id_table = pci_id_acc100_vf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING
};
RTE_PMD_REGISTER_PCI(ACC100PF_DRIVER_NAME, acc100_pci_pf_driver);
RTE_PMD_REGISTER_PCI_TABLE(ACC100PF_DRIVER_NAME, pci_id_acc100_pf_map);
RTE_PMD_REGISTER_PCI(ACC100VF_DRIVER_NAME, acc100_pci_vf_driver);
RTE_PMD_REGISTER_PCI_TABLE(ACC100VF_DRIVER_NAME, pci_id_acc100_vf_map);
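
/*
 * Illustrative usage (not part of the driver): once the PF has been probed
 * by this PMD, a management application is expected to push a device
 * configuration through rte_acc100_configure() before setting up bbdev
 * queues. A minimal sketch, assuming a single VF bundle and one 5G UL and
 * one 5G DL queue group (all field values below are examples only):
 *
 *	struct rte_acc100_conf conf = {0};
 *
 *	conf.pf_mode_en = 1;
 *	conf.num_vf_bundles = 1;
 *	conf.q_ul_5g.num_qgroups = 1;
 *	conf.q_ul_5g.num_aqs_per_groups = 16;
 *	conf.q_ul_5g.aq_depth_log2 = 4;
 *	conf.q_dl_5g.num_qgroups = 1;
 *	conf.q_dl_5g.num_aqs_per_groups = 16;
 *	conf.q_dl_5g.aq_depth_log2 = 4;
 *	if (rte_acc100_configure("0000:b0:00.0", &conf) != 0)
 *		rte_exit(EXIT_FAILURE, "ACC100 configure failed");
 */
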
/*
 * Workaround implementation to fix the power-on status of some 5GUL engines.
 * This requires DMA permission if ported outside DPDK.
 * It resolves the state of these engines by running a dummy operation and
 * resetting the engines so that their state is reliably defined.
 */
static void
poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,
		struct rte_acc100_conf *conf)
{
	int i, template_idx, qg_idx;
	uint32_t address, status, value;
	printf("Need to clear power-on 5GUL status in internal memory\n");
	/* Reset LDPC Cores */
	for (i = 0; i < ACC100_ENGINES_MAX; i++)
		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
				ACC100_ENGINE_OFFSET * i, ACC100_RESET_HI);
	usleep(ACC100_LONG_WAIT);
	for (i = 0; i < ACC100_ENGINES_MAX; i++)
		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
				ACC100_ENGINE_OFFSET * i, ACC100_RESET_LO);
	usleep(ACC100_LONG_WAIT);
	/* Prepare dummy workload */
	alloc_2x64mb_sw_rings_mem(bbdev, d, 0);
	/* Set base addresses */
	uint32_t phys_high = (uint32_t)(d->sw_rings_iova >> 32);
	uint32_t phys_low = (uint32_t)(d->sw_rings_iova &
			~(ACC100_SIZE_64MBYTE-1));
	acc100_reg_write(d, HWPfDmaFec5GulDescBaseHiRegVf, phys_high);
	acc100_reg_write(d, HWPfDmaFec5GulDescBaseLoRegVf, phys_low);

	/* Descriptor for a dummy 5GUL code block processing */
	union acc100_dma_desc *desc = NULL;
	desc = d->sw_rings;
	desc->req.data_ptrs[0].address = d->sw_rings_iova +
			ACC100_DESC_FCW_OFFSET;
	desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
	desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
	desc->req.data_ptrs[0].last = 0;
	desc->req.data_ptrs[0].dma_ext = 0;
	desc->req.data_ptrs[1].address = d->sw_rings_iova + 512;
	desc->req.data_ptrs[1].blkid = ACC100_DMA_BLKID_IN;
	desc->req.data_ptrs[1].last = 1;
	desc->req.data_ptrs[1].dma_ext = 0;
	desc->req.data_ptrs[1].blen = 44;
	desc->req.data_ptrs[2].address = d->sw_rings_iova + 1024;
	desc->req.data_ptrs[2].blkid = ACC100_DMA_BLKID_OUT_ENC;
	desc->req.data_ptrs[2].last = 1;
	desc->req.data_ptrs[2].dma_ext = 0;
	desc->req.data_ptrs[2].blen = 5;
	/* Dummy FCW for a minimal LDPC decode operation */
	desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
	desc->req.fcw_ld.qm = 1;
	desc->req.fcw_ld.nfiller = 30;
	desc->req.fcw_ld.BG = 2 - 1;
	desc->req.fcw_ld.Zc = 7;
	desc->req.fcw_ld.ncb = 350;
	desc->req.fcw_ld.rm_e = 4;
	desc->req.fcw_ld.itmax = 10;
	desc->req.fcw_ld.gain_i = 1;
	desc->req.fcw_ld.gain_h = 1;
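
	/*
	 * The FCW values above describe one tiny, self-consistent LDPC
	 * decode (base graph 2 encoded as BG - 1, Zc = 7, Ncb = 350, at
	 * most 10 iterations). The content of the result is irrelevant:
	 * the operation only exists to exercise each engine once so its
	 * power-on state becomes well defined.
	 */
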
	int engines_to_restart[ACC100_SIG_UL_5G_LAST + 1] = {0};
	int num_failed_engine = 0;
	/* Detect engines in undefined state */
	for (template_idx = ACC100_SIG_UL_5G;
			template_idx <= ACC100_SIG_UL_5G_LAST;
			template_idx++) {
		/* Check engine power-on status */
		address = HwPfFecUl5gIbDebugReg +
				ACC100_ENGINE_OFFSET * template_idx;
		status = (acc100_reg_read(d, address) >> 4) & 0xF;
		if (status == 0) {
			engines_to_restart[num_failed_engine] = template_idx;
			num_failed_engine++;
		}
	}

	int numQqsAcc = conf->q_ul_5g.num_qgroups;
	int numQgs = conf->q_ul_5g.num_qgroups;
	value = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
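	/*
	 * "value" now holds a bitmask of the 5GUL queue groups; e.g. two
	 * qgroups starting at index 2 would give 0xC. Writing this mask to
	 * a template register below binds those queue groups to the engine
	 * template being restarted.
	 */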
	/* Force each engine which is in unspecified state */
	for (i = 0; i < num_failed_engine; i++) {
		int failed_engine = engines_to_restart[i];
		printf("Force engine %d\n", failed_engine);
		for (template_idx = ACC100_SIG_UL_5G;
				template_idx <= ACC100_SIG_UL_5G_LAST;
				template_idx++) {
			address = HWPfQmgrGrpTmplateReg4Indx
					+ ACC100_BYTES_IN_WORD * template_idx;
			if (template_idx == failed_engine)
				acc100_reg_write(d, address, value);
			else
				acc100_reg_write(d, address, 0);
		}
		/* Reset descriptor header */
		desc->req.word0 = ACC100_DMA_DESC_TYPE;
		desc->req.word1 = 0;
		desc->req.word2 = 0;
		desc->req.word3 = 0;
		desc->req.numCBs = 1;
		desc->req.m2dlen = 2;
		desc->req.d2mlen = 1;
		/* Enqueue the code block for processing */
		union acc100_enqueue_reg_fmt enq_req;
		enq_req.val = 0;
		enq_req.addr_offset = ACC100_DESC_OFFSET;
		enq_req.num_elem = 1;
		enq_req.req_elem_addr = 0;
		rte_wmb();
		acc100_reg_write(d, HWPfQmgrIngressAq + 0x100, enq_req.val);
		usleep(ACC100_LONG_WAIT * 100);
		if (desc->req.word0 != 2)
			printf("DMA Response %#"PRIx32"\n", desc->req.word0);
	}

	/* Reset LDPC Cores */
	for (i = 0; i < ACC100_ENGINES_MAX; i++)
		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
				ACC100_ENGINE_OFFSET * i,
				ACC100_RESET_HI);
	usleep(ACC100_LONG_WAIT);
	for (i = 0; i < ACC100_ENGINES_MAX; i++)
		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
				ACC100_ENGINE_OFFSET * i,
				ACC100_RESET_LO);
	usleep(ACC100_LONG_WAIT);
	acc100_reg_write(d, HWPfHi5GHardResetReg, ACC100_RESET_HARD);
	usleep(ACC100_LONG_WAIT);
	int numEngines = 0;
	/* Check engine power-on status again */
	for (template_idx = ACC100_SIG_UL_5G;
			template_idx <= ACC100_SIG_UL_5G_LAST;
			template_idx++) {
		address = HwPfFecUl5gIbDebugReg +
				ACC100_ENGINE_OFFSET * template_idx;
		status = (acc100_reg_read(d, address) >> 4) & 0xF;
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		if (status == 1) {
			acc100_reg_write(d, address, value);
			numEngines++;
		} else
			acc100_reg_write(d, address, 0);
	}
	printf("Number of 5GUL engines %d\n", numEngines);

	rte_free(d->sw_rings_base);
	usleep(ACC100_LONG_WAIT);
}
/* Initial configuration of an ACC100 device prior to running configure() */
int
rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
{
	rte_bbdev_log(INFO, "rte_acc100_configure");
	uint32_t value, address, status;
	int qg_idx, template_idx, vf_idx, acc, i;
	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);

	/* Compile time checks */
	RTE_BUILD_BUG_ON(sizeof(struct acc100_dma_req_desc) != 256);
	RTE_BUILD_BUG_ON(sizeof(union acc100_dma_desc) != 256);
	RTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_td) != 24);
	RTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_te) != 32);

	if (bbdev == NULL) {
		rte_bbdev_log(ERR,
		"Invalid dev_name (%s), or device is not yet initialised",
		dev_name);
		return -ENODEV;
	}
	struct acc100_device *d = bbdev->data->dev_private;

	/* Store configuration */
	rte_memcpy(&d->acc100_conf, conf, sizeof(d->acc100_conf));

	/* PCIe Bridge configuration */
	acc100_reg_write(d, HwPfPcieGpexBridgeControl, ACC100_CFG_PCI_BRIDGE);
	for (i = 1; i < ACC100_GPEX_AXIMAP_NUM; i++)
		acc100_reg_write(d,
				HwPfPcieGpexAxiAddrMappingWindowPexBaseHigh
				+ i * 16, 0);

	/* Prevent blocking AXI read on BRESP for AXI Write */
	address = HwPfPcieGpexAxiPioControl;
	value = ACC100_CFG_PCI_AXI;
	acc100_reg_write(d, address, value);

	/* 5GDL PLL phase shift */
	acc100_reg_write(d, HWPfChaDl5gPllPhshft0, 0x1);

	/* Explicitly release AXI as it may be stopped after PF FLR/BME */
	address = HWPfDmaAxiControl;
	value = 1;
	acc100_reg_write(d, address, value);

	/* DDR Configuration */
	address = HWPfDdrBcTim6;
	value = acc100_reg_read(d, address);
	value &= 0xFFFFFFFB; /* Clear bit 2 */
#ifdef ACC100_DDR_ECC_ENABLE
	value |= 0x4;
#endif
	acc100_reg_write(d, address, value);
	address = HWPfDdrPhyDqsCountNum;
#ifdef ACC100_DDR_ECC_ENABLE
	value = 9;
#else
	value = 8;
#endif
	acc100_reg_write(d, address, value);

	/* Set default descriptor signature */
	address = HWPfDmaDescriptorSignatuture;
	value = 0;
	acc100_reg_write(d, address, value);

	/* Enable the Error Detection in DMA */
	value = ACC100_CFG_DMA_ERROR;
	address = HWPfDmaErrorDetectionEn;
	acc100_reg_write(d, address, value);

	/* AXI Cache configuration */
	value = ACC100_CFG_AXI_CACHE;
	address = HWPfDmaAxcacheReg;
	acc100_reg_write(d, address, value);

	/* Default DMA Configuration (Qmgr Enabled) */
	address = HWPfDmaConfig0Reg;
	value = 0;
	acc100_reg_write(d, address, value);
	address = HWPfDmaQmanen;
	value = 0;
	acc100_reg_write(d, address, value);

	/* Default RLIM/ALEN configuration */
	address = HWPfDmaConfig1Reg;
	value = (1 << 31) + (23 << 8) + (1 << 6) + 7;
	acc100_reg_write(d, address, value);

	/* Configure DMA Qmanager addresses */
	address = HWPfDmaQmgrAddrReg;
	value = HWPfQmgrEgressQueuesTemplate;
	acc100_reg_write(d, address, value);

	/* ===== Qmgr Configuration ===== */
	/* Configuration of the AQueue Depth QMGR_GRP_0_DEPTH_LOG2 for UL */
	int totalQgs = conf->q_ul_4g.num_qgroups +
			conf->q_ul_5g.num_qgroups +
			conf->q_dl_4g.num_qgroups +
			conf->q_dl_5g.num_qgroups;
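	/*
	 * Example: a symmetric split of 2 qgroups per function gives
	 * totalQgs = 8, which must not exceed the number of queue groups
	 * (ACC100_NUM_QGRPS) supported by the device.
	 */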
	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
		address = HWPfQmgrDepthLog2Grp +
				ACC100_BYTES_IN_WORD * qg_idx;
		value = aqDepth(qg_idx, conf);
		acc100_reg_write(d, address, value);
		address = HWPfQmgrTholdGrp +
				ACC100_BYTES_IN_WORD * qg_idx;
		value = (1 << 16) + (1 << (aqDepth(qg_idx, conf) - 1));
		acc100_reg_write(d, address, value);
	}

	/* Template Priority in incremental order */
	for (template_idx = 0; template_idx < ACC100_NUM_TMPL;
			template_idx++) {
		address = HWPfQmgrGrpTmplateReg0Indx +
				ACC100_BYTES_IN_WORD * (template_idx % 8);
		value = ACC100_TMPL_PRI_0;
		acc100_reg_write(d, address, value);
		address = HWPfQmgrGrpTmplateReg1Indx +
				ACC100_BYTES_IN_WORD * (template_idx % 8);
		value = ACC100_TMPL_PRI_1;
		acc100_reg_write(d, address, value);
		address = HWPfQmgrGrpTmplateReg2indx +
				ACC100_BYTES_IN_WORD * (template_idx % 8);
		value = ACC100_TMPL_PRI_2;
		acc100_reg_write(d, address, value);
		address = HWPfQmgrGrpTmplateReg3Indx +
				ACC100_BYTES_IN_WORD * (template_idx % 8);
		value = ACC100_TMPL_PRI_3;
		acc100_reg_write(d, address, value);
	}

	address = HWPfQmgrGrpPriority;
	value = ACC100_CFG_QMGR_HI_P;
	acc100_reg_write(d, address, value);

	/* Template Configuration */
	for (template_idx = 0; template_idx < ACC100_NUM_TMPL;
			template_idx++) {
		value = 0;
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		acc100_reg_write(d, address, value);
	}
	/* 4GUL */
	int numQgs = conf->q_ul_4g.num_qgroups;
	int numQqsAcc = 0;
	value = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	for (template_idx = ACC100_SIG_UL_4G;
			template_idx <= ACC100_SIG_UL_4G_LAST;
			template_idx++) {
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		acc100_reg_write(d, address, value);
	}
	/* 5GUL */
	numQqsAcc += numQgs;
	numQgs = conf->q_ul_5g.num_qgroups;
	value = 0;
	int numEngines = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	for (template_idx = ACC100_SIG_UL_5G;
			template_idx <= ACC100_SIG_UL_5G_LAST;
			template_idx++) {
		/* Check engine power-on status */
		address = HwPfFecUl5gIbDebugReg +
				ACC100_ENGINE_OFFSET * template_idx;
		status = (acc100_reg_read(d, address) >> 4) & 0xF;
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		if (status == 1) {
			acc100_reg_write(d, address, value);
			numEngines++;
		} else
			acc100_reg_write(d, address, 0);
#if RTE_ACC100_SINGLE_FEC == 1
		value = 0;
#endif
	}
	printf("Number of 5GUL engines %d\n", numEngines);
	/* 4GDL */
	numQqsAcc += numQgs;
	numQgs = conf->q_dl_4g.num_qgroups;
	value = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	for (template_idx = ACC100_SIG_DL_4G;
			template_idx <= ACC100_SIG_DL_4G_LAST;
			template_idx++) {
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		acc100_reg_write(d, address, value);
#if RTE_ACC100_SINGLE_FEC == 1
		value = 0;
#endif
	}
	/* 5GDL */
	numQqsAcc += numQgs;
	numQgs = conf->q_dl_5g.num_qgroups;
	value = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	for (template_idx = ACC100_SIG_DL_5G;
			template_idx <= ACC100_SIG_DL_5G_LAST;
			template_idx++) {
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		acc100_reg_write(d, address, value);
#if RTE_ACC100_SINGLE_FEC == 1
		value = 0;
#endif
	}

	/* Queue Group Function mapping */
	int qman_func_id[5] = {0, 2, 1, 3, 4};
	address = HWPfQmgrGrpFunction0;
	value = 0;
	for (qg_idx = 0; qg_idx < 8; qg_idx++) {
		acc = accFromQgid(qg_idx, conf);
		value |= qman_func_id[acc] << (qg_idx * 4);
	}
	acc100_reg_write(d, address, value);

	/* Configuration of the Arbitration QGroup depth to 1 */
	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
		address = HWPfQmgrArbQDepthGrp +
				ACC100_BYTES_IN_WORD * qg_idx;
		value = 0;
		acc100_reg_write(d, address, value);
	}

	/* Enabling AQueues through the Queue hierarchy */
	for (vf_idx = 0; vf_idx < ACC100_NUM_VFS; vf_idx++) {
		for (qg_idx = 0; qg_idx < ACC100_NUM_QGRPS; qg_idx++) {
			value = 0;
			if (vf_idx < conf->num_vf_bundles &&
					qg_idx < totalQgs)
				value = (1 << aqNum(qg_idx, conf)) - 1;
			address = HWPfQmgrAqEnableVf
					+ vf_idx * ACC100_BYTES_IN_WORD;
			value += (qg_idx << 16);
			acc100_reg_write(d, address, value);
		}
	}

	/* This pointer to ARAM (256kB) is shifted by 2 (4B per register) */
	uint32_t aram_address = 0;
	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
		for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {
			address = HWPfQmgrVfBaseAddr + vf_idx
					* ACC100_BYTES_IN_WORD + qg_idx
					* ACC100_BYTES_IN_WORD * 64;
			value = aram_address;
			acc100_reg_write(d, address, value);
			/* Offset ARAM Address for next memory bank
			 * - increment of 4B
			 */
			aram_address += aqNum(qg_idx, conf) *
					(1 << aqDepth(qg_idx, conf));
		}
	}
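	/*
	 * Worked example for the increment above: with 16 AQs per group and
	 * aq_depth_log2 = 4, each (qgroup, vf) slice consumes
	 * 16 * 2^4 = 256 ARAM words, packed back to back until the ARAM
	 * budget checked below is exhausted.
	 */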

	if (aram_address > ACC100_WORDS_IN_ARAM_SIZE) {
		rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d\n",
				aram_address, ACC100_WORDS_IN_ARAM_SIZE);
		return -EINVAL;
	}

	/* ==== HI Configuration ==== */

	/* Prevent Block on Transmit Error */
	address = HWPfHiBlockTransmitOnErrorEn;
	value = 0;
	acc100_reg_write(d, address, value);
	/* Prevent MSI from being dropped */
	address = HWPfHiMsiDropEnableReg;
	value = 0;
	acc100_reg_write(d, address, value);
	/* Set the PF Mode register */
	address = HWPfHiPfMode;
	value = (conf->pf_mode_en) ? ACC100_PF_VAL : 0;
	acc100_reg_write(d, address, value);
	/* Enable Error Detection in HW */
	address = HWPfDmaErrorDetectionEn;
	value = ACC100_CFG_DMA_ERROR;
	acc100_reg_write(d, address, value);

	/* QoS overflow init */
	value = 1;
	address = HWPfQosmonAEvalOverflow0;
	acc100_reg_write(d, address, value);
	address = HWPfQosmonBEvalOverflow0;
	acc100_reg_write(d, address, value);

	/* HARQ DDR Configuration */
	unsigned int ddrSizeInMb = 512; /* Fixed to 512 MB per VF for now */
	for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {
		address = HWPfDmaVfDdrBaseRw + vf_idx
				* 0x10;
		value = ((vf_idx * (ddrSizeInMb / 64)) << 16) +
				(ddrSizeInMb - 1);
		acc100_reg_write(d, address, value);
	}
	usleep(ACC100_LONG_WAIT);
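
	/*
	 * Packing example for the loop above with 512 MB per VF: for
	 * vf_idx = 1 the upper 16 bits carry the base offset in 64 MB
	 * units, 1 * (512 / 64) = 8, and the lower bits carry the size,
	 * ddrSizeInMb - 1 = 511.
	 */
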
	/* Workaround in case some 5GUL engines are in an unexpected state */
	if (numEngines < (ACC100_SIG_UL_5G_LAST + 1))
		poweron_cleanup(bbdev, d, conf);

	rte_bbdev_log_debug("PF Tip configuration complete for %s", dev_name);

	return 0;
}