/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <unistd.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_hexdump.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#ifdef RTE_BBDEV_OFFLOAD_COST
#include <rte_cycles.h>
#endif

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
#include "rte_acc100_pmd.h"

#ifdef RTE_LIBRTE_BBDEV_DEBUG
RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, DEBUG);
#else
RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, NOTICE);
#endif
/* Write to MMIO register address */
static inline void
mmio_write(void *addr, uint32_t value)
{
	*((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
}
/* Write a register of an ACC100 device */
static inline void
acc100_reg_write(struct acc100_device *d, uint32_t offset, uint32_t value)
{
	void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
	mmio_write(reg_addr, value);
	usleep(ACC100_LONG_WAIT);
}
/* Read a register of an ACC100 device */
static inline uint32_t
acc100_reg_read(struct acc100_device *d, uint32_t offset)
{
	void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
	uint32_t ret = *((volatile uint32_t *)(reg_addr));
	return rte_le_to_cpu_32(ret);
}
/* Basic Implementation of Log2 for exact 2^N */
static inline uint32_t
log2_basic(uint32_t value)
{
	return (value == 0) ? 0 : rte_bsf32(value);
}
/* Calculate memory alignment offset assuming alignment is 2^N */
static inline uint32_t
calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)
{
	rte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);
	return (uint32_t)(alignment -
			(unaligned_phy_mem & (alignment-1)));
}
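/* Informative example: for an IOVA of 0x1234 and an alignment of 0x1000,
 * this returns 0x1000 - 0x234 = 0xDCC, i.e. the number of bytes to skip
 * to reach the next aligned boundary.
 */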
/* Calculate the offset of the enqueue register */
static inline uint32_t
queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)
{
	if (pf_device)
		return ((vf_id << 12) + (qgrp_id << 7) + (aq_id << 3) +
				HWPfQmgrIngressAq);
	else
		return ((qgrp_id << 7) + (aq_id << 3) +
				HWVfQmgrIngressAq);
}
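/* queue_offset() example (informative): on the PF, vf_id = 1, qgrp_id = 2,
 * aq_id = 3 maps to HWPfQmgrIngressAq + (1 << 12) + (2 << 7) + (3 << 3)
 * = HWPfQmgrIngressAq + 0x1000 + 0x100 + 0x18.
 */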
enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, NUM_ACC};
/* Return the accelerator enum for a Queue Group Index */
static inline int
accFromQgid(int qg_idx, const struct rte_acc100_conf *acc100_conf)
{
	int accQg[ACC100_NUM_QGRPS];
	int NumQGroupsPerFn[NUM_ACC];
	int acc, qgIdx, qgIndex = 0;
	for (qgIdx = 0; qgIdx < ACC100_NUM_QGRPS; qgIdx++)
		accQg[qgIdx] = 0;
	NumQGroupsPerFn[UL_4G] = acc100_conf->q_ul_4g.num_qgroups;
	NumQGroupsPerFn[UL_5G] = acc100_conf->q_ul_5g.num_qgroups;
	NumQGroupsPerFn[DL_4G] = acc100_conf->q_dl_4g.num_qgroups;
	NumQGroupsPerFn[DL_5G] = acc100_conf->q_dl_5g.num_qgroups;
	for (acc = UL_4G; acc < NUM_ACC; acc++)
		for (qgIdx = 0; qgIdx < NumQGroupsPerFn[acc]; qgIdx++)
			accQg[qgIndex++] = acc;
	acc = accQg[qg_idx];
	return acc;
}
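/* accFromQgid() example (informative): with 2 UL_4G and 2 UL_5G queue
 * groups configured, accQg becomes {UL_4G, UL_4G, UL_5G, UL_5G, 0, ...},
 * so qg_idx 2 resolves to the UL_5G accelerator.
 */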
/* Return the queue topology for a Queue Group Index */
static inline void
qtopFromAcc(struct rte_acc100_queue_topology **qtop, int acc_enum,
		struct rte_acc100_conf *acc100_conf)
{
	struct rte_acc100_queue_topology *p_qtop;
	p_qtop = NULL;
	switch (acc_enum) {
	case UL_4G:
		p_qtop = &(acc100_conf->q_ul_4g);
		break;
	case UL_5G:
		p_qtop = &(acc100_conf->q_ul_5g);
		break;
	case DL_4G:
		p_qtop = &(acc100_conf->q_dl_4g);
		break;
	case DL_5G:
		p_qtop = &(acc100_conf->q_dl_5g);
		break;
	default:
		/* NOTREACHED */
		rte_bbdev_log(ERR, "Unexpected error evaluating qtopFromAcc");
		break;
	}
	*qtop = p_qtop;
}
/* Return the AQ depth for a Queue Group Index */
static inline int
aqDepth(int qg_idx, struct rte_acc100_conf *acc100_conf)
{
	struct rte_acc100_queue_topology *q_top = NULL;
	int acc_enum = accFromQgid(qg_idx, acc100_conf);
	qtopFromAcc(&q_top, acc_enum, acc100_conf);
	if (unlikely(q_top == NULL))
		return 1;
	return q_top->aq_depth_log2;
}
/* Return the number of AQs for a Queue Group Index */
static inline int
aqNum(int qg_idx, struct rte_acc100_conf *acc100_conf)
{
	struct rte_acc100_queue_topology *q_top = NULL;
	int acc_enum = accFromQgid(qg_idx, acc100_conf);
	qtopFromAcc(&q_top, acc_enum, acc100_conf);
	if (unlikely(q_top == NULL))
		return 0;
	return q_top->num_aqs_per_groups;
}
static void
initQTop(struct rte_acc100_conf *acc100_conf)
{
	acc100_conf->q_ul_4g.num_aqs_per_groups = 0;
	acc100_conf->q_ul_4g.num_qgroups = 0;
	acc100_conf->q_ul_4g.first_qgroup_index = -1;
	acc100_conf->q_ul_5g.num_aqs_per_groups = 0;
	acc100_conf->q_ul_5g.num_qgroups = 0;
	acc100_conf->q_ul_5g.first_qgroup_index = -1;
	acc100_conf->q_dl_4g.num_aqs_per_groups = 0;
	acc100_conf->q_dl_4g.num_qgroups = 0;
	acc100_conf->q_dl_4g.first_qgroup_index = -1;
	acc100_conf->q_dl_5g.num_aqs_per_groups = 0;
	acc100_conf->q_dl_5g.num_qgroups = 0;
	acc100_conf->q_dl_5g.first_qgroup_index = -1;
}
static inline void
updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,
		struct acc100_device *d) {
	uint32_t reg;
	struct rte_acc100_queue_topology *q_top = NULL;
	qtopFromAcc(&q_top, acc, acc100_conf);
	if (unlikely(q_top == NULL))
		return;
	uint16_t aq;
	q_top->num_qgroups++;
	if (q_top->first_qgroup_index == -1) {
		q_top->first_qgroup_index = qg;
		/* Can be optimized to assume all are enabled by default */
		reg = acc100_reg_read(d, queue_offset(d->pf_device,
				0, qg, ACC100_NUM_AQS - 1));
		if (reg & ACC100_QUEUE_ENABLE) {
			q_top->num_aqs_per_groups = ACC100_NUM_AQS;
			return;
		}
		q_top->num_aqs_per_groups = 0;
		for (aq = 0; aq < ACC100_NUM_AQS; aq++) {
			reg = acc100_reg_read(d, queue_offset(d->pf_device,
					0, qg, aq));
			if (reg & ACC100_QUEUE_ENABLE)
				q_top->num_aqs_per_groups++;
		}
	}
}
/* Fetch configuration enabled for the PF/VF using MMIO Read (slow) */
static inline void
fetch_acc100_config(struct rte_bbdev *dev)
{
	struct acc100_device *d = dev->data->dev_private;
	struct rte_acc100_conf *acc100_conf = &d->acc100_conf;
	const struct acc100_registry_addr *reg_addr;
	uint8_t acc, qg;
	uint32_t reg, reg_aq, reg_len0, reg_len1;
	uint32_t reg_mode;

	/* No need to retrieve the configuration if it is already done */
	if (d->configured)
		return;

	/* Choose correct registry addresses for the device type */
	if (d->pf_device)
		reg_addr = &pf_reg_addr;
	else
		reg_addr = &vf_reg_addr;

	d->ddr_size = (1 + acc100_reg_read(d, reg_addr->ddr_range)) << 10;

	/* Single VF Bundle by VF */
	acc100_conf->num_vf_bundles = 1;
	initQTop(acc100_conf);

	struct rte_acc100_queue_topology *q_top = NULL;
	int qman_func_id[ACC100_NUM_ACCS] = {ACC100_ACCMAP_0, ACC100_ACCMAP_1,
			ACC100_ACCMAP_2, ACC100_ACCMAP_3, ACC100_ACCMAP_4};
	reg = acc100_reg_read(d, reg_addr->qman_group_func);
	for (qg = 0; qg < ACC100_NUM_QGRPS_PER_WORD; qg++) {
		reg_aq = acc100_reg_read(d,
				queue_offset(d->pf_device, 0, qg, 0));
		if (reg_aq & ACC100_QUEUE_ENABLE) {
			uint32_t idx = (reg >> (qg * 4)) & 0x7;
			if (idx < ACC100_NUM_ACCS) {
				acc = qman_func_id[idx];
				updateQtop(acc, qg, acc100_conf, d);
			}
		}
	}

	/* Check the depth of the AQs */
	reg_len0 = acc100_reg_read(d, reg_addr->depth_log0_offset);
	reg_len1 = acc100_reg_read(d, reg_addr->depth_log1_offset);
	for (acc = 0; acc < NUM_ACC; acc++) {
		qtopFromAcc(&q_top, acc, acc100_conf);
		if (q_top->first_qgroup_index < ACC100_NUM_QGRPS_PER_WORD)
			q_top->aq_depth_log2 = (reg_len0 >>
					(q_top->first_qgroup_index * 4))
					& 0xF;
		else
			q_top->aq_depth_log2 = (reg_len1 >>
					((q_top->first_qgroup_index -
					ACC100_NUM_QGRPS_PER_WORD) * 4))
					& 0xF;
	}

	/* Read PF mode */
	if (d->pf_device) {
		reg_mode = acc100_reg_read(d, HWPfHiPfMode);
		acc100_conf->pf_mode_en = (reg_mode == ACC100_PF_VAL) ? 1 : 0;
	}

	rte_bbdev_log_debug(
			"%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u\n",
			(d->pf_device) ? "PF" : "VF",
			(acc100_conf->input_pos_llr_1_bit) ? "POS" : "NEG",
			(acc100_conf->output_pos_llr_1_bit) ? "POS" : "NEG",
			acc100_conf->q_ul_4g.num_qgroups,
			acc100_conf->q_dl_4g.num_qgroups,
			acc100_conf->q_ul_5g.num_qgroups,
			acc100_conf->q_dl_5g.num_qgroups,
			acc100_conf->q_ul_4g.num_aqs_per_groups,
			acc100_conf->q_dl_4g.num_aqs_per_groups,
			acc100_conf->q_ul_5g.num_aqs_per_groups,
			acc100_conf->q_dl_5g.num_aqs_per_groups,
			acc100_conf->q_ul_4g.aq_depth_log2,
			acc100_conf->q_dl_4g.aq_depth_log2,
			acc100_conf->q_ul_5g.aq_depth_log2,
			acc100_conf->q_dl_5g.aq_depth_log2);
}
static void
free_base_addresses(void **base_addrs, int size)
{
	int i;
	for (i = 0; i < size; i++)
		rte_free(base_addrs[i]);
}

static inline uint32_t
get_desc_len(void)
{
	return sizeof(union acc100_dma_desc);
}
/* Allocate the 2 * 64MB block for the sw rings */
static int
alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc100_device *d,
		int socket)
{
	uint32_t sw_ring_size = ACC100_SIZE_64MBYTE;
	d->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,
			2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);
	if (d->sw_rings_base == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate memory for %s:%u",
				dev->device->driver->name,
				dev->data->dev_id);
		return -ENOMEM;
	}
	uint32_t next_64mb_align_offset = calc_mem_alignment_offset(
			d->sw_rings_base, ACC100_SIZE_64MBYTE);
	d->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);
	d->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +
			next_64mb_align_offset;
	d->sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
	d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;

	return 0;
}
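/* Informative note: allocating twice ACC100_SIZE_64MBYTE guarantees that a
 * fully 64MB-aligned window of 64MB exists somewhere inside the block,
 * whatever base address rte_zmalloc_socket() happens to return.
 */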
/* Attempt to allocate minimised memory space for sw rings */
static void
alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc100_device *d,
		uint16_t num_queues, int socket)
{
	rte_iova_t sw_rings_base_iova, next_64mb_align_addr_iova;
	uint32_t next_64mb_align_offset;
	rte_iova_t sw_ring_iova_end_addr;
	void *base_addrs[ACC100_SW_RING_MEM_ALLOC_ATTEMPTS];
	void *sw_rings_base;
	int i = 0;
	uint32_t q_sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
	uint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;

	/* Find an aligned block of memory to store sw rings */
	while (i < ACC100_SW_RING_MEM_ALLOC_ATTEMPTS) {
		/*
		 * sw_ring allocated memory is guaranteed to be aligned to
		 * q_sw_ring_size at the condition that the requested size is
		 * less than the page size
		 */
		sw_rings_base = rte_zmalloc_socket(
				dev->device->driver->name,
				dev_sw_ring_size, q_sw_ring_size, socket);

		if (sw_rings_base == NULL) {
			rte_bbdev_log(ERR,
					"Failed to allocate memory for %s:%u",
					dev->device->driver->name,
					dev->data->dev_id);
			break;
		}

		sw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);
		next_64mb_align_offset = calc_mem_alignment_offset(
				sw_rings_base, ACC100_SIZE_64MBYTE);
		next_64mb_align_addr_iova = sw_rings_base_iova +
				next_64mb_align_offset;
		sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size;

		/* Check if the end of the sw ring memory block is before the
		 * start of next 64MB aligned mem address
		 */
		if (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {
			d->sw_rings_iova = sw_rings_base_iova;
			d->sw_rings = sw_rings_base;
			d->sw_rings_base = sw_rings_base;
			d->sw_ring_size = q_sw_ring_size;
			d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
			break;
		}
		/* Store the address of the unaligned mem block */
		base_addrs[i] = sw_rings_base;
		i++;
	}

	/* Free all unaligned blocks of mem allocated in the loop */
	free_base_addresses(base_addrs, i);
}
/*
 * Find queue_id of a device queue based on details from the Info Ring.
 * If a queue isn't found UINT16_MAX is returned.
 */
static inline uint16_t
get_queue_id_from_ring_info(struct rte_bbdev_data *data,
		const union acc100_info_ring_data ring_data)
{
	uint16_t queue_id;

	for (queue_id = 0; queue_id < data->num_queues; ++queue_id) {
		struct acc100_queue *acc100_q =
				data->queues[queue_id].queue_private;
		if (acc100_q != NULL && acc100_q->aq_id == ring_data.aq_id &&
				acc100_q->qgrp_id == ring_data.qg_id &&
				acc100_q->vf_id == ring_data.vf_id)
			return queue_id;
	}

	return UINT16_MAX;
}
/* Checks PF Info Ring to find the interrupt cause and handles it accordingly */
static inline void
acc100_check_ir(struct acc100_device *acc100_dev)
{
	volatile union acc100_info_ring_data *ring_data;
	uint16_t info_ring_head = acc100_dev->info_ring_head;
	if (acc100_dev->info_ring == NULL)
		return;

	ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
			ACC100_INFO_RING_MASK);

	while (ring_data->valid) {
		if ((ring_data->int_nb < ACC100_PF_INT_DMA_DL_DESC_IRQ) || (
				ring_data->int_nb >
				ACC100_PF_INT_DMA_DL5G_DESC_IRQ))
			rte_bbdev_log(WARNING, "InfoRing: ITR:%d Info:0x%x",
				ring_data->int_nb, ring_data->detailed_info);
		/* Initialize Info Ring entry and move forward */
		ring_data->val = 0;
		++info_ring_head;
		ring_data = acc100_dev->info_ring +
				(info_ring_head & ACC100_INFO_RING_MASK);
	}
}
/* Checks PF Info Ring to find the interrupt cause and handles it accordingly */
static inline void
acc100_pf_interrupt_handler(struct rte_bbdev *dev)
{
	struct acc100_device *acc100_dev = dev->data->dev_private;
	volatile union acc100_info_ring_data *ring_data;
	struct acc100_deq_intr_details deq_intr_det;

	ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
			ACC100_INFO_RING_MASK);

	while (ring_data->valid) {

		rte_bbdev_log_debug(
				"ACC100 PF Interrupt received, Info Ring data: 0x%x",
				ring_data->val);

		switch (ring_data->int_nb) {
		case ACC100_PF_INT_DMA_DL_DESC_IRQ:
		case ACC100_PF_INT_DMA_UL_DESC_IRQ:
		case ACC100_PF_INT_DMA_UL5G_DESC_IRQ:
		case ACC100_PF_INT_DMA_DL5G_DESC_IRQ:
			deq_intr_det.queue_id = get_queue_id_from_ring_info(
					dev->data, *ring_data);
			if (deq_intr_det.queue_id == UINT16_MAX) {
				rte_bbdev_log(ERR,
						"Couldn't find queue: aq_id: %u, qg_id: %u, vf_id: %u",
						ring_data->aq_id,
						ring_data->qg_id,
						ring_data->vf_id);
				return;
			}
			rte_bbdev_pmd_callback_process(dev,
					RTE_BBDEV_EVENT_DEQUEUE, &deq_intr_det);
			break;
		default:
			rte_bbdev_pmd_callback_process(dev,
					RTE_BBDEV_EVENT_ERROR, NULL);
			break;
		}

		/* Initialize Info Ring entry and move forward */
		ring_data->val = 0;
		++acc100_dev->info_ring_head;
		ring_data = acc100_dev->info_ring +
				(acc100_dev->info_ring_head &
				ACC100_INFO_RING_MASK);
	}
}
/* Checks VF Info Ring to find the interrupt cause and handles it accordingly */
static inline void
acc100_vf_interrupt_handler(struct rte_bbdev *dev)
{
	struct acc100_device *acc100_dev = dev->data->dev_private;
	volatile union acc100_info_ring_data *ring_data;
	struct acc100_deq_intr_details deq_intr_det;

	ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &
			ACC100_INFO_RING_MASK);

	while (ring_data->valid) {

		rte_bbdev_log_debug(
				"ACC100 VF Interrupt received, Info Ring data: 0x%x",
				ring_data->val);

		switch (ring_data->int_nb) {
		case ACC100_VF_INT_DMA_DL_DESC_IRQ:
		case ACC100_VF_INT_DMA_UL_DESC_IRQ:
		case ACC100_VF_INT_DMA_UL5G_DESC_IRQ:
		case ACC100_VF_INT_DMA_DL5G_DESC_IRQ:
			/* VFs are not aware of their vf_id - it's set to 0 in
			 * queue structures.
			 */
			ring_data->vf_id = 0;
			deq_intr_det.queue_id = get_queue_id_from_ring_info(
					dev->data, *ring_data);
			if (deq_intr_det.queue_id == UINT16_MAX) {
				rte_bbdev_log(ERR,
						"Couldn't find queue: aq_id: %u, qg_id: %u",
						ring_data->aq_id,
						ring_data->qg_id);
				return;
			}
			rte_bbdev_pmd_callback_process(dev,
					RTE_BBDEV_EVENT_DEQUEUE, &deq_intr_det);
			break;
		default:
			rte_bbdev_pmd_callback_process(dev,
					RTE_BBDEV_EVENT_ERROR, NULL);
			break;
		}

		/* Initialize Info Ring entry and move forward */
		ring_data->valid = 0;
		++acc100_dev->info_ring_head;
		ring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head
				& ACC100_INFO_RING_MASK);
	}
}
/* Interrupt handler triggered by ACC100 dev for handling specific interrupt */
static void
acc100_dev_interrupt_handler(void *cb_arg)
{
	struct rte_bbdev *dev = cb_arg;
	struct acc100_device *acc100_dev = dev->data->dev_private;

	/* Read info ring */
	if (acc100_dev->pf_device)
		acc100_pf_interrupt_handler(dev);
	else
		acc100_vf_interrupt_handler(dev);
}
/* Allocate and setup info ring */
static int
allocate_info_ring(struct rte_bbdev *dev)
{
	struct acc100_device *d = dev->data->dev_private;
	const struct acc100_registry_addr *reg_addr;
	rte_iova_t info_ring_iova;
	uint32_t phys_low, phys_high;

	if (d->info_ring != NULL)
		return 0; /* Already configured */

	/* Choose correct registry addresses for the device type */
	if (d->pf_device)
		reg_addr = &pf_reg_addr;
	else
		reg_addr = &vf_reg_addr;
	/* Allocate InfoRing */
	d->info_ring = rte_zmalloc_socket("Info Ring",
			ACC100_INFO_RING_NUM_ENTRIES *
			sizeof(*d->info_ring), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (d->info_ring == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate Info Ring for %s:%u",
				dev->device->driver->name,
				dev->data->dev_id);
		return -ENOMEM;
	}
	info_ring_iova = rte_malloc_virt2iova(d->info_ring);

	/* Setup Info Ring */
	phys_high = (uint32_t)(info_ring_iova >> 32);
	phys_low = (uint32_t)(info_ring_iova);
	acc100_reg_write(d, reg_addr->info_ring_hi, phys_high);
	acc100_reg_write(d, reg_addr->info_ring_lo, phys_low);
	acc100_reg_write(d, reg_addr->info_ring_en, ACC100_REG_IRQ_EN_ALL);
	d->info_ring_head = (acc100_reg_read(d, reg_addr->info_ring_ptr) &
			0xFFF) / sizeof(union acc100_info_ring_data);
	return 0;
}
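/* Informative note: the low 12 bits of the ring pointer register hold a byte
 * offset into the info ring, so dividing by the entry size converts it into
 * the entry index used as the initial ring head.
 */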
/* Allocate 64MB memory used for all software rings */
static int
acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
{
	uint32_t phys_low, phys_high, value;
	struct acc100_device *d = dev->data->dev_private;
	const struct acc100_registry_addr *reg_addr;
	int ret;

	if (d->pf_device && !d->acc100_conf.pf_mode_en) {
		rte_bbdev_log(NOTICE,
				"%s has PF mode disabled. This PF can't be used.",
				dev->data->name);
		return -ENODEV;
	}

	alloc_sw_rings_min_mem(dev, d, num_queues, socket_id);

	/* If minimal memory space approach failed, then allocate
	 * the 2 * 64MB block for the sw rings
	 */
	if (d->sw_rings == NULL)
		alloc_2x64mb_sw_rings_mem(dev, d, socket_id);

	if (d->sw_rings == NULL) {
		rte_bbdev_log(NOTICE,
				"Failure allocating sw_rings memory");
		return -ENODEV;
	}

	/* Configure ACC100 with the base address for DMA descriptor rings
	 * Same descriptor rings used for UL and DL DMA Engines
	 * Note : Assuming only VF0 bundle is used for PF mode
	 */
	phys_high = (uint32_t)(d->sw_rings_iova >> 32);
	phys_low = (uint32_t)(d->sw_rings_iova & ~(ACC100_SIZE_64MBYTE-1));

	/* Choose correct registry addresses for the device type */
	if (d->pf_device)
		reg_addr = &pf_reg_addr;
	else
		reg_addr = &vf_reg_addr;

	/* Read the populated cfg from ACC100 registers */
	fetch_acc100_config(dev);

	/* Release AXI from PF */
	if (d->pf_device)
		acc100_reg_write(d, HWPfDmaAxiControl, 1);

	acc100_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);
	acc100_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);
	acc100_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);
	acc100_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);
	acc100_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);
	acc100_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);
	acc100_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);
	acc100_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);

	/*
	 * Configure Ring Size to the max queue ring size
	 * (used for wrapping purpose)
	 */
	value = log2_basic(d->sw_ring_size / 64);
	acc100_reg_write(d, reg_addr->ring_size, value);
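	/* Informative example: with 1024-deep rings the ring size register
	 * gets log2(1024 * 256 / 64) = 12, assuming a 256 B descriptor
	 * (sizeof(union acc100_dma_desc) in the companion header).
	 */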
	/* Configure tail pointer for use when SDONE enabled */
	d->tail_ptrs = rte_zmalloc_socket(
			dev->device->driver->name,
			ACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (d->tail_ptrs == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u",
				dev->device->driver->name,
				dev->data->dev_id);
		rte_free(d->sw_rings);
		return -ENOMEM;
	}
	d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs);

	phys_high = (uint32_t)(d->tail_ptr_iova >> 32);
	phys_low = (uint32_t)(d->tail_ptr_iova);
	acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);
	acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);
	acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);
	acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);
	acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);
	acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);
	acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);
	acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);

	ret = allocate_info_ring(dev);
	if (ret < 0) {
		rte_bbdev_log(ERR, "Failed to allocate info_ring for %s:%u",
				dev->device->driver->name,
				dev->data->dev_id);
		/* Continue */
	}

	d->harq_layout = rte_zmalloc_socket("HARQ Layout",
			ACC100_HARQ_LAYOUT * sizeof(*d->harq_layout),
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
	if (d->harq_layout == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u",
				dev->device->driver->name,
				dev->data->dev_id);
		rte_free(d->sw_rings);
		return -ENOMEM;
	}

	/* Mark as configured properly */
	d->configured = true;

	rte_bbdev_log_debug(
			"ACC100 (%s) configured sw_rings = %p, sw_rings_iova = %#"
			PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova);

	return 0;
}
/* Enable interrupts */
static int
acc100_intr_enable(struct rte_bbdev *dev)
{
	int ret;
	struct acc100_device *d = dev->data->dev_private;

	/* Only MSI are currently supported */
	if (dev->intr_handle->type == RTE_INTR_HANDLE_VFIO_MSI ||
			dev->intr_handle->type == RTE_INTR_HANDLE_UIO) {

		ret = allocate_info_ring(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Couldn't allocate info ring for device: %s",
					dev->data->name);
			return ret;
		}

		ret = rte_intr_enable(dev->intr_handle);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Couldn't enable interrupts for device: %s",
					dev->data->name);
			rte_free(d->info_ring);
			return ret;
		}
		ret = rte_intr_callback_register(dev->intr_handle,
				acc100_dev_interrupt_handler, dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Couldn't register interrupt callback for device: %s",
					dev->data->name);
			rte_free(d->info_ring);
			return ret;
		}

		return 0;
	}

	rte_bbdev_log(ERR, "ACC100 (%s) supports only VFIO MSI interrupts",
			dev->data->name);
	return -ENOTSUP;
}
/* Free memory used for software rings */
static int
acc100_dev_close(struct rte_bbdev *dev)
{
	struct acc100_device *d = dev->data->dev_private;

	if (d->sw_rings_base != NULL) {
		rte_free(d->tail_ptrs);
		rte_free(d->info_ring);
		rte_free(d->sw_rings_base);
		d->sw_rings_base = NULL;
	}
	/* Ensure all in flight HW transactions are completed */
	usleep(ACC100_LONG_WAIT);
	return 0;
}
/*
 * Report an ACC100 queue index which is free
 * Return 0 to 16k for a valid queue_idx or -1 when no queue is available
 * Note : Only supporting VF0 Bundle for PF mode
 */
static int
acc100_find_free_queue_idx(struct rte_bbdev *dev,
		const struct rte_bbdev_queue_conf *conf)
{
	struct acc100_device *d = dev->data->dev_private;
	int op_2_acc[5] = {0, UL_4G, DL_4G, UL_5G, DL_5G};
	int acc = op_2_acc[conf->op_type];
	struct rte_acc100_queue_topology *qtop = NULL;

	qtopFromAcc(&qtop, acc, &(d->acc100_conf));
	if (qtop == NULL)
		return -1;
	/* Identify matching QGroup Index which are sorted in priority order */
	uint16_t group_idx = qtop->first_qgroup_index;
	group_idx += conf->priority;
	if (group_idx >= ACC100_NUM_QGRPS ||
			conf->priority >= qtop->num_qgroups) {
		rte_bbdev_log(INFO, "Invalid Priority on %s, priority %u",
				dev->data->name, conf->priority);
		return -1;
	}
	/* Find a free AQ_idx */
	uint16_t aq_idx;
	for (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) {
		if (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {
			/* Mark the Queue as assigned */
			d->q_assigned_bit_map[group_idx] |= (1 << aq_idx);
			/* Report the AQ Index */
			return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx;
		}
	}
	rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u",
			dev->data->name, conf->priority);
	return -1;
}
/* Setup ACC100 queue */
static int
acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	struct acc100_device *d = dev->data->dev_private;
	struct acc100_queue *q;
	int16_t q_idx;

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
			RTE_CACHE_LINE_SIZE, conf->socket);
	if (q == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}
	if (d == NULL) {
		rte_bbdev_log(ERR, "Undefined device");
		return -ENODEV;
	}

	q->d = d;
	q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
	q->ring_addr_iova = d->sw_rings_iova + (d->sw_ring_size * queue_id);

	/* Prepare the Ring with default descriptor format */
	union acc100_dma_desc *desc = NULL;
	unsigned int desc_idx, b_idx;
	int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
		ACC100_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
		ACC100_FCW_TD_BLEN : ACC100_FCW_LD_BLEN));

	for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
		desc = q->ring_addr + desc_idx;
		desc->req.word0 = ACC100_DMA_DESC_TYPE;
		desc->req.word1 = 0; /**< Timestamp */
		desc->req.word2 = 0;
		desc->req.word3 = 0;
		uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
		desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
		desc->req.data_ptrs[0].blen = fcw_len;
		desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
		desc->req.data_ptrs[0].last = 0;
		desc->req.data_ptrs[0].dma_ext = 0;
		for (b_idx = 1; b_idx < ACC100_DMA_MAX_NUM_POINTERS - 1;
				b_idx++) {
			desc->req.data_ptrs[b_idx].blkid = ACC100_DMA_BLKID_IN;
			desc->req.data_ptrs[b_idx].last = 1;
			desc->req.data_ptrs[b_idx].dma_ext = 0;
			b_idx++;
			desc->req.data_ptrs[b_idx].blkid =
					ACC100_DMA_BLKID_OUT_ENC;
			desc->req.data_ptrs[b_idx].last = 1;
			desc->req.data_ptrs[b_idx].dma_ext = 0;
		}
		/* Preset some fields of LDPC FCW */
		desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
		desc->req.fcw_ld.gain_i = 1;
		desc->req.fcw_ld.gain_h = 1;
	}

	q->lb_in = rte_zmalloc_socket(dev->device->driver->name,
			RTE_CACHE_LINE_SIZE,
			RTE_CACHE_LINE_SIZE, conf->socket);
	if (q->lb_in == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate lb_in memory");
		rte_free(q);
		return -ENOMEM;
	}
	q->lb_in_addr_iova = rte_malloc_virt2iova(q->lb_in);
	q->lb_out = rte_zmalloc_socket(dev->device->driver->name,
			RTE_CACHE_LINE_SIZE,
			RTE_CACHE_LINE_SIZE, conf->socket);
	if (q->lb_out == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate lb_out memory");
		rte_free(q->lb_in);
		rte_free(q);
		return -ENOMEM;
	}
	q->lb_out_addr_iova = rte_malloc_virt2iova(q->lb_out);

	/*
	 * Software queue ring wraps synchronously with the HW when it reaches
	 * the boundary of the maximum allocated queue size, no matter what the
	 * sw queue size is. This wrapping is guarded by setting the wrap_mask
	 * to represent the maximum queue size as allocated at the time when
	 * the device has been setup (in configure()).
	 *
	 * The queue depth is set to the queue size value (conf->queue_size).
	 * This limits the occupancy of the queue at any point of time, so that
	 * the queue does not get swamped with enqueue requests.
	 */
	q->sw_ring_depth = conf->queue_size;
	q->sw_ring_wrap_mask = d->sw_ring_max_depth - 1;
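	/* Informative example: if the rings were allocated with a max depth of
	 * 1024, wrap_mask is 0x3FF and ring indices are always computed as
	 * (head & wrap_mask), independent of the configured sw_ring_depth.
	 */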

	q->op_type = conf->op_type;

	q_idx = acc100_find_free_queue_idx(dev, conf);
	if (q_idx == -1) {
		rte_free(q->lb_in);
		rte_free(q->lb_out);
		rte_free(q);
		return -1;
	}

	q->qgrp_id = (q_idx >> ACC100_GRP_ID_SHIFT) & 0xF;
	q->vf_id = (q_idx >> ACC100_VF_ID_SHIFT) & 0x3F;
	q->aq_id = q_idx & 0xF;
	q->aq_depth = (conf->op_type == RTE_BBDEV_OP_TURBO_DEC) ?
			(1 << d->acc100_conf.q_ul_4g.aq_depth_log2) :
			(1 << d->acc100_conf.q_dl_4g.aq_depth_log2);

	q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
			queue_offset(d->pf_device,
					q->vf_id, q->qgrp_id, q->aq_id));

	rte_bbdev_log_debug(
			"Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p",
			dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,
			q->aq_id, q->aq_depth, q->mmio_reg_enqueue);

	dev->data->queues[queue_id].queue_private = q;
	return 0;
}
/* Release ACC100 queue */
static int
acc100_queue_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct acc100_device *d = dev->data->dev_private;
	struct acc100_queue *q = dev->data->queues[q_id].queue_private;

	if (q != NULL) {
		/* Mark the Queue as un-assigned */
		d->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -
				(1 << q->aq_id));
		rte_free(q->lb_in);
		rte_free(q->lb_out);
		rte_free(q);
		dev->data->queues[q_id].queue_private = NULL;
	}

	return 0;
}
/* Get ACC100 device info */
static void
acc100_dev_info_get(struct rte_bbdev *dev,
		struct rte_bbdev_driver_info *dev_info)
{
	struct acc100_device *d = dev->data->dev_private;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		{
			.type = RTE_BBDEV_OP_TURBO_DEC,
			.cap.turbo_dec = {
				.capability_flags =
					RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
					RTE_BBDEV_TURBO_CRC_TYPE_24B |
					RTE_BBDEV_TURBO_HALF_ITERATION_EVEN |
					RTE_BBDEV_TURBO_EARLY_TERMINATION |
					RTE_BBDEV_TURBO_DEC_INTERRUPTS |
					RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_MAP_DEC |
					RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
					RTE_BBDEV_TURBO_DEC_SCATTER_GATHER,
				.max_llr_modulus = INT8_MAX,
				.num_buffers_src =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
				.num_buffers_hard_out =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
				.num_buffers_soft_out =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
			}
		},
		{
			.type = RTE_BBDEV_OP_TURBO_ENC,
			.cap.turbo_enc = {
				.capability_flags =
					RTE_BBDEV_TURBO_CRC_24B_ATTACH |
					RTE_BBDEV_TURBO_RV_INDEX_BYPASS |
					RTE_BBDEV_TURBO_RATE_MATCH |
					RTE_BBDEV_TURBO_ENC_INTERRUPTS |
					RTE_BBDEV_TURBO_ENC_SCATTER_GATHER,
				.num_buffers_src =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
				.num_buffers_dst =
						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
			}
		},
		{
			.type = RTE_BBDEV_OP_LDPC_ENC,
			.cap.ldpc_enc = {
				.capability_flags =
					RTE_BBDEV_LDPC_RATE_MATCH |
					RTE_BBDEV_LDPC_CRC_24B_ATTACH |
					RTE_BBDEV_LDPC_INTERLEAVER_BYPASS |
					RTE_BBDEV_LDPC_ENC_INTERRUPTS,
				.num_buffers_src =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
				.num_buffers_dst =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			}
		},
		{
			.type = RTE_BBDEV_OP_LDPC_DEC,
			.cap.ldpc_dec = {
			.capability_flags =
				RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
				RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
				RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
#ifdef ACC100_EXT_MEM
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
#endif
				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
				RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS |
				RTE_BBDEV_LDPC_DECODE_BYPASS |
				RTE_BBDEV_LDPC_DEC_SCATTER_GATHER |
				RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION |
				RTE_BBDEV_LDPC_LLR_COMPRESSION |
				RTE_BBDEV_LDPC_DEC_INTERRUPTS,
			.llr_size = 8,
			.llr_decimals = 1,
			.num_buffers_src =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_hard_out =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_soft_out = 0,
			}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	static struct rte_bbdev_queue_conf default_queue_conf;
	default_queue_conf.socket = dev->data->socket_id;
	default_queue_conf.queue_size = ACC100_MAX_QUEUE_DEPTH;

	dev_info->driver_name = dev->device->driver->name;

	/* Read and save the populated config from ACC100 registers */
	fetch_acc100_config(dev);

	/* This isn't ideal because it reports the maximum number of queues but
	 * does not provide info on how many can be uplink/downlink or different
	 * priorities
	 */
	dev_info->max_num_queues =
			d->acc100_conf.q_dl_5g.num_aqs_per_groups *
			d->acc100_conf.q_dl_5g.num_qgroups +
			d->acc100_conf.q_ul_5g.num_aqs_per_groups *
			d->acc100_conf.q_ul_5g.num_qgroups +
			d->acc100_conf.q_dl_4g.num_aqs_per_groups *
			d->acc100_conf.q_dl_4g.num_qgroups +
			d->acc100_conf.q_ul_4g.num_aqs_per_groups *
			d->acc100_conf.q_ul_4g.num_qgroups;
	dev_info->queue_size_lim = ACC100_MAX_QUEUE_DEPTH;
	dev_info->hardware_accelerated = true;
	dev_info->max_dl_queue_priority =
			d->acc100_conf.q_dl_4g.num_qgroups - 1;
	dev_info->max_ul_queue_priority =
			d->acc100_conf.q_ul_4g.num_qgroups - 1;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->cpu_flag_reqs = NULL;
	dev_info->min_alignment = 64;
	dev_info->capabilities = bbdev_capabilities;
#ifdef ACC100_EXT_MEM
	dev_info->harq_buffer_size = d->ddr_size;
#else
	dev_info->harq_buffer_size = 0;
#endif
}
static int
acc100_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct acc100_queue *q = dev->data->queues[queue_id].queue_private;

	if (dev->intr_handle->type != RTE_INTR_HANDLE_VFIO_MSI &&
			dev->intr_handle->type != RTE_INTR_HANDLE_UIO)
		return -ENOTSUP;

	q->irq_enable = 1;
	return 0;
}

static int
acc100_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct acc100_queue *q = dev->data->queues[queue_id].queue_private;

	if (dev->intr_handle->type != RTE_INTR_HANDLE_VFIO_MSI &&
			dev->intr_handle->type != RTE_INTR_HANDLE_UIO)
		return -ENOTSUP;

	q->irq_enable = 0;
	return 0;
}
static const struct rte_bbdev_ops acc100_bbdev_ops = {
	.setup_queues = acc100_setup_queues,
	.intr_enable = acc100_intr_enable,
	.close = acc100_dev_close,
	.info_get = acc100_dev_info_get,
	.queue_setup = acc100_queue_setup,
	.queue_release = acc100_queue_release,
	.queue_intr_enable = acc100_queue_intr_enable,
	.queue_intr_disable = acc100_queue_intr_disable
};
/* ACC100 PCI PF address map */
static struct rte_pci_id pci_id_acc100_pf_map[] = {
	{
		RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_PF_DEVICE_ID)
	},
	{.device_id = 0},
};

/* ACC100 PCI VF address map */
static struct rte_pci_id pci_id_acc100_vf_map[] = {
	{
		RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_VF_DEVICE_ID)
	},
	{.device_id = 0},
};
/* Read flag value 0/1 from bitmap */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}
static inline char *
mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
{
	if (unlikely(len > rte_pktmbuf_tailroom(m)))
		return NULL;

	char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
	m->data_len = (uint16_t)(m->data_len + len);
	m_head->pkt_len = (m_head->pkt_len + len);
	return tail;
}
/* Fill in a frame control word for turbo encoding. */
static inline void
acc100_fcw_te_fill(const struct rte_bbdev_enc_op *op, struct acc100_fcw_te *fcw)
{
	fcw->code_block_mode = op->turbo_enc.code_block_mode;
	if (fcw->code_block_mode == 0) { /* For TB mode */
		fcw->k_neg = op->turbo_enc.tb_params.k_neg;
		fcw->k_pos = op->turbo_enc.tb_params.k_pos;
		fcw->c_neg = op->turbo_enc.tb_params.c_neg;
		fcw->c = op->turbo_enc.tb_params.c;
		fcw->ncb_neg = op->turbo_enc.tb_params.ncb_neg;
		fcw->ncb_pos = op->turbo_enc.tb_params.ncb_pos;

		if (check_bit(op->turbo_enc.op_flags,
				RTE_BBDEV_TURBO_RATE_MATCH)) {
			fcw->bypass_rm = 0;
			fcw->cab = op->turbo_enc.tb_params.cab;
			fcw->ea = op->turbo_enc.tb_params.ea;
			fcw->eb = op->turbo_enc.tb_params.eb;
		} else {
			/* E is set to the encoding output size when RM is
			 * bypassed.
			 */
			fcw->bypass_rm = 1;
			fcw->cab = fcw->c_neg;
			fcw->ea = 3 * fcw->k_neg + 12;
			fcw->eb = 3 * fcw->k_pos + 12;
		}
	} else { /* For CB mode */
		fcw->k_pos = op->turbo_enc.cb_params.k;
		fcw->ncb_pos = op->turbo_enc.cb_params.ncb;

		if (check_bit(op->turbo_enc.op_flags,
				RTE_BBDEV_TURBO_RATE_MATCH)) {
			fcw->bypass_rm = 0;
			fcw->eb = op->turbo_enc.cb_params.e;
		} else {
			/* E is set to the encoding output size when RM is
			 * bypassed.
			 */
			fcw->bypass_rm = 1;
			fcw->eb = 3 * fcw->k_pos + 12;
		}
	}

	fcw->bypass_rv_idx1 = check_bit(op->turbo_enc.op_flags,
			RTE_BBDEV_TURBO_RV_INDEX_BYPASS);
	fcw->code_block_crc = check_bit(op->turbo_enc.op_flags,
			RTE_BBDEV_TURBO_CRC_24B_ATTACH);
	fcw->rv_idx1 = op->turbo_enc.rv_index;
}
/* Compute value of k0.
 * Based on 3GPP 38.212 Table 5.4.2.1-2
 * Starting position of different redundancy versions, k0
 */
static inline uint16_t
get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
{
	if (rv_index == 0)
		return 0;
	uint16_t n = (bg == 1 ? ACC100_N_ZC_1 : ACC100_N_ZC_2) * z_c;
	if (n_cb == n) {
		if (rv_index == 1)
			return (bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * z_c;
		else if (rv_index == 2)
			return (bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * z_c;
		else
			return (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c;
	}
	/* LBRM case - includes a division by N */
	if (rv_index == 1)
		return (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb)
				/ n) * z_c;
	else if (rv_index == 2)
		return (((bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * n_cb)
				/ n) * z_c;
	else
		return (((bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * n_cb)
				/ n) * z_c;
}
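/* get_k0() example (informative), assuming the ACC100_K0_* constants carry
 * the Table 5.4.2.1-2 numerators (e.g. 17 for BG1/rv1 over N = 66 * Zc):
 * for bg = 1, z_c = 128 and a full buffer (n_cb = 66 * 128 = 8448),
 * rv_index = 1 yields k0 = 17 * 128 = 2176.
 */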
/* Fill in a frame control word for LDPC encoding. */
static inline void
acc100_fcw_le_fill(const struct rte_bbdev_enc_op *op,
		struct acc100_fcw_le *fcw, int num_cb)
{
	fcw->qm = op->ldpc_enc.q_m;
	fcw->nfiller = op->ldpc_enc.n_filler;
	fcw->BG = (op->ldpc_enc.basegraph - 1);
	fcw->Zc = op->ldpc_enc.z_c;
	fcw->ncb = op->ldpc_enc.n_cb;
	fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,
			op->ldpc_enc.rv_index);
	fcw->rm_e = op->ldpc_enc.cb_params.e;
	fcw->crc_select = check_bit(op->ldpc_enc.op_flags,
			RTE_BBDEV_LDPC_CRC_24B_ATTACH);
	fcw->bypass_intlv = check_bit(op->ldpc_enc.op_flags,
			RTE_BBDEV_LDPC_INTERLEAVER_BYPASS);
	fcw->mcb_count = num_cb;
}
/* Fill in a frame control word for turbo decoding. */
static inline void
acc100_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_td *fcw)
{
	/* Note : Early termination is always enabled for 4GUL */
	fcw->fcw_ver = 1;
	if (op->turbo_dec.code_block_mode == 0)
		fcw->k_pos = op->turbo_dec.tb_params.k_pos;
	else
		fcw->k_pos = op->turbo_dec.cb_params.k;
	fcw->turbo_crc_type = check_bit(op->turbo_dec.op_flags,
			RTE_BBDEV_TURBO_CRC_TYPE_24B);
	fcw->bypass_sb_deint = 0;
	fcw->raw_decoder_input_on = 0;
	fcw->max_iter = op->turbo_dec.iter_max;
	fcw->half_iter_on = !check_bit(op->turbo_dec.op_flags,
			RTE_BBDEV_TURBO_HALF_ITERATION_EVEN);
}
/* Fill in a frame control word for LDPC decoding. */
static inline void
acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,
		union acc100_harq_layout_data *harq_layout)
{
	uint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;
	uint16_t harq_index;
	uint32_t l;
	bool harq_prun = false;

	fcw->qm = op->ldpc_dec.q_m;
	fcw->nfiller = op->ldpc_dec.n_filler;
	fcw->BG = (op->ldpc_dec.basegraph - 1);
	fcw->Zc = op->ldpc_dec.z_c;
	fcw->ncb = op->ldpc_dec.n_cb;
	fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_dec.basegraph,
			op->ldpc_dec.rv_index);
	if (op->ldpc_dec.code_block_mode == 1)
		fcw->rm_e = op->ldpc_dec.cb_params.e;
	else
		fcw->rm_e = (op->ldpc_dec.tb_params.r <
				op->ldpc_dec.tb_params.cab) ?
						op->ldpc_dec.tb_params.ea :
						op->ldpc_dec.tb_params.eb;

	fcw->hcin_en = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
	fcw->hcout_en = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
	fcw->crc_select = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
	fcw->bypass_dec = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_DECODE_BYPASS);
	fcw->bypass_intlv = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS);
	if (op->ldpc_dec.q_m == 1) {
		fcw->bypass_intlv = 1;
		fcw->qm = 2;
	}
	fcw->hcin_decomp_mode = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
	fcw->hcout_comp_mode = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
	fcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_LLR_COMPRESSION);
	harq_index = op->ldpc_dec.harq_combined_output.offset /
			ACC100_HARQ_OFFSET;
#ifdef ACC100_EXT_MEM
	/* Limit cases when HARQ pruning is valid */
	harq_prun = ((op->ldpc_dec.harq_combined_output.offset %
			ACC100_HARQ_OFFSET) == 0) &&
			(op->ldpc_dec.harq_combined_output.offset <= UINT16_MAX
			* ACC100_HARQ_OFFSET);
#endif
	if (fcw->hcin_en > 0) {
		harq_in_length = op->ldpc_dec.harq_combined_input.length;
		if (fcw->hcin_decomp_mode > 0)
			harq_in_length = harq_in_length * 8 / 6;
		harq_in_length = RTE_ALIGN(harq_in_length, 64);
		if ((harq_layout[harq_index].offset > 0) & harq_prun) {
			rte_bbdev_log_debug("HARQ IN offset unexpected for now\n");
			fcw->hcin_size0 = harq_layout[harq_index].size0;
			fcw->hcin_offset = harq_layout[harq_index].offset;
			fcw->hcin_size1 = harq_in_length -
					harq_layout[harq_index].offset;
		} else {
			fcw->hcin_size0 = harq_in_length;
			fcw->hcin_offset = 0;
			fcw->hcin_size1 = 0;
		}
	} else {
		fcw->hcin_size0 = 0;
		fcw->hcin_offset = 0;
		fcw->hcin_size1 = 0;
	}

	fcw->itmax = op->ldpc_dec.iter_max;
	fcw->itstop = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
	fcw->synd_precoder = fcw->itstop;
	/*
	 * These are all implicitly set
	 * fcw->synd_post = 0;
	 * fcw->so_en = 0;
	 * fcw->so_bypass_rm = 0;
	 * fcw->so_bypass_intlv = 0;
	 * fcw->dec_convllr = 0;
	 * fcw->hcout_convllr = 0;
	 * fcw->hcout_size1 = 0;
	 * fcw->so_it = 0;
	 * fcw->hcout_offset = 0;
	 * fcw->negstop_th = 0;
	 * fcw->negstop_it = 0;
	 * fcw->negstop_en = 0;
	 */
	if (fcw->hcout_en > 0) {
		parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)
				* op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
		k0_p = (fcw->k0 > parity_offset) ?
				fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;
		ncb_p = fcw->ncb - op->ldpc_dec.n_filler;
		l = k0_p + fcw->rm_e;
		harq_out_length = (uint16_t) fcw->hcin_size0;
		harq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);
		harq_out_length = (harq_out_length + 0x3F) & 0xFFC0;
		if ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) &&
				harq_prun) {
			fcw->hcout_size0 = (uint16_t) fcw->hcin_size0;
			fcw->hcout_offset = k0_p & 0xFFC0;
			fcw->hcout_size1 = harq_out_length - fcw->hcout_offset;
		} else {
			fcw->hcout_size0 = harq_out_length;
			fcw->hcout_size1 = 0;
			fcw->hcout_offset = 0;
		}
		harq_layout[harq_index].offset = fcw->hcout_offset;
		harq_layout[harq_index].size0 = fcw->hcout_size0;
	} else {
		fcw->hcout_size0 = 0;
		fcw->hcout_size1 = 0;
		fcw->hcout_offset = 0;
	}
}
/**
 * Fills descriptor with data pointers of one block type.
 *
 * @param desc
 *   Pointer to DMA descriptor.
 * @param input
 *   Pointer to pointer to input data which will be encoded. It can be changed
 *   and points to next segment in scatter-gather case.
 * @param offset
 *   Input offset in rte_mbuf structure. It is used for calculating the point
 *   where data is starting.
 * @param cb_len
 *   Length of currently processed Code Block
 * @param seg_total_left
 *   It indicates how many bytes still left in segment (mbuf) for further
 *   processing.
 * @param next_triplet
 *   Index for ACC100 DMA Descriptor triplet
 *
 * @return
 *   Returns index of next triplet on success, other value if lengths of
 *   pkt and processed cb do not match.
 *
 */
static inline int
acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,
		struct rte_mbuf **input, uint32_t *offset, uint32_t cb_len,
		uint32_t *seg_total_left, int next_triplet)
{
	uint32_t part_len;
	struct rte_mbuf *m = *input;

	part_len = (*seg_total_left < cb_len) ? *seg_total_left : cb_len;
	cb_len -= part_len;
	*seg_total_left -= part_len;

	desc->data_ptrs[next_triplet].address =
			rte_pktmbuf_iova_offset(m, *offset);
	desc->data_ptrs[next_triplet].blen = part_len;
	desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
	desc->data_ptrs[next_triplet].last = 0;
	desc->data_ptrs[next_triplet].dma_ext = 0;
	*offset += part_len;
	next_triplet++;

	while (cb_len > 0) {
		if (next_triplet < ACC100_DMA_MAX_NUM_POINTERS &&
				m->next != NULL) {

			m = m->next;
			*seg_total_left = rte_pktmbuf_data_len(m);
			part_len = (*seg_total_left < cb_len) ?
					*seg_total_left :
					cb_len;
			desc->data_ptrs[next_triplet].address =
					rte_pktmbuf_iova_offset(m, 0);
			desc->data_ptrs[next_triplet].blen = part_len;
			desc->data_ptrs[next_triplet].blkid =
					ACC100_DMA_BLKID_IN;
			desc->data_ptrs[next_triplet].last = 0;
			desc->data_ptrs[next_triplet].dma_ext = 0;
			cb_len -= part_len;
			*seg_total_left -= part_len;
			/* Initializing offset for next segment (mbuf) */
			*offset = part_len;
			next_triplet++;
		} else {
			rte_bbdev_log(ERR,
				"Some data still left for processing: "
				"data_left: %u, next_triplet: %u, next_mbuf: %p",
				cb_len, next_triplet, m->next);
			return -EINVAL;
		}
	}
	/* Storing new mbuf as it could be changed in scatter-gather case */
	*input = m;

	return next_triplet;
}
/* Fills descriptor with data pointers of one block type.
 * Returns index of next triplet on success, other value if lengths of
 * output data and processed mbuf do not match.
 */
static inline int
acc100_dma_fill_blk_type_out(struct acc100_dma_req_desc *desc,
		struct rte_mbuf *output, uint32_t out_offset,
		uint32_t output_len, int next_triplet, int blk_id)
{
	desc->data_ptrs[next_triplet].address =
			rte_pktmbuf_iova_offset(output, out_offset);
	desc->data_ptrs[next_triplet].blen = output_len;
	desc->data_ptrs[next_triplet].blkid = blk_id;
	desc->data_ptrs[next_triplet].last = 0;
	desc->data_ptrs[next_triplet].dma_ext = 0;
	next_triplet++;

	return next_triplet;
}
static inline void
acc100_header_init(struct acc100_dma_req_desc *desc)
{
	desc->word0 = ACC100_DMA_DESC_TYPE;
	desc->word1 = 0; /**< Timestamp could be disabled */
	desc->word2 = 0;
	desc->word3 = 0;
	desc->numCBs = 1;
}
#ifdef RTE_LIBRTE_BBDEV_DEBUG
/* Check if any input data is unexpectedly left for processing */
static inline int
check_mbuf_total_left(uint32_t mbuf_total_left)
{
	if (mbuf_total_left == 0)
		return 0;
	rte_bbdev_log(ERR,
			"Some data still left for processing: mbuf_total_left = %u",
			mbuf_total_left);
	return -EINVAL;
}
#endif
static inline int
acc100_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
		struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
		struct rte_mbuf *output, uint32_t *in_offset,
		uint32_t *out_offset, uint32_t *out_length,
		uint32_t *mbuf_total_left, uint32_t *seg_total_left, uint8_t r)
{
	int next_triplet = 1; /* FCW already done */
	uint32_t e, ea, eb, length;
	uint16_t k, k_neg, k_pos;
	uint8_t cab, c_neg;

	desc->word0 = ACC100_DMA_DESC_TYPE;
	desc->word1 = 0; /**< Timestamp could be disabled */
	desc->word2 = 0;
	desc->word3 = 0;
	desc->numCBs = 1;

	if (op->turbo_enc.code_block_mode == 0) {
		ea = op->turbo_enc.tb_params.ea;
		eb = op->turbo_enc.tb_params.eb;
		cab = op->turbo_enc.tb_params.cab;
		k_neg = op->turbo_enc.tb_params.k_neg;
		k_pos = op->turbo_enc.tb_params.k_pos;
		c_neg = op->turbo_enc.tb_params.c_neg;
		e = (r < cab) ? ea : eb;
		k = (r < c_neg) ? k_neg : k_pos;
	} else {
		e = op->turbo_enc.cb_params.e;
		k = op->turbo_enc.cb_params.k;
	}

	if (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
		length = (k - 24) >> 3;
	else
		length = k >> 3;

	if (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < length))) {
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
				*mbuf_total_left, length);
		return -1;
	}

	next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
			length, seg_total_left, next_triplet);
	if (unlikely(next_triplet < 0)) {
		rte_bbdev_log(ERR,
				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
				op);
		return -1;
	}
	desc->data_ptrs[next_triplet - 1].last = 1;
	desc->m2dlen = next_triplet;
	*mbuf_total_left -= length;

	/* Set output length */
	if (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_RATE_MATCH))
		/* Integer round up division by 8 */
		*out_length = (e + 7) >> 3;
	else
		*out_length = (k >> 3) * 3 + 2;

	next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
			*out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
	if (unlikely(next_triplet < 0)) {
		rte_bbdev_log(ERR,
				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
				op);
		return -1;
	}
	op->turbo_enc.output.length += *out_length;
	*out_offset += *out_length;
	desc->data_ptrs[next_triplet - 1].last = 1;
	desc->d2mlen = next_triplet - desc->m2dlen;

	desc->op_addr = op;

	return 0;
}
static inline int
acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,
		struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
		struct rte_mbuf *output, uint32_t *in_offset,
		uint32_t *out_offset, uint32_t *out_length,
		uint32_t *mbuf_total_left, uint32_t *seg_total_left)
{
	int next_triplet = 1; /* FCW already done */
	uint16_t K, in_length_in_bits, in_length_in_bytes;
	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;

	acc100_header_init(desc);

	K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
	in_length_in_bits = K - enc->n_filler;
	if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) ||
			(enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH))
		in_length_in_bits -= 24;
	in_length_in_bytes = in_length_in_bits >> 3;

	if (unlikely((*mbuf_total_left == 0) ||
			(*mbuf_total_left < in_length_in_bytes))) {
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
				*mbuf_total_left, in_length_in_bytes);
		return -1;
	}

	next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
			in_length_in_bytes,
			seg_total_left, next_triplet);
	if (unlikely(next_triplet < 0)) {
		rte_bbdev_log(ERR,
				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
				op);
		return -1;
	}
	desc->data_ptrs[next_triplet - 1].last = 1;
	desc->m2dlen = next_triplet;
	*mbuf_total_left -= in_length_in_bytes;

	/* Set output length */
	/* Integer round up division by 8 */
	*out_length = (enc->cb_params.e + 7) >> 3;

	next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
			*out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
	op->ldpc_enc.output.length += *out_length;
	*out_offset += *out_length;
	desc->data_ptrs[next_triplet - 1].last = 1;
	desc->data_ptrs[next_triplet - 1].dma_ext = 0;
	desc->d2mlen = next_triplet - desc->m2dlen;

	desc->op_addr = op;

	return 0;
}
static inline int
acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
		struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
		struct rte_mbuf *h_output, struct rte_mbuf *s_output,
		uint32_t *in_offset, uint32_t *h_out_offset,
		uint32_t *s_out_offset, uint32_t *h_out_length,
		uint32_t *s_out_length, uint32_t *mbuf_total_left,
		uint32_t *seg_total_left, uint8_t r)
{
	int next_triplet = 1; /* FCW already done */
	uint16_t k;
	uint16_t crc24_overlap = 0;
	uint32_t e, kw;

	desc->word0 = ACC100_DMA_DESC_TYPE;
	desc->word1 = 0; /**< Timestamp could be disabled */
	desc->word2 = 0;
	desc->word3 = 0;
	desc->numCBs = 1;

	if (op->turbo_dec.code_block_mode == 0) {
		k = (r < op->turbo_dec.tb_params.c_neg)
			? op->turbo_dec.tb_params.k_neg
			: op->turbo_dec.tb_params.k_pos;
		e = (r < op->turbo_dec.tb_params.cab)
			? op->turbo_dec.tb_params.ea
			: op->turbo_dec.tb_params.eb;
	} else {
		k = op->turbo_dec.cb_params.k;
		e = op->turbo_dec.cb_params.e;
	}

	if ((op->turbo_dec.code_block_mode == 0)
		&& !check_bit(op->turbo_dec.op_flags,
		RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))
		crc24_overlap = 24;

	/* Calculates circular buffer size.
	 * According to 3gpp 36.212 section 5.1.4.2
	 *
	 * Kw = 3 * Kpi,
	 *
	 * where:
	 * Kpi = nCol * nRow
	 * where nCol is 32 and nRow can be calculated from:
	 * D =< nCol * nRow
	 * where D is the size of each output from turbo encoder block (k + 4).
	 */
	kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
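	/* Informative example: for k = 40, D = k + 4 = 44 aligns up to 64,
	 * so kw = 64 * 3 = 192 bytes of circular buffer input.
	 */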

	if (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < kw))) {
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
				*mbuf_total_left, kw);
		return -1;
	}

	next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset, kw,
			seg_total_left, next_triplet);
	if (unlikely(next_triplet < 0)) {
		rte_bbdev_log(ERR,
				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
				op);
		return -1;
	}
	desc->data_ptrs[next_triplet - 1].last = 1;
	desc->m2dlen = next_triplet;
	*mbuf_total_left -= kw;

	next_triplet = acc100_dma_fill_blk_type_out(
			desc, h_output, *h_out_offset,
			k >> 3, next_triplet, ACC100_DMA_BLKID_OUT_HARD);
	if (unlikely(next_triplet < 0)) {
		rte_bbdev_log(ERR,
				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
				op);
		return -1;
	}

	*h_out_length = ((k - crc24_overlap) >> 3);
	op->turbo_dec.hard_output.length += *h_out_length;
	*h_out_offset += *h_out_length;

	/* Soft output */
	if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
		if (check_bit(op->turbo_dec.op_flags,
				RTE_BBDEV_TURBO_EQUALIZER))
			*s_out_length = e;
		else
			*s_out_length = (k * 3) + 12;

		next_triplet = acc100_dma_fill_blk_type_out(desc, s_output,
				*s_out_offset, *s_out_length, next_triplet,
				ACC100_DMA_BLKID_OUT_SOFT);
		if (unlikely(next_triplet < 0)) {
			rte_bbdev_log(ERR,
					"Mismatch between data to process and mbuf data length in bbdev_op: %p",
					op);
			return -1;
		}

		op->turbo_dec.soft_output.length += *s_out_length;
		*s_out_offset += *s_out_length;
	}

	desc->data_ptrs[next_triplet - 1].last = 1;
	desc->d2mlen = next_triplet - desc->m2dlen;

	desc->op_addr = op;

	return 0;
}
static inline int
acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
		struct acc100_dma_req_desc *desc,
		struct rte_mbuf **input, struct rte_mbuf *h_output,
		uint32_t *in_offset, uint32_t *h_out_offset,
		uint32_t *h_out_length, uint32_t *mbuf_total_left,
		uint32_t *seg_total_left,
		struct acc100_fcw_ld *fcw)
{
	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
	int next_triplet = 1; /* FCW already done */
	uint32_t input_length;
	uint16_t output_length, crc24_overlap = 0;
	uint16_t sys_cols, K, h_p_size, h_np_size;
	bool h_comp = check_bit(dec->op_flags,
			RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);

	acc100_header_init(desc);

	if (check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
		crc24_overlap = 24;

	/* Compute some LDPC BG lengths */
	input_length = dec->cb_params.e;
	if (check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_LLR_COMPRESSION))
		input_length = (input_length * 3 + 3) / 4;
	sys_cols = (dec->basegraph == 1) ? 22 : 10;
	K = sys_cols * dec->z_c;
	output_length = K - dec->n_filler - crc24_overlap;

	if (unlikely((*mbuf_total_left == 0) ||
			(*mbuf_total_left < input_length))) {
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
				*mbuf_total_left, input_length);
		return -1;
	}

	next_triplet = acc100_dma_fill_blk_type_in(desc, input,
			in_offset, input_length,
			seg_total_left, next_triplet);

	if (unlikely(next_triplet < 0)) {
		rte_bbdev_log(ERR,
				"Mismatch between data to process and mbuf data length in bbdev_op: %p",
				op);
		return -1;
	}

	if (check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
		h_p_size = fcw->hcin_size0 + fcw->hcin_size1;
		if (h_comp)
			h_p_size = (h_p_size * 3 + 3) / 4;
		desc->data_ptrs[next_triplet].address =
				dec->harq_combined_input.offset;
		desc->data_ptrs[next_triplet].blen = h_p_size;
		desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN_HARQ;
		desc->data_ptrs[next_triplet].dma_ext = 1;
#ifndef ACC100_EXT_MEM
		acc100_dma_fill_blk_type_out(
				desc,
				op->ldpc_dec.harq_combined_input.data,
				op->ldpc_dec.harq_combined_input.offset,
				h_p_size,
				next_triplet,
				ACC100_DMA_BLKID_IN_HARQ);
#endif
		next_triplet++;
	}

	desc->data_ptrs[next_triplet - 1].last = 1;
	desc->m2dlen = next_triplet;
	*mbuf_total_left -= input_length;

	next_triplet = acc100_dma_fill_blk_type_out(desc, h_output,
			*h_out_offset, output_length >> 3, next_triplet,
			ACC100_DMA_BLKID_OUT_HARD);

	if (check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
		/* Pruned size of the HARQ */
		h_p_size = fcw->hcout_size0 + fcw->hcout_size1;
		/* Non-Pruned size of the HARQ */
		h_np_size = fcw->hcout_offset > 0 ?
				fcw->hcout_offset + fcw->hcout_size1 :
				h_p_size;
		if (h_comp) {
			h_np_size = (h_np_size * 3 + 3) / 4;
			h_p_size = (h_p_size * 3 + 3) / 4;
		}
		dec->harq_combined_output.length = h_np_size;
		desc->data_ptrs[next_triplet].address =
				dec->harq_combined_output.offset;
		desc->data_ptrs[next_triplet].blen = h_p_size;
		desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARQ;
		desc->data_ptrs[next_triplet].dma_ext = 1;
#ifndef ACC100_EXT_MEM
		acc100_dma_fill_blk_type_out(
				desc,
				dec->harq_combined_output.data,
				dec->harq_combined_output.offset,
				h_p_size,
				next_triplet,
				ACC100_DMA_BLKID_OUT_HARQ);
#endif
		next_triplet++;
	}

	*h_out_length = output_length >> 3;
	dec->hard_output.length += *h_out_length;
	*h_out_offset += *h_out_length;
	desc->data_ptrs[next_triplet - 1].last = 1;
	desc->d2mlen = next_triplet - desc->m2dlen;

	desc->op_addr = op;

	return 0;
}
static inline void
acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,
		struct acc100_dma_req_desc *desc,
		struct rte_mbuf *input, struct rte_mbuf *h_output,
		uint32_t *in_offset, uint32_t *h_out_offset,
		uint32_t *h_out_length,
		union acc100_harq_layout_data *harq_layout)
{
	int next_triplet = 1; /* FCW already done */
	desc->data_ptrs[next_triplet].address =
			rte_pktmbuf_iova_offset(input, *in_offset);
	next_triplet++;

	if (check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
		struct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;
		desc->data_ptrs[next_triplet].address = hi.offset;
#ifndef ACC100_EXT_MEM
		desc->data_ptrs[next_triplet].address =
				rte_pktmbuf_iova_offset(hi.data, hi.offset);
#endif
		next_triplet++;
	}

	desc->data_ptrs[next_triplet].address =
			rte_pktmbuf_iova_offset(h_output, *h_out_offset);
	*h_out_length = desc->data_ptrs[next_triplet].blen;
	next_triplet++;

	if (check_bit(op->ldpc_dec.op_flags,
				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
		desc->data_ptrs[next_triplet].address =
				op->ldpc_dec.harq_combined_output.offset;
		/* Adjust based on previous operation */
		struct rte_bbdev_dec_op *prev_op = desc->op_addr;
		op->ldpc_dec.harq_combined_output.length =
				prev_op->ldpc_dec.harq_combined_output.length;
		int16_t hq_idx = op->ldpc_dec.harq_combined_output.offset /
				ACC100_HARQ_OFFSET;
		int16_t prev_hq_idx =
				prev_op->ldpc_dec.harq_combined_output.offset
				/ ACC100_HARQ_OFFSET;
		harq_layout[hq_idx].val = harq_layout[prev_hq_idx].val;
#ifndef ACC100_EXT_MEM
		struct rte_bbdev_op_data ho =
				op->ldpc_dec.harq_combined_output;
		desc->data_ptrs[next_triplet].address =
				rte_pktmbuf_iova_offset(ho.data, ho.offset);
#endif
		next_triplet++;
	}

	op->ldpc_dec.hard_output.length += *h_out_length;
	desc->op_addr = op;
}
/* Enqueue a number of operations to HW and update software rings */
static inline void
acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,
		struct rte_bbdev_stats *queue_stats)
{
	union acc100_enqueue_reg_fmt enq_req;
#ifdef RTE_BBDEV_OFFLOAD_COST
	uint64_t start_time = 0;
	queue_stats->acc_offload_cycles = 0;
#else
	RTE_SET_USED(queue_stats);
#endif

	enq_req.val = 0;
	/* Setting offset, 100b for 256 DMA Desc */
	enq_req.addr_offset = ACC100_DESC_OFFSET;

	/* Split ops into batches */
	do {
		union acc100_dma_desc *desc;
		uint16_t enq_batch_size;
		uint64_t offset;
		rte_iova_t req_elem_addr;

		enq_batch_size = RTE_MIN(n, MAX_ENQ_BATCH_SIZE);

		/* Set flag on last descriptor in a batch */
		desc = q->ring_addr + ((q->sw_ring_head + enq_batch_size - 1) &
				q->sw_ring_wrap_mask);
		desc->req.last_desc_in_batch = 1;

		/* Calculate the 1st descriptor's address */
		offset = ((q->sw_ring_head & q->sw_ring_wrap_mask) *
				sizeof(union acc100_dma_desc));
		req_elem_addr = q->ring_addr_iova + offset;

		/* Fill enqueue struct */
		enq_req.num_elem = enq_batch_size;
		/* low 6 bits are not needed */
		enq_req.req_elem_addr = (uint32_t)(req_elem_addr >> 6);
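		/* Informative note: the enqueue register appears to carry the
		 * ring address in 64-byte units (hence the shift); the ring
		 * base is at least 64-byte aligned, so no information is lost.
		 */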

#ifdef RTE_LIBRTE_BBDEV_DEBUG
		rte_memdump(stderr, "Req sdone", desc, sizeof(*desc));
#endif
		rte_bbdev_log_debug(
				"Enqueue %u reqs (phys %#"PRIx64") to reg %p",
				enq_batch_size,
				req_elem_addr,
				(void *)q->mmio_reg_enqueue);

		rte_wmb();

#ifdef RTE_BBDEV_OFFLOAD_COST
		/* Start time measurement for enqueue function offload. */
		start_time = rte_rdtsc_precise();
#endif
		rte_bbdev_log(DEBUG, "Debug : MMIO Enqueue");
		mmio_write(q->mmio_reg_enqueue, enq_req.val);

#ifdef RTE_BBDEV_OFFLOAD_COST
		queue_stats->acc_offload_cycles +=
				rte_rdtsc_precise() - start_time;
#endif

		q->sw_ring_head += enq_batch_size;
		n -= enq_batch_size;

	} while (n);
}
2041 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2042 /* Validates turbo encoder parameters */
2044 validate_enc_op(struct rte_bbdev_enc_op *op)
2046 struct rte_bbdev_op_turbo_enc *turbo_enc = &op->turbo_enc;
2047 struct rte_bbdev_op_enc_turbo_cb_params *cb = NULL;
2048 struct rte_bbdev_op_enc_turbo_tb_params *tb = NULL;
2049 uint16_t kw, kw_neg, kw_pos;
2051 if (op->mempool == NULL) {
2052 rte_bbdev_log(ERR, "Invalid mempool pointer");
2055 if (turbo_enc->input.data == NULL) {
2056 rte_bbdev_log(ERR, "Invalid input pointer");
2059 if (turbo_enc->output.data == NULL) {
2060 rte_bbdev_log(ERR, "Invalid output pointer");
2063 if (turbo_enc->rv_index > 3) {
2065 "rv_index (%u) is out of range 0 <= value <= 3",
2066 turbo_enc->rv_index);
2069 if (turbo_enc->code_block_mode != 0 &&
2070 turbo_enc->code_block_mode != 1) {
2072 "code_block_mode (%u) is out of range 0 <= value <= 1",
2073 turbo_enc->code_block_mode);
2077 if (turbo_enc->code_block_mode == 0) {
2078 tb = &turbo_enc->tb_params;
2079 if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE
2080 || tb->k_neg > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2083 "k_neg (%u) is out of range %u <= value <= %u",
2084 tb->k_neg, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2085 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2088 if (tb->k_pos < RTE_BBDEV_TURBO_MIN_CB_SIZE
2089 || tb->k_pos > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2091 "k_pos (%u) is out of range %u <= value <= %u",
2092 tb->k_pos, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2093 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2096 if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))
2098 "c_neg (%u) is out of range 0 <= value <= %u",
2100 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1);
2101 if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
2103 "c (%u) is out of range 1 <= value <= %u",
2104 tb->c, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
2107 if (tb->cab > tb->c) {
2109 "cab (%u) is greater than c (%u)",
2113 if ((tb->ea < RTE_BBDEV_TURBO_MIN_CB_SIZE || (tb->ea % 2))
2114 && tb->r < tb->cab) {
2116 "ea (%u) is less than %u or it is not even",
2117 tb->ea, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2120 if ((tb->eb < RTE_BBDEV_TURBO_MIN_CB_SIZE || (tb->eb % 2))
2121 && tb->c > tb->cab) {
2123 "eb (%u) is less than %u or it is not even",
2124 tb->eb, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2128 kw_neg = 3 * RTE_ALIGN_CEIL(tb->k_neg + 4,
2129 RTE_BBDEV_TURBO_C_SUBBLOCK);
2130 if (tb->ncb_neg < tb->k_neg || tb->ncb_neg > kw_neg) {
2132 "ncb_neg (%u) is out of range (%u) k_neg <= value <= (%u) kw_neg",
2133 tb->ncb_neg, tb->k_neg, kw_neg);
2137 kw_pos = 3 * RTE_ALIGN_CEIL(tb->k_pos + 4,
2138 RTE_BBDEV_TURBO_C_SUBBLOCK);
2139 if (tb->ncb_pos < tb->k_pos || tb->ncb_pos > kw_pos) {
2141 "ncb_pos (%u) is out of range (%u) k_pos <= value <= (%u) kw_pos",
2142 tb->ncb_pos, tb->k_pos, kw_pos);
2145 if (tb->r > (tb->c - 1)) {
2147 "r (%u) is greater than c - 1 (%u)",
2152 cb = &turbo_enc->cb_params;
2153 if (cb->k < RTE_BBDEV_TURBO_MIN_CB_SIZE
2154 || cb->k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2156 "k (%u) is out of range %u <= value <= %u",
2157 cb->k, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2158 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2162 if (cb->e < RTE_BBDEV_TURBO_MIN_CB_SIZE || (cb->e % 2)) {
2164 "e (%u) is less than %u or it is not even",
2165 cb->e, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2169 kw = RTE_ALIGN_CEIL(cb->k + 4, RTE_BBDEV_TURBO_C_SUBBLOCK) * 3;
2170 if (cb->ncb < cb->k || cb->ncb > kw) {
2172 "ncb (%u) is out of range (%u) k <= value <= (%u) kw",
2173 cb->ncb, cb->k, kw);
2180 /* Validates LDPC encoder parameters */
2182 validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
2184 struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;
2186 if (op->mempool == NULL) {
2187 rte_bbdev_log(ERR, "Invalid mempool pointer");
2190 if (ldpc_enc->input.data == NULL) {
2191 rte_bbdev_log(ERR, "Invalid input pointer");
2194 if (ldpc_enc->output.data == NULL) {
2195 rte_bbdev_log(ERR, "Invalid output pointer");
2198 if (ldpc_enc->input.length >
2199 RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {
2200 		rte_bbdev_log(ERR, "CB size (%u) bytes is too big, max: %d bytes",
2201 				ldpc_enc->input.length,
2202 				RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3);
2205 if ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {
2207 "BG (%u) is out of range 1 <= value <= 2",
2208 ldpc_enc->basegraph);
2211 if (ldpc_enc->rv_index > 3) {
2213 "rv_index (%u) is out of range 0 <= value <= 3",
2214 ldpc_enc->rv_index);
2217 if (ldpc_enc->code_block_mode > 1) {
2219 "code_block_mode (%u) is out of range 0 <= value <= 1",
2220 ldpc_enc->code_block_mode);
2223 int K = (ldpc_enc->basegraph == 1 ? 22 : 10) * ldpc_enc->z_c;
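	/* Per 3GPP TS 38.212, BG1 carries 22 systematic columns and BG2
	 * carries 10; e.g. BG1 with Zc = 384 gives K = 22 * 384 = 8448 bits.
	 */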
2224 if (ldpc_enc->n_filler >= K) {
2226 "K and F are not compatible %u %u",
2227 K, ldpc_enc->n_filler);
2233 /* Validates LDPC decoder parameters */
2235 validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
2237 struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
2239 if (op->mempool == NULL) {
2240 rte_bbdev_log(ERR, "Invalid mempool pointer");
2243 if ((ldpc_dec->basegraph > 2) || (ldpc_dec->basegraph == 0)) {
2245 "BG (%u) is out of range 1 <= value <= 2",
2246 ldpc_dec->basegraph);
2249 if (ldpc_dec->iter_max == 0) {
2251 "iter_max (%u) is equal to 0",
2252 ldpc_dec->iter_max);
2255 if (ldpc_dec->rv_index > 3) {
2257 "rv_index (%u) is out of range 0 <= value <= 3",
2258 ldpc_dec->rv_index);
2261 if (ldpc_dec->code_block_mode > 1) {
2263 "code_block_mode (%u) is out of range 0 <= value <= 1",
2264 ldpc_dec->code_block_mode);
2267 int K = (ldpc_dec->basegraph == 1 ? 22 : 10) * ldpc_dec->z_c;
2268 if (ldpc_dec->n_filler >= K) {
2270 "K and F are not compatible %u %u",
2271 K, ldpc_dec->n_filler);
2278 /* Enqueue one encode operation for ACC100 device in CB mode */
2280 enqueue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2281 uint16_t total_enqueued_cbs)
2283 union acc100_dma_desc *desc = NULL;
2285 uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2287 struct rte_mbuf *input, *output_head, *output;
2289 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2290 /* Validate op structure */
2291 if (validate_enc_op(op) == -1) {
2292 rte_bbdev_log(ERR, "Turbo encoder validation failed");
2297 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2298 & q->sw_ring_wrap_mask);
2299 desc = q->ring_addr + desc_idx;
2300 acc100_fcw_te_fill(op, &desc->req.fcw_te);
2302 input = op->turbo_enc.input.data;
2303 output_head = output = op->turbo_enc.output.data;
2304 in_offset = op->turbo_enc.input.offset;
2305 out_offset = op->turbo_enc.output.offset;
2307 mbuf_total_left = op->turbo_enc.input.length;
2308 seg_total_left = rte_pktmbuf_data_len(op->turbo_enc.input.data)
2311 ret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,
2312 &in_offset, &out_offset, &out_length, &mbuf_total_left,
2313 &seg_total_left, 0);
2315 if (unlikely(ret < 0))
2318 mbuf_append(output_head, output, out_length);
2320 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2321 rte_memdump(stderr, "FCW", &desc->req.fcw_te,
2322 sizeof(desc->req.fcw_te) - 8);
2323 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2324 if (check_mbuf_total_left(mbuf_total_left) != 0)
2327 /* One CB (one op) was successfully prepared to enqueue */
2331 /* Enqueue a batch of LDPC encode operations muxed into one descriptor, in CB mode */
2333 enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,
2334 uint16_t total_enqueued_cbs, int16_t num)
2336 union acc100_dma_desc *desc = NULL;
2337 uint32_t out_length;
2338 struct rte_mbuf *output_head, *output;
2339 int i, next_triplet;
2340 uint16_t in_length_in_bytes;
2341 struct rte_bbdev_op_ldpc_enc *enc = &ops[0]->ldpc_enc;
2343 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2344 /* Validate op structure */
2345 if (validate_ldpc_enc_op(ops[0]) == -1) {
2346 rte_bbdev_log(ERR, "LDPC encoder validation failed");
2351 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2352 & q->sw_ring_wrap_mask);
2353 desc = q->ring_addr + desc_idx;
2354 acc100_fcw_le_fill(ops[0], &desc->req.fcw_le, num);
2356 	/* This could be done at polling time */
2357 acc100_header_init(&desc->req);
2358 desc->req.numCBs = num;
2360 in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;
2361 out_length = (enc->cb_params.e + 7) >> 3;
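	/* E is a bit count; round up to whole bytes */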
2362 desc->req.m2dlen = 1 + num;
2363 desc->req.d2mlen = num;
2366 for (i = 0; i < num; i++) {
2367 desc->req.data_ptrs[next_triplet].address =
2368 rte_pktmbuf_iova_offset(ops[i]->ldpc_enc.input.data, 0);
2369 desc->req.data_ptrs[next_triplet].blen = in_length_in_bytes;
2371 desc->req.data_ptrs[next_triplet].address =
2372 rte_pktmbuf_iova_offset(
2373 ops[i]->ldpc_enc.output.data, 0);
2374 desc->req.data_ptrs[next_triplet].blen = out_length;
2376 ops[i]->ldpc_enc.output.length = out_length;
2377 output_head = output = ops[i]->ldpc_enc.output.data;
2378 mbuf_append(output_head, output, out_length);
2379 output->data_len = out_length;
2382 desc->req.op_addr = ops[0];
2384 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2385 rte_memdump(stderr, "FCW", &desc->req.fcw_le,
2386 sizeof(desc->req.fcw_le) - 8);
2387 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2390 /* One CB (one op) was successfully prepared to enqueue */
2394 /* Enqueue one LDPC encode operation for ACC100 device in CB mode */
2396 enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2397 uint16_t total_enqueued_cbs)
2399 union acc100_dma_desc *desc = NULL;
2401 uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2403 struct rte_mbuf *input, *output_head, *output;
2405 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2406 /* Validate op structure */
2407 if (validate_ldpc_enc_op(op) == -1) {
2408 rte_bbdev_log(ERR, "LDPC encoder validation failed");
2413 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2414 & q->sw_ring_wrap_mask);
2415 desc = q->ring_addr + desc_idx;
2416 acc100_fcw_le_fill(op, &desc->req.fcw_le, 1);
2418 input = op->ldpc_enc.input.data;
2419 output_head = output = op->ldpc_enc.output.data;
2420 in_offset = op->ldpc_enc.input.offset;
2421 out_offset = op->ldpc_enc.output.offset;
2423 mbuf_total_left = op->ldpc_enc.input.length;
2424 seg_total_left = rte_pktmbuf_data_len(op->ldpc_enc.input.data)
2427 ret = acc100_dma_desc_le_fill(op, &desc->req, &input, output,
2428 &in_offset, &out_offset, &out_length, &mbuf_total_left,
2431 if (unlikely(ret < 0))
2434 mbuf_append(output_head, output, out_length);
2436 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2437 rte_memdump(stderr, "FCW", &desc->req.fcw_le,
2438 sizeof(desc->req.fcw_le) - 8);
2439 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2441 if (check_mbuf_total_left(mbuf_total_left) != 0)
2444 /* One CB (one op) was successfully prepared to enqueue */
2449 /* Enqueue one encode operation for ACC100 device in TB mode. */
2451 enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
2452 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
2454 union acc100_dma_desc *desc = NULL;
2457 uint32_t in_offset, out_offset, out_length, mbuf_total_left,
2459 struct rte_mbuf *input, *output_head, *output;
2460 uint16_t current_enqueued_cbs = 0;
2462 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2463 /* Validate op structure */
2464 if (validate_enc_op(op) == -1) {
2465 rte_bbdev_log(ERR, "Turbo encoder validation failed");
2470 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2471 & q->sw_ring_wrap_mask);
2472 desc = q->ring_addr + desc_idx;
2473 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
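	/* Descriptors are 256 bytes apart, so desc_idx << 8 is the byte
	 * offset of this descriptor in the ring; the FCW sits at a fixed
	 * offset inside it.
	 */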
2474 acc100_fcw_te_fill(op, &desc->req.fcw_te);
2476 input = op->turbo_enc.input.data;
2477 output_head = output = op->turbo_enc.output.data;
2478 in_offset = op->turbo_enc.input.offset;
2479 out_offset = op->turbo_enc.output.offset;
2481 mbuf_total_left = op->turbo_enc.input.length;
2483 c = op->turbo_enc.tb_params.c;
2484 r = op->turbo_enc.tb_params.r;
2486 while (mbuf_total_left > 0 && r < c) {
2487 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
2488 /* Set up DMA descriptor */
2489 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
2490 & q->sw_ring_wrap_mask);
2491 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
2492 desc->req.data_ptrs[0].blen = ACC100_FCW_TE_BLEN;
2494 ret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,
2495 &in_offset, &out_offset, &out_length,
2496 &mbuf_total_left, &seg_total_left, r);
2497 if (unlikely(ret < 0))
2499 mbuf_append(output_head, output, out_length);
2501 /* Set total number of CBs in TB */
2502 desc->req.cbs_in_tb = cbs_in_tb;
2503 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2504 rte_memdump(stderr, "FCW", &desc->req.fcw_te,
2505 sizeof(desc->req.fcw_te) - 8);
2506 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2509 if (seg_total_left == 0) {
2510 /* Go to the next mbuf */
2511 input = input->next;
2513 output = output->next;
2517 total_enqueued_cbs++;
2518 current_enqueued_cbs++;
2522 if (unlikely(desc == NULL))
2523 return current_enqueued_cbs;
2525 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2526 if (check_mbuf_total_left(mbuf_total_left) != 0)
2530 /* Set SDone on last CB descriptor for TB mode. */
2531 desc->req.sdone_enable = 1;
2532 desc->req.irq_enable = q->irq_enable;
2534 return current_enqueued_cbs;
2537 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2538 /* Validates turbo decoder parameters */
2540 validate_dec_op(struct rte_bbdev_dec_op *op)
2542 struct rte_bbdev_op_turbo_dec *turbo_dec = &op->turbo_dec;
2543 struct rte_bbdev_op_dec_turbo_cb_params *cb = NULL;
2544 struct rte_bbdev_op_dec_turbo_tb_params *tb = NULL;
2546 if (op->mempool == NULL) {
2547 rte_bbdev_log(ERR, "Invalid mempool pointer");
2550 if (turbo_dec->input.data == NULL) {
2551 rte_bbdev_log(ERR, "Invalid input pointer");
2554 if (turbo_dec->hard_output.data == NULL) {
2555 rte_bbdev_log(ERR, "Invalid hard_output pointer");
2558 if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
2559 turbo_dec->soft_output.data == NULL) {
2560 rte_bbdev_log(ERR, "Invalid soft_output pointer");
2563 if (turbo_dec->rv_index > 3) {
2565 "rv_index (%u) is out of range 0 <= value <= 3",
2566 turbo_dec->rv_index);
2569 if (turbo_dec->iter_min < 1) {
2571 "iter_min (%u) is less than 1",
2572 turbo_dec->iter_min);
2575 if (turbo_dec->iter_max <= 2) {
2577 "iter_max (%u) is less than or equal to 2",
2578 turbo_dec->iter_max);
2581 if (turbo_dec->iter_min > turbo_dec->iter_max) {
2583 "iter_min (%u) is greater than iter_max (%u)",
2584 turbo_dec->iter_min, turbo_dec->iter_max);
2587 if (turbo_dec->code_block_mode != 0 &&
2588 turbo_dec->code_block_mode != 1) {
2590 "code_block_mode (%u) is out of range 0 <= value <= 1",
2591 turbo_dec->code_block_mode);
2595 if (turbo_dec->code_block_mode == 0) {
2596 tb = &turbo_dec->tb_params;
2597 if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE
2598 || tb->k_neg > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2601 "k_neg (%u) is out of range %u <= value <= %u",
2602 tb->k_neg, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2603 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2606 if ((tb->k_pos < RTE_BBDEV_TURBO_MIN_CB_SIZE
2607 || tb->k_pos > RTE_BBDEV_TURBO_MAX_CB_SIZE)
2608 && tb->c > tb->c_neg) {
2610 "k_pos (%u) is out of range %u <= value <= %u",
2611 tb->k_pos, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2612 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2615 if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))
2617 "c_neg (%u) is out of range 0 <= value <= %u",
2619 RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1);
2620 if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
2622 "c (%u) is out of range 1 <= value <= %u",
2623 tb->c, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
2626 if (tb->cab > tb->c) {
2628 "cab (%u) is greater than c (%u)",
2632 if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2633 (tb->ea < RTE_BBDEV_TURBO_MIN_CB_SIZE
2637 "ea (%u) is less than %u or it is not even",
2638 tb->ea, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2641 if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2642 (tb->eb < RTE_BBDEV_TURBO_MIN_CB_SIZE
2644 && tb->c > tb->cab) {
2646 "eb (%u) is less than %u or it is not even",
2647 tb->eb, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2650 cb = &turbo_dec->cb_params;
2651 if (cb->k < RTE_BBDEV_TURBO_MIN_CB_SIZE
2652 || cb->k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
2654 "k (%u) is out of range %u <= value <= %u",
2655 cb->k, RTE_BBDEV_TURBO_MIN_CB_SIZE,
2656 RTE_BBDEV_TURBO_MAX_CB_SIZE);
2659 if (check_bit(turbo_dec->op_flags, RTE_BBDEV_TURBO_EQUALIZER) &&
2660 (cb->e < RTE_BBDEV_TURBO_MIN_CB_SIZE ||
2663 "e (%u) is less than %u or it is not even",
2664 cb->e, RTE_BBDEV_TURBO_MIN_CB_SIZE);
2673 /* Enqueue one decode operation for ACC100 device in CB mode */
2675 enqueue_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2676 uint16_t total_enqueued_cbs)
2678 union acc100_dma_desc *desc = NULL;
2680 uint32_t in_offset, h_out_offset, s_out_offset, s_out_length,
2681 h_out_length, mbuf_total_left, seg_total_left;
2682 struct rte_mbuf *input, *h_output_head, *h_output,
2683 *s_output_head, *s_output;
2685 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2686 /* Validate op structure */
2687 if (validate_dec_op(op) == -1) {
2688 rte_bbdev_log(ERR, "Turbo decoder validation failed");
2693 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2694 & q->sw_ring_wrap_mask);
2695 desc = q->ring_addr + desc_idx;
2696 acc100_fcw_td_fill(op, &desc->req.fcw_td);
2698 input = op->turbo_dec.input.data;
2699 h_output_head = h_output = op->turbo_dec.hard_output.data;
2700 s_output_head = s_output = op->turbo_dec.soft_output.data;
2701 in_offset = op->turbo_dec.input.offset;
2702 h_out_offset = op->turbo_dec.hard_output.offset;
2703 s_out_offset = op->turbo_dec.soft_output.offset;
2704 h_out_length = s_out_length = 0;
2705 mbuf_total_left = op->turbo_dec.input.length;
2706 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
2708 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2709 if (unlikely(input == NULL)) {
2710 rte_bbdev_log(ERR, "Invalid mbuf pointer");
2715 /* Set up DMA descriptor */
2716 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
2717 & q->sw_ring_wrap_mask);
2719 ret = acc100_dma_desc_td_fill(op, &desc->req, &input, h_output,
2720 s_output, &in_offset, &h_out_offset, &s_out_offset,
2721 &h_out_length, &s_out_length, &mbuf_total_left,
2722 &seg_total_left, 0);
2724 if (unlikely(ret < 0))
2728 mbuf_append(h_output_head, h_output, h_out_length);
2731 if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT))
2732 mbuf_append(s_output_head, s_output, s_out_length);
2734 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2735 rte_memdump(stderr, "FCW", &desc->req.fcw_td,
2736 sizeof(desc->req.fcw_td) - 8);
2737 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2738 if (check_mbuf_total_left(mbuf_total_left) != 0)
2742 /* One CB (one op) was successfully prepared to enqueue */
2747 harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2748 uint16_t total_enqueued_cbs) {
2749 struct acc100_fcw_ld *fcw;
2750 union acc100_dma_desc *desc;
2751 int next_triplet = 1;
2752 struct rte_mbuf *hq_output_head, *hq_output;
2753 uint16_t harq_dma_length_in, harq_dma_length_out;
2754 uint16_t harq_in_length = op->ldpc_dec.harq_combined_input.length;
2755 if (harq_in_length == 0) {
2756 		rte_bbdev_log(ERR, "Loopback of invalid null size");
2760 int h_comp = check_bit(op->ldpc_dec.op_flags,
2761 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION
2764 harq_in_length = harq_in_length * 8 / 6;
2765 harq_in_length = RTE_ALIGN(harq_in_length, 64);
2766 harq_dma_length_in = harq_in_length * 6 / 8;
2768 harq_in_length = RTE_ALIGN(harq_in_length, 64);
2769 harq_dma_length_in = harq_in_length;
2771 harq_dma_length_out = harq_dma_length_in;
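	/* Worked example with hypothetical values: with 6-bit compression
	 * enabled, 960 compressed bytes expand to 960 * 8 / 6 = 1280 LLRs,
	 * which is already 64-aligned, and the DMA length converts back to
	 * 1280 * 6 / 8 = 960 bytes.
	 */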
2773 bool ddr_mem_in = check_bit(op->ldpc_dec.op_flags,
2774 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE);
2775 union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
2776 uint16_t harq_index = (ddr_mem_in ?
2777 op->ldpc_dec.harq_combined_input.offset :
2778 op->ldpc_dec.harq_combined_output.offset)
2779 / ACC100_HARQ_OFFSET;
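	/* The HARQ memory is assumed to be carved into fixed-size regions
	 * of ACC100_HARQ_OFFSET bytes, so a byte offset maps directly to a
	 * region index.
	 */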
2781 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2782 & q->sw_ring_wrap_mask);
2783 desc = q->ring_addr + desc_idx;
2784 fcw = &desc->req.fcw_ld;
2785 /* Set the FCW from loopback into DDR */
2786 memset(fcw, 0, sizeof(struct acc100_fcw_ld));
2787 fcw->FCWversion = ACC100_FCW_VER;
2790 if (harq_in_length < 16 * ACC100_N_ZC_1)
2792 fcw->ncb = fcw->Zc * ACC100_N_ZC_1;
2797 	rte_bbdev_log(DEBUG, "Loopback IN %d Index %d offset %d length %d %d",
2798 ddr_mem_in, harq_index,
2799 harq_layout[harq_index].offset, harq_in_length,
2800 harq_dma_length_in);
2802 if (ddr_mem_in && (harq_layout[harq_index].offset > 0)) {
2803 fcw->hcin_size0 = harq_layout[harq_index].size0;
2804 fcw->hcin_offset = harq_layout[harq_index].offset;
2805 fcw->hcin_size1 = harq_in_length - fcw->hcin_offset;
2806 harq_dma_length_in = (fcw->hcin_size0 + fcw->hcin_size1);
2808 harq_dma_length_in = harq_dma_length_in * 6 / 8;
2810 fcw->hcin_size0 = harq_in_length;
2812 harq_layout[harq_index].val = 0;
2813 	rte_bbdev_log(DEBUG, "Loopback FCW Config %d %d %d",
2814 fcw->hcin_size0, fcw->hcin_offset, fcw->hcin_size1);
2815 fcw->hcout_size0 = harq_in_length;
2816 fcw->hcin_decomp_mode = h_comp;
2817 fcw->hcout_comp_mode = h_comp;
2821 	/* Set the descriptor prefix. This could be done at polling time */
2822 acc100_header_init(&desc->req);
2824 /* Null LLR input for Decoder */
2825 desc->req.data_ptrs[next_triplet].address =
2827 desc->req.data_ptrs[next_triplet].blen = 2;
2828 desc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
2829 desc->req.data_ptrs[next_triplet].last = 0;
2830 desc->req.data_ptrs[next_triplet].dma_ext = 0;
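	/* The engine still expects an LLR input triplet even in loopback,
	 * so a 2-byte dummy buffer is supplied; only the HARQ pointers
	 * carry real data here.
	 */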
2833 /* HARQ Combine input from either Memory interface */
2835 next_triplet = acc100_dma_fill_blk_type_out(&desc->req,
2836 op->ldpc_dec.harq_combined_input.data,
2837 op->ldpc_dec.harq_combined_input.offset,
2840 ACC100_DMA_BLKID_IN_HARQ);
2842 desc->req.data_ptrs[next_triplet].address =
2843 op->ldpc_dec.harq_combined_input.offset;
2844 desc->req.data_ptrs[next_triplet].blen =
2846 desc->req.data_ptrs[next_triplet].blkid =
2847 ACC100_DMA_BLKID_IN_HARQ;
2848 desc->req.data_ptrs[next_triplet].dma_ext = 1;
2851 desc->req.data_ptrs[next_triplet - 1].last = 1;
2852 desc->req.m2dlen = next_triplet;
2854 /* Dropped decoder hard output */
2855 desc->req.data_ptrs[next_triplet].address =
2856 q->lb_out_addr_iova;
2857 desc->req.data_ptrs[next_triplet].blen = ACC100_BYTES_IN_WORD;
2858 desc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARD;
2859 desc->req.data_ptrs[next_triplet].last = 0;
2860 desc->req.data_ptrs[next_triplet].dma_ext = 0;
2863 /* HARQ Combine output to either Memory interface */
2864 if (check_bit(op->ldpc_dec.op_flags,
2865 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE
2867 desc->req.data_ptrs[next_triplet].address =
2868 op->ldpc_dec.harq_combined_output.offset;
2869 desc->req.data_ptrs[next_triplet].blen =
2870 harq_dma_length_out;
2871 desc->req.data_ptrs[next_triplet].blkid =
2872 ACC100_DMA_BLKID_OUT_HARQ;
2873 desc->req.data_ptrs[next_triplet].dma_ext = 1;
2876 hq_output_head = op->ldpc_dec.harq_combined_output.data;
2877 hq_output = op->ldpc_dec.harq_combined_output.data;
2878 next_triplet = acc100_dma_fill_blk_type_out(
2880 op->ldpc_dec.harq_combined_output.data,
2881 op->ldpc_dec.harq_combined_output.offset,
2882 harq_dma_length_out,
2884 ACC100_DMA_BLKID_OUT_HARQ);
2886 mbuf_append(hq_output_head, hq_output, harq_dma_length_out);
2887 op->ldpc_dec.harq_combined_output.length =
2888 harq_dma_length_out;
2890 desc->req.data_ptrs[next_triplet - 1].last = 1;
2891 desc->req.d2mlen = next_triplet - desc->req.m2dlen;
2892 desc->req.op_addr = op;
2894 /* One CB (one op) was successfully prepared to enqueue */
2898 /* Enqueue one LDPC decode operation for ACC100 device in CB mode */
2900 enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
2901 uint16_t total_enqueued_cbs, bool same_op)
2904 if (unlikely(check_bit(op->ldpc_dec.op_flags,
2905 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK))) {
2906 ret = harq_loopback(q, op, total_enqueued_cbs);
2910 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2911 /* Validate op structure */
2912 if (validate_ldpc_dec_op(op) == -1) {
2913 rte_bbdev_log(ERR, "LDPC decoder validation failed");
2917 union acc100_dma_desc *desc;
2918 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
2919 & q->sw_ring_wrap_mask);
2920 desc = q->ring_addr + desc_idx;
2921 struct rte_mbuf *input, *h_output_head, *h_output;
2922 uint32_t in_offset, h_out_offset, mbuf_total_left, h_out_length = 0;
2923 input = op->ldpc_dec.input.data;
2924 h_output_head = h_output = op->ldpc_dec.hard_output.data;
2925 in_offset = op->ldpc_dec.input.offset;
2926 h_out_offset = op->ldpc_dec.hard_output.offset;
2927 mbuf_total_left = op->ldpc_dec.input.length;
2928 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2929 if (unlikely(input == NULL)) {
2930 rte_bbdev_log(ERR, "Invalid mbuf pointer");
2934 union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
2937 union acc100_dma_desc *prev_desc;
2938 desc_idx = ((q->sw_ring_head + total_enqueued_cbs - 1)
2939 & q->sw_ring_wrap_mask);
2940 prev_desc = q->ring_addr + desc_idx;
2941 uint8_t *prev_ptr = (uint8_t *) prev_desc;
2942 uint8_t *new_ptr = (uint8_t *) desc;
2943 /* Copy first 4 words and BDESCs */
2944 rte_memcpy(new_ptr, prev_ptr, ACC100_5GUL_SIZE_0);
2945 rte_memcpy(new_ptr + ACC100_5GUL_OFFSET_0,
2946 prev_ptr + ACC100_5GUL_OFFSET_0,
2947 ACC100_5GUL_SIZE_1);
2948 desc->req.op_addr = prev_desc->req.op_addr;
2950 rte_memcpy(new_ptr + ACC100_DESC_FCW_OFFSET,
2951 prev_ptr + ACC100_DESC_FCW_OFFSET,
2952 ACC100_FCW_LD_BLEN);
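		/* With identical configuration, reusing the previous descriptor
		 * (including its FCW) is cheaper than rebuilding it; only the
		 * per-op data pointers are refreshed below.
		 */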
2953 acc100_dma_desc_ld_update(op, &desc->req, input, h_output,
2954 &in_offset, &h_out_offset,
2955 &h_out_length, harq_layout);
2957 struct acc100_fcw_ld *fcw;
2958 uint32_t seg_total_left;
2959 fcw = &desc->req.fcw_ld;
2960 acc100_fcw_ld_fill(op, fcw, harq_layout);
2962 		/* Special handling when the mbuf is over-used: for large E, take rm_e as the remaining segment length */
2963 if (fcw->rm_e < ACC100_MAX_E_MBUF)
2964 seg_total_left = rte_pktmbuf_data_len(input)
2967 seg_total_left = fcw->rm_e;
2969 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input, h_output,
2970 &in_offset, &h_out_offset,
2971 &h_out_length, &mbuf_total_left,
2972 &seg_total_left, fcw);
2973 if (unlikely(ret < 0))
2978 mbuf_append(h_output_head, h_output, h_out_length);
2979 #ifndef ACC100_EXT_MEM
2980 if (op->ldpc_dec.harq_combined_output.length > 0) {
2981 /* Push the HARQ output into host memory */
2982 struct rte_mbuf *hq_output_head, *hq_output;
2983 hq_output_head = op->ldpc_dec.harq_combined_output.data;
2984 hq_output = op->ldpc_dec.harq_combined_output.data;
2985 mbuf_append(hq_output_head, hq_output,
2986 op->ldpc_dec.harq_combined_output.length);
2990 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2991 rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
2992 sizeof(desc->req.fcw_ld) - 8);
2993 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
2996 /* One CB (one op) was successfully prepared to enqueue */
3001 /* Enqueue one LDPC decode operation for ACC100 device in TB mode */
3003 enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
3004 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
3006 union acc100_dma_desc *desc = NULL;
3009 uint32_t in_offset, h_out_offset,
3010 h_out_length, mbuf_total_left, seg_total_left;
3011 struct rte_mbuf *input, *h_output_head, *h_output;
3012 uint16_t current_enqueued_cbs = 0;
3014 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3015 /* Validate op structure */
3016 if (validate_ldpc_dec_op(op) == -1) {
3017 rte_bbdev_log(ERR, "LDPC decoder validation failed");
3022 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
3023 & q->sw_ring_wrap_mask);
3024 desc = q->ring_addr + desc_idx;
3025 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
3026 union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
3027 acc100_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout);
3029 input = op->ldpc_dec.input.data;
3030 h_output_head = h_output = op->ldpc_dec.hard_output.data;
3031 in_offset = op->ldpc_dec.input.offset;
3032 h_out_offset = op->ldpc_dec.hard_output.offset;
3034 mbuf_total_left = op->ldpc_dec.input.length;
3035 c = op->ldpc_dec.tb_params.c;
3036 r = op->ldpc_dec.tb_params.r;
3038 while (mbuf_total_left > 0 && r < c) {
3040 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
3042 /* Set up DMA descriptor */
3043 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
3044 & q->sw_ring_wrap_mask);
3045 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
3046 desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
3047 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input,
3048 h_output, &in_offset, &h_out_offset,
3050 &mbuf_total_left, &seg_total_left,
3053 if (unlikely(ret < 0))
3057 mbuf_append(h_output_head, h_output, h_out_length);
3059 /* Set total number of CBs in TB */
3060 desc->req.cbs_in_tb = cbs_in_tb;
3061 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3062 		rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
3063 				sizeof(desc->req.fcw_ld) - 8);
3064 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
3067 if (seg_total_left == 0) {
3068 /* Go to the next mbuf */
3069 input = input->next;
3071 h_output = h_output->next;
3074 total_enqueued_cbs++;
3075 current_enqueued_cbs++;
3079 if (unlikely(desc == NULL))
3080 return current_enqueued_cbs;
3082 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3083 if (check_mbuf_total_left(mbuf_total_left) != 0)
3086 /* Set SDone on last CB descriptor for TB mode */
3087 desc->req.sdone_enable = 1;
3088 desc->req.irq_enable = q->irq_enable;
3090 return current_enqueued_cbs;
3093 /* Enqueue one decode operation for ACC100 device in TB mode */
3095 enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
3096 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
3098 union acc100_dma_desc *desc = NULL;
3101 uint32_t in_offset, h_out_offset, s_out_offset, s_out_length,
3102 h_out_length, mbuf_total_left, seg_total_left;
3103 struct rte_mbuf *input, *h_output_head, *h_output,
3104 *s_output_head, *s_output;
3105 uint16_t current_enqueued_cbs = 0;
3107 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3108 /* Validate op structure */
3109 if (validate_dec_op(op) == -1) {
3110 rte_bbdev_log(ERR, "Turbo decoder validation failed");
3115 uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
3116 & q->sw_ring_wrap_mask);
3117 desc = q->ring_addr + desc_idx;
3118 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
3119 acc100_fcw_td_fill(op, &desc->req.fcw_td);
3121 input = op->turbo_dec.input.data;
3122 h_output_head = h_output = op->turbo_dec.hard_output.data;
3123 s_output_head = s_output = op->turbo_dec.soft_output.data;
3124 in_offset = op->turbo_dec.input.offset;
3125 h_out_offset = op->turbo_dec.hard_output.offset;
3126 s_out_offset = op->turbo_dec.soft_output.offset;
3127 h_out_length = s_out_length = 0;
3128 mbuf_total_left = op->turbo_dec.input.length;
3129 c = op->turbo_dec.tb_params.c;
3130 r = op->turbo_dec.tb_params.r;
3132 while (mbuf_total_left > 0 && r < c) {
3134 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
3136 /* Set up DMA descriptor */
3137 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
3138 & q->sw_ring_wrap_mask);
3139 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
3140 desc->req.data_ptrs[0].blen = ACC100_FCW_TD_BLEN;
3141 ret = acc100_dma_desc_td_fill(op, &desc->req, &input,
3142 h_output, s_output, &in_offset, &h_out_offset,
3143 &s_out_offset, &h_out_length, &s_out_length,
3144 &mbuf_total_left, &seg_total_left, r);
3146 if (unlikely(ret < 0))
3150 mbuf_append(h_output_head, h_output, h_out_length);
3153 if (check_bit(op->turbo_dec.op_flags,
3154 RTE_BBDEV_TURBO_SOFT_OUTPUT))
3155 mbuf_append(s_output_head, s_output, s_out_length);
3157 /* Set total number of CBs in TB */
3158 desc->req.cbs_in_tb = cbs_in_tb;
3159 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3160 rte_memdump(stderr, "FCW", &desc->req.fcw_td,
3161 sizeof(desc->req.fcw_td) - 8);
3162 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
3165 if (seg_total_left == 0) {
3166 /* Go to the next mbuf */
3167 input = input->next;
3169 h_output = h_output->next;
3172 if (check_bit(op->turbo_dec.op_flags,
3173 RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
3174 s_output = s_output->next;
3179 total_enqueued_cbs++;
3180 current_enqueued_cbs++;
3184 if (unlikely(desc == NULL))
3185 return current_enqueued_cbs;
3187 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3188 if (check_mbuf_total_left(mbuf_total_left) != 0)
3191 /* Set SDone on last CB descriptor for TB mode */
3192 desc->req.sdone_enable = 1;
3193 desc->req.irq_enable = q->irq_enable;
3195 return current_enqueued_cbs;
3198 /* Calculates number of CBs in processed encoder TB based on 'r' and input length. */
3201 static inline uint8_t
3202 get_num_cbs_in_tb_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)
3204 uint8_t c, c_neg, r, crc24_bits = 0;
3205 uint16_t k, k_neg, k_pos;
3206 uint8_t cbs_in_tb = 0;
3209 length = turbo_enc->input.length;
3210 r = turbo_enc->tb_params.r;
3211 c = turbo_enc->tb_params.c;
3212 c_neg = turbo_enc->tb_params.c_neg;
3213 k_neg = turbo_enc->tb_params.k_neg;
3214 k_pos = turbo_enc->tb_params.k_pos;
3216 if (check_bit(turbo_enc->op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
3218 while (length > 0 && r < c) {
3219 k = (r < c_neg) ? k_neg : k_pos;
3220 length -= (k - crc24_bits) >> 3;
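		/* k is in bits; subtracting the CRC bits and shifting by 3
		 * converts the per-CB payload to bytes.
		 */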
3228 /* Calculates number of CBs in processed decoder TB based on 'r' and input length. */
3231 static inline uint16_t
3232 get_num_cbs_in_tb_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)
3234 uint8_t c, c_neg, r = 0;
3235 uint16_t kw, k, k_neg, k_pos, cbs_in_tb = 0;
3238 length = turbo_dec->input.length;
3239 r = turbo_dec->tb_params.r;
3240 c = turbo_dec->tb_params.c;
3241 c_neg = turbo_dec->tb_params.c_neg;
3242 k_neg = turbo_dec->tb_params.k_neg;
3243 k_pos = turbo_dec->tb_params.k_pos;
3244 while (length > 0 && r < c) {
3245 k = (r < c_neg) ? k_neg : k_pos;
3246 kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
3255 /* Calculates number of CBs in processed LDPC decoder TB based on 'r' and input length. */
3258 static inline uint16_t
3259 get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)
3261 uint16_t r, cbs_in_tb = 0;
3262 int32_t length = ldpc_dec->input.length;
3263 r = ldpc_dec->tb_params.r;
3264 while (length > 0 && r < ldpc_dec->tb_params.c) {
3265 length -= (r < ldpc_dec->tb_params.cab) ?
3266 ldpc_dec->tb_params.ea :
3267 ldpc_dec->tb_params.eb;
3274 /* Enqueue encode operations for ACC100 device in CB mode. */
3276 acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
3277 struct rte_bbdev_enc_op **ops, uint16_t num)
3279 struct acc100_queue *q = q_data->queue_private;
3280 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
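	/* head and tail are free-running counters, so the free-slot count is
	 * depth - (head - tail), rearranged to avoid an unsigned underflow.
	 */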
3282 union acc100_dma_desc *desc;
3285 for (i = 0; i < num; ++i) {
3286 		/* Check if there is available space for further processing */
3287 if (unlikely(avail - 1 < 0))
3291 ret = enqueue_enc_one_op_cb(q, ops[i], i);
3296 if (unlikely(i == 0))
3297 return 0; /* Nothing to enqueue */
3299 	/* Set SDone in last CB in enqueued ops for CB mode */
3300 desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3301 & q->sw_ring_wrap_mask);
3302 desc->req.sdone_enable = 1;
3303 desc->req.irq_enable = q->irq_enable;
3305 acc100_dma_enqueue(q, i, &q_data->queue_stats);
3308 q_data->queue_stats.enqueued_count += i;
3309 q_data->queue_stats.enqueue_err_count += num - i;
3313 /* Check if we can mux encode operations with a common FCW */
3315 check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {
3319 for (i = 1; i < num; ++i) {
3320 /* Only mux compatible code blocks */
3321 if (memcmp((uint8_t *)(&ops[i]->ldpc_enc) + ACC100_ENC_OFFSET,
3322 (uint8_t *)(&ops[0]->ldpc_enc) +
3324 ACC100_CMP_ENC_SIZE) != 0)
3330 /* Enqueue LDPC encode operations for ACC100 device in CB mode. */
3331 static inline uint16_t
3332 acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
3333 struct rte_bbdev_enc_op **ops, uint16_t num)
3335 struct acc100_queue *q = q_data->queue_private;
3336 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3338 union acc100_dma_desc *desc;
3339 int ret, desc_idx = 0;
3340 int16_t enq, left = num;
3343 if (unlikely(avail < 1))
3346 enq = RTE_MIN(left, ACC100_MUX_5GDL_DESC);
3347 if (check_mux(&ops[i], enq)) {
3348 ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],
3354 ret = enqueue_ldpc_enc_one_op_cb(q, ops[i], desc_idx);
3363 if (unlikely(i == 0))
3364 return 0; /* Nothing to enqueue */
3366 	/* Set SDone in last CB in enqueued ops for CB mode */
3367 desc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1)
3368 & q->sw_ring_wrap_mask);
3369 desc->req.sdone_enable = 1;
3370 desc->req.irq_enable = q->irq_enable;
3372 acc100_dma_enqueue(q, desc_idx, &q_data->queue_stats);
3375 q_data->queue_stats.enqueued_count += i;
3376 q_data->queue_stats.enqueue_err_count += num - i;
3381 /* Enqueue encode operations for ACC100 device in TB mode. */
3383 acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,
3384 struct rte_bbdev_enc_op **ops, uint16_t num)
3386 struct acc100_queue *q = q_data->queue_private;
3387 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3388 uint16_t i, enqueued_cbs = 0;
3392 for (i = 0; i < num; ++i) {
3393 cbs_in_tb = get_num_cbs_in_tb_enc(&ops[i]->turbo_enc);
3394 		/* Check if there is available space for further processing */
3395 if (unlikely(avail - cbs_in_tb < 0))
3399 ret = enqueue_enc_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);
3402 enqueued_cbs += ret;
3404 if (unlikely(enqueued_cbs == 0))
3405 return 0; /* Nothing to enqueue */
3407 acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3410 q_data->queue_stats.enqueued_count += i;
3411 q_data->queue_stats.enqueue_err_count += num - i;
3416 /* Enqueue encode operations for ACC100 device. */
3418 acc100_enqueue_enc(struct rte_bbdev_queue_data *q_data,
3419 struct rte_bbdev_enc_op **ops, uint16_t num)
3421 if (unlikely(num == 0))
3423 if (ops[0]->turbo_enc.code_block_mode == 0)
3424 return acc100_enqueue_enc_tb(q_data, ops, num);
3426 return acc100_enqueue_enc_cb(q_data, ops, num);
3429 /* Enqueue LDPC encode operations for ACC100 device. */
3431 acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
3432 struct rte_bbdev_enc_op **ops, uint16_t num)
3434 if (unlikely(num == 0))
3436 if (ops[0]->ldpc_enc.code_block_mode == 0)
3437 return acc100_enqueue_enc_tb(q_data, ops, num);
3439 return acc100_enqueue_ldpc_enc_cb(q_data, ops, num);
3443 /* Enqueue decode operations for ACC100 device in CB mode */
3445 acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
3446 struct rte_bbdev_dec_op **ops, uint16_t num)
3448 struct acc100_queue *q = q_data->queue_private;
3449 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3451 union acc100_dma_desc *desc;
3454 for (i = 0; i < num; ++i) {
3455 		/* Check if there is available space for further processing */
3456 if (unlikely(avail - 1 < 0))
3460 ret = enqueue_dec_one_op_cb(q, ops[i], i);
3465 if (unlikely(i == 0))
3466 return 0; /* Nothing to enqueue */
3468 	/* Set SDone in last CB in enqueued ops for CB mode */
3469 desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3470 & q->sw_ring_wrap_mask);
3471 desc->req.sdone_enable = 1;
3472 desc->req.irq_enable = q->irq_enable;
3474 acc100_dma_enqueue(q, i, &q_data->queue_stats);
3477 q_data->queue_stats.enqueued_count += i;
3478 q_data->queue_stats.enqueue_err_count += num - i;
3483 /* Check if we can mux two LDPC decode operations with a common FCW */
3485 cmp_ldpc_dec_op(struct rte_bbdev_dec_op **ops) {
3486 /* Only mux compatible code blocks */
3487 if (memcmp((uint8_t *)(&ops[0]->ldpc_dec) + ACC100_DEC_OFFSET,
3488 (uint8_t *)(&ops[1]->ldpc_dec) +
3489 ACC100_DEC_OFFSET, ACC100_CMP_DEC_SIZE) != 0) {
3496 /* Enqueue LDPC decode operations for ACC100 device in TB mode */
3498 acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,
3499 struct rte_bbdev_dec_op **ops, uint16_t num)
3501 struct acc100_queue *q = q_data->queue_private;
3502 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3503 uint16_t i, enqueued_cbs = 0;
3507 for (i = 0; i < num; ++i) {
3508 cbs_in_tb = get_num_cbs_in_tb_ldpc_dec(&ops[i]->ldpc_dec);
3509 		/* Check if there is available space for further processing */
3510 if (unlikely(avail - cbs_in_tb < 0))
3514 ret = enqueue_ldpc_dec_one_op_tb(q, ops[i],
3515 enqueued_cbs, cbs_in_tb);
3518 enqueued_cbs += ret;
3521 acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3524 q_data->queue_stats.enqueued_count += i;
3525 q_data->queue_stats.enqueue_err_count += num - i;
3529 /* Enqueue LDPC decode operations for ACC100 device in CB mode */
3531 acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
3532 struct rte_bbdev_dec_op **ops, uint16_t num)
3534 struct acc100_queue *q = q_data->queue_private;
3535 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3537 union acc100_dma_desc *desc;
3539 bool same_op = false;
3540 for (i = 0; i < num; ++i) {
3541 		/* Check if there is available space for further processing */
3542 if (unlikely(avail < 1))
3547 same_op = cmp_ldpc_dec_op(&ops[i-1]);
3548 		rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d",
3549 i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index,
3550 ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count,
3551 ops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c,
3552 ops[i]->ldpc_dec.n_cb, ops[i]->ldpc_dec.q_m,
3553 ops[i]->ldpc_dec.n_filler, ops[i]->ldpc_dec.cb_params.e,
3555 ret = enqueue_ldpc_dec_one_op_cb(q, ops[i], i, same_op);
3560 if (unlikely(i == 0))
3561 return 0; /* Nothing to enqueue */
3563 	/* Set SDone in last CB in enqueued ops for CB mode */
3564 desc = q->ring_addr + ((q->sw_ring_head + i - 1)
3565 & q->sw_ring_wrap_mask);
3567 desc->req.sdone_enable = 1;
3568 desc->req.irq_enable = q->irq_enable;
3570 acc100_dma_enqueue(q, i, &q_data->queue_stats);
3573 q_data->queue_stats.enqueued_count += i;
3574 q_data->queue_stats.enqueue_err_count += num - i;
3579 /* Enqueue decode operations for ACC100 device in TB mode */
3581 acc100_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data,
3582 struct rte_bbdev_dec_op **ops, uint16_t num)
3584 struct acc100_queue *q = q_data->queue_private;
3585 int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
3586 uint16_t i, enqueued_cbs = 0;
3590 for (i = 0; i < num; ++i) {
3591 cbs_in_tb = get_num_cbs_in_tb_dec(&ops[i]->turbo_dec);
3592 		/* Check if there is available space for further processing */
3593 if (unlikely(avail - cbs_in_tb < 0))
3597 ret = enqueue_dec_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);
3600 enqueued_cbs += ret;
3603 acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
3606 q_data->queue_stats.enqueued_count += i;
3607 q_data->queue_stats.enqueue_err_count += num - i;
3612 /* Enqueue decode operations for ACC100 device. */
3614 acc100_enqueue_dec(struct rte_bbdev_queue_data *q_data,
3615 struct rte_bbdev_dec_op **ops, uint16_t num)
3617 if (unlikely(num == 0))
3619 if (ops[0]->turbo_dec.code_block_mode == 0)
3620 return acc100_enqueue_dec_tb(q_data, ops, num);
3622 return acc100_enqueue_dec_cb(q_data, ops, num);
3625 /* Enqueue LDPC decode operations for ACC100 device. */
3627 acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
3628 struct rte_bbdev_dec_op **ops, uint16_t num)
3630 struct acc100_queue *q = q_data->queue_private;
3631 int32_t aq_avail = q->aq_depth +
3632 (q->aq_dequeued - q->aq_enqueued) / 128;
3634 if (unlikely((aq_avail == 0) || (num == 0)))
3637 if (ops[0]->ldpc_dec.code_block_mode == 0)
3638 return acc100_enqueue_ldpc_dec_tb(q_data, ops, num);
3640 return acc100_enqueue_ldpc_dec_cb(q_data, ops, num);
3644 /* Dequeue one encode operation from ACC100 device in CB mode */
3646 dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
3647 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
3649 union acc100_dma_desc *desc, atom_desc;
3650 union acc100_dma_rsp_desc rsp;
3651 struct rte_bbdev_enc_op *op;
3654 desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
3655 & q->sw_ring_wrap_mask);
3656 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3659 /* Check fdone bit */
3660 if (!(atom_desc.rsp.val & ACC100_FDONE))
3663 rsp.val = atom_desc.rsp.val;
3664 rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
3667 op = desc->req.op_addr;
3669 	/* Clear status; it will be set based on the response */
3672 op->status |= ((rsp.input_err)
3673 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3674 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3675 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3677 if (desc->req.last_desc_in_batch) {
3679 desc->req.last_desc_in_batch = 0;
3681 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3682 	desc->rsp.add_info_0 = 0; /* Reserved bits */
3683 	desc->rsp.add_info_1 = 0; /* Reserved bits */
3685 	/* Flag that the muxing causes loss of opaque data */
3686 op->opaque_data = (void *)-1;
3687 for (i = 0 ; i < desc->req.numCBs; i++)
3690 /* One CB (op) was successfully dequeued */
3691 return desc->req.numCBs;
3694 /* Dequeue one encode operation from ACC100 device in TB mode */
3696 dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
3697 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
3699 union acc100_dma_desc *desc, *last_desc, atom_desc;
3700 union acc100_dma_rsp_desc rsp;
3701 struct rte_bbdev_enc_op *op;
3703 uint16_t current_dequeued_cbs = 0, cbs_in_tb;
3705 desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
3706 & q->sw_ring_wrap_mask);
3707 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3710 /* Check fdone bit */
3711 if (!(atom_desc.rsp.val & ACC100_FDONE))
3714 /* Get number of CBs in dequeued TB */
3715 cbs_in_tb = desc->req.cbs_in_tb;
3717 last_desc = q->ring_addr + ((q->sw_ring_tail
3718 + total_dequeued_cbs + cbs_in_tb - 1)
3719 & q->sw_ring_wrap_mask);
3720 /* Check if last CB in TB is ready to dequeue (and thus
3721 * the whole TB) - checking sdone bit. If not return.
3723 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
3725 if (!(atom_desc.rsp.val & ACC100_SDONE))
3729 op = desc->req.op_addr;
3731 	/* Clear status; it will be set based on the response */
3734 while (i < cbs_in_tb) {
3735 desc = q->ring_addr + ((q->sw_ring_tail
3736 + total_dequeued_cbs)
3737 & q->sw_ring_wrap_mask);
3738 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3740 rsp.val = atom_desc.rsp.val;
3741 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
3744 op->status |= ((rsp.input_err)
3745 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3746 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3747 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3749 if (desc->req.last_desc_in_batch) {
3751 desc->req.last_desc_in_batch = 0;
3753 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3754 desc->rsp.add_info_0 = 0;
3755 desc->rsp.add_info_1 = 0;
3756 total_dequeued_cbs++;
3757 current_dequeued_cbs++;
3763 return current_dequeued_cbs;
3766 /* Dequeue one decode operation from ACC100 device in CB mode */
3768 dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
3769 struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3770 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3772 union acc100_dma_desc *desc, atom_desc;
3773 union acc100_dma_rsp_desc rsp;
3774 struct rte_bbdev_dec_op *op;
3776 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3777 & q->sw_ring_wrap_mask);
3778 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3781 /* Check fdone bit */
3782 if (!(atom_desc.rsp.val & ACC100_FDONE))
3785 rsp.val = atom_desc.rsp.val;
3786 rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
3789 op = desc->req.op_addr;
3791 	/* Clear status; it will be set based on the response */
3793 op->status |= ((rsp.input_err)
3794 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3795 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3796 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3797 if (op->status != 0) {
3798 q_data->queue_stats.dequeue_err_count++;
3799 acc100_check_ir(q->d);
3802 	/* Report CRC failure only when no other error was set */
3804 op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3805 op->turbo_dec.iter_count = (uint8_t) rsp.iter_cnt / 2;
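	/* The turbo engine is assumed to report half-iterations, hence the
	 * division by 2 to expose full iterations.
	 */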
3806 /* Check if this is the last desc in batch (Atomic Queue) */
3807 if (desc->req.last_desc_in_batch) {
3809 desc->req.last_desc_in_batch = 0;
3811 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3812 desc->rsp.add_info_0 = 0;
3813 desc->rsp.add_info_1 = 0;
3816 /* One CB (op) was successfully dequeued */
3820 /* Dequeue one LDPC decode operation from ACC100 device in CB mode */
3822 dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
3823 struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3824 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3826 union acc100_dma_desc *desc, atom_desc;
3827 union acc100_dma_rsp_desc rsp;
3828 struct rte_bbdev_dec_op *op;
3830 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3831 & q->sw_ring_wrap_mask);
3832 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3835 /* Check fdone bit */
3836 if (!(atom_desc.rsp.val & ACC100_FDONE))
3839 rsp.val = atom_desc.rsp.val;
3842 op = desc->req.op_addr;
3844 	/* Clear status; it will be set based on the response */
3846 op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;
3847 op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;
3848 op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;
3849 if (op->status != 0)
3850 q_data->queue_stats.dequeue_err_count++;
3852 op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3853 if (op->ldpc_dec.hard_output.length > 0 && !rsp.synd_ok)
3854 op->status |= 1 << RTE_BBDEV_SYNDROME_ERROR;
3855 op->ldpc_dec.iter_count = (uint8_t) rsp.iter_cnt;
3857 if (op->status & (1 << RTE_BBDEV_DRV_ERROR))
3858 acc100_check_ir(q->d);
3860 /* Check if this is the last desc in batch (Atomic Queue) */
3861 if (desc->req.last_desc_in_batch) {
3863 desc->req.last_desc_in_batch = 0;
3866 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3867 desc->rsp.add_info_0 = 0;
3868 desc->rsp.add_info_1 = 0;
3872 /* One CB (op) was successfully dequeued */
3876 /* Dequeue one decode operation from ACC100 device in TB mode. */
3878 dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
3879 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
3881 union acc100_dma_desc *desc, *last_desc, atom_desc;
3882 union acc100_dma_rsp_desc rsp;
3883 struct rte_bbdev_dec_op *op;
3884 uint8_t cbs_in_tb = 1, cb_idx = 0;
3886 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3887 & q->sw_ring_wrap_mask);
3888 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3891 /* Check fdone bit */
3892 if (!(atom_desc.rsp.val & ACC100_FDONE))
3896 op = desc->req.op_addr;
3898 /* Get number of CBs in dequeued TB */
3899 cbs_in_tb = desc->req.cbs_in_tb;
3901 last_desc = q->ring_addr + ((q->sw_ring_tail
3902 + dequeued_cbs + cbs_in_tb - 1)
3903 & q->sw_ring_wrap_mask);
3904 /* Check if last CB in TB is ready to dequeue (and thus
3905 * the whole TB) - checking sdone bit. If not return.
3907 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
3909 if (!(atom_desc.rsp.val & ACC100_SDONE))
3912 	/* Clear status; it will be set based on the response */
3915 	/* Read remaining CBs if any exist */
3916 while (cb_idx < cbs_in_tb) {
3917 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3918 & q->sw_ring_wrap_mask);
3919 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
3921 rsp.val = atom_desc.rsp.val;
3922 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
3925 op->status |= ((rsp.input_err)
3926 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
3927 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3928 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
3930 		/* Report CRC failure only when no other error was set */
3932 op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
3933 op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
3934 op->turbo_dec.iter_count);
3936 /* Check if this is the last desc in batch (Atomic Queue) */
3937 if (desc->req.last_desc_in_batch) {
3939 desc->req.last_desc_in_batch = 0;
3941 desc->rsp.val = ACC100_DMA_DESC_TYPE;
3942 desc->rsp.add_info_0 = 0;
3943 desc->rsp.add_info_1 = 0;
3953 /* Dequeue encode operations from ACC100 device. */
3955 acc100_dequeue_enc(struct rte_bbdev_queue_data *q_data,
3956 struct rte_bbdev_enc_op **ops, uint16_t num)
3958 struct acc100_queue *q = q_data->queue_private;
3959 uint16_t dequeue_num;
3960 uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
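	/* Number of descriptors enqueued and not yet dequeued */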
3961 uint32_t aq_dequeued = 0;
3962 uint16_t i, dequeued_cbs = 0;
3963 struct rte_bbdev_enc_op *op;
3966 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3967 if (unlikely(ops == NULL || q == NULL)) {
3968 		rte_bbdev_log_debug("Unexpected NULL pointer");
3973 dequeue_num = (avail < num) ? avail : num;
3975 for (i = 0; i < dequeue_num; ++i) {
3976 op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
3977 & q->sw_ring_wrap_mask))->req.op_addr;
3978 if (op->turbo_enc.code_block_mode == 0)
3979 ret = dequeue_enc_one_op_tb(q, &ops[i], dequeued_cbs,
3982 ret = dequeue_enc_one_op_cb(q, &ops[i], dequeued_cbs,
3987 dequeued_cbs += ret;
3990 q->aq_dequeued += aq_dequeued;
3991 q->sw_ring_tail += dequeued_cbs;
3993 	/* Update dequeue stats */
3994 q_data->queue_stats.dequeued_count += i;
3999 /* Dequeue LDPC encode operations from ACC100 device. */
4001 acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
4002 struct rte_bbdev_enc_op **ops, uint16_t num)
4004 struct acc100_queue *q = q_data->queue_private;
4005 uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
4006 uint32_t aq_dequeued = 0;
4007 uint16_t dequeue_num, i, dequeued_cbs = 0, dequeued_descs = 0;
4010 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4011 	if (unlikely(ops == NULL || q == NULL))
4015 dequeue_num = RTE_MIN(avail, num);
4017 for (i = 0; i < dequeue_num; i++) {
4018 ret = dequeue_enc_one_op_cb(q, &ops[dequeued_cbs],
4019 dequeued_descs, &aq_dequeued);
4022 dequeued_cbs += ret;
4024 if (dequeued_cbs >= num)
4028 q->aq_dequeued += aq_dequeued;
4029 q->sw_ring_tail += dequeued_descs;
4031 	/* Update dequeue stats */
4032 q_data->queue_stats.dequeued_count += dequeued_cbs;
4034 return dequeued_cbs;
4038 /* Dequeue decode operations from ACC100 device. */
4040 acc100_dequeue_dec(struct rte_bbdev_queue_data *q_data,
4041 struct rte_bbdev_dec_op **ops, uint16_t num)
4043 struct acc100_queue *q = q_data->queue_private;
4044 uint16_t dequeue_num;
4045 uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
4046 uint32_t aq_dequeued = 0;
4048 uint16_t dequeued_cbs = 0;
4049 struct rte_bbdev_dec_op *op;
4052 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4053 	if (unlikely(ops == NULL || q == NULL))
4057 dequeue_num = (avail < num) ? avail : num;
4059 for (i = 0; i < dequeue_num; ++i) {
4060 op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
4061 & q->sw_ring_wrap_mask))->req.op_addr;
4062 if (op->turbo_dec.code_block_mode == 0)
4063 ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
4066 ret = dequeue_dec_one_op_cb(q_data, q, &ops[i],
4067 dequeued_cbs, &aq_dequeued);
4071 dequeued_cbs += ret;
4074 q->aq_dequeued += aq_dequeued;
4075 q->sw_ring_tail += dequeued_cbs;
4077 	/* Update dequeue stats */
4078 q_data->queue_stats.dequeued_count += i;
4083 /* Dequeue decode operations from ACC100 device. */
4085 acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
4086 struct rte_bbdev_dec_op **ops, uint16_t num)
4088 struct acc100_queue *q = q_data->queue_private;
4089 uint16_t dequeue_num;
4090 uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
4091 uint32_t aq_dequeued = 0;
4093 uint16_t dequeued_cbs = 0;
4094 struct rte_bbdev_dec_op *op;
4097 #ifdef RTE_LIBRTE_BBDEV_DEBUG
4098 	if (unlikely(ops == NULL || q == NULL))
4102 dequeue_num = RTE_MIN(avail, num);
4104 for (i = 0; i < dequeue_num; ++i) {
4105 op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
4106 & q->sw_ring_wrap_mask))->req.op_addr;
4107 if (op->ldpc_dec.code_block_mode == 0)
4108 ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
4111 ret = dequeue_ldpc_dec_one_op_cb(
4112 q_data, q, &ops[i], dequeued_cbs,
4117 dequeued_cbs += ret;
4120 q->aq_dequeued += aq_dequeued;
4121 q->sw_ring_tail += dequeued_cbs;
4123 	/* Update dequeue stats */
4124 q_data->queue_stats.dequeued_count += i;
/* Initialization Function */
static void
acc100_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev->dev_ops = &acc100_bbdev_ops;
	dev->enqueue_enc_ops = acc100_enqueue_enc;
	dev->enqueue_dec_ops = acc100_enqueue_dec;
	dev->dequeue_enc_ops = acc100_dequeue_enc;
	dev->dequeue_dec_ops = acc100_dequeue_dec;
	dev->enqueue_ldpc_enc_ops = acc100_enqueue_ldpc_enc;
	dev->enqueue_ldpc_dec_ops = acc100_enqueue_ldpc_dec;
	dev->dequeue_ldpc_enc_ops = acc100_dequeue_ldpc_enc;
	dev->dequeue_ldpc_dec_ops = acc100_dequeue_ldpc_dec;

	((struct acc100_device *) dev->data->dev_private)->pf_device =
			!strcmp(drv->driver.name,
					RTE_STR(ACC100PF_DRIVER_NAME));
	((struct acc100_device *) dev->data->dev_private)->mmio_base =
			pci_dev->mem_resource[0].addr;

	rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"",
			drv->driver.name, dev->data->name,
			(void *)pci_dev->mem_resource[0].addr,
			pci_dev->mem_resource[0].phys_addr);
}
static int acc100_pci_probe(struct rte_pci_driver *pci_drv,
	struct rte_pci_device *pci_dev)
{
	struct rte_bbdev *bbdev = NULL;
	char dev_name[RTE_BBDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		rte_bbdev_log(ERR, "NULL PCI device");
		return -EINVAL;
	}

	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));

	/* Allocate memory to be used privately by drivers */
	bbdev = rte_bbdev_allocate(pci_dev->device.name);
	if (bbdev == NULL)
		return -ENODEV;

	/* allocate device private memory */
	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
			sizeof(struct acc100_device), RTE_CACHE_LINE_SIZE,
			pci_dev->device.numa_node);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_log(CRIT,
				"Allocate of %zu bytes for device \"%s\" failed",
				sizeof(struct acc100_device), dev_name);
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	/* Fill HW specific part of device structure */
	bbdev->device = &pci_dev->device;
	bbdev->intr_handle = &pci_dev->intr_handle;
	bbdev->data->socket_id = pci_dev->device.numa_node;

	/* Invoke ACC100 device initialization function */
	acc100_bbdev_init(bbdev, pci_drv);

	rte_bbdev_log_debug("Initialised bbdev %s (id = %u)",
			dev_name, bbdev->data->dev_id);
	return 0;
}
static int acc100_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_bbdev *bbdev;
	int ret;
	uint8_t dev_id;

	if (pci_dev == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
	if (bbdev == NULL) {
		rte_bbdev_log(CRIT,
				"Couldn't find HW dev \"%s\" to uninitialise it",
				pci_dev->device.name);
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free device private memory before close */
	rte_free(bbdev->data->dev_private);

	ret = rte_bbdev_close(dev_id);
	if (ret < 0)
		rte_bbdev_log(ERR,
				"Device %i failed to close during uninit: %i",
				dev_id, ret);

	/* release bbdev from library */
	rte_bbdev_release(bbdev);
	rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);

	return 0;
}
static struct rte_pci_driver acc100_pci_pf_driver = {
	.probe = acc100_pci_probe,
	.remove = acc100_pci_remove,
	.id_table = pci_id_acc100_pf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
};

static struct rte_pci_driver acc100_pci_vf_driver = {
	.probe = acc100_pci_probe,
	.remove = acc100_pci_remove,
	.id_table = pci_id_acc100_vf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
};

RTE_PMD_REGISTER_PCI(ACC100PF_DRIVER_NAME, acc100_pci_pf_driver);
RTE_PMD_REGISTER_PCI_TABLE(ACC100PF_DRIVER_NAME, pci_id_acc100_pf_map);
RTE_PMD_REGISTER_PCI(ACC100VF_DRIVER_NAME, acc100_pci_vf_driver);
RTE_PMD_REGISTER_PCI_TABLE(ACC100VF_DRIVER_NAME, pci_id_acc100_vf_map);
/*
 * Workaround implementation to fix the power-on status of some 5GUL engines.
 * This requires DMA permission if ported outside DPDK.
 * It works by resolving the state of these engines with a dummy operation,
 * then resetting the engines so that their state is reliably defined.
 */
static void
poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,
		struct rte_acc100_conf *conf)
{
	int i, template_idx, qg_idx;
	uint32_t address, status, value;
	printf("Need to clear power-on 5GUL status in internal memory\n");
	/* Reset LDPC Cores */
	for (i = 0; i < ACC100_ENGINES_MAX; i++)
		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
				ACC100_ENGINE_OFFSET * i, ACC100_RESET_HI);
	usleep(ACC100_LONG_WAIT);
	for (i = 0; i < ACC100_ENGINES_MAX; i++)
		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
				ACC100_ENGINE_OFFSET * i, ACC100_RESET_LO);
	usleep(ACC100_LONG_WAIT);
	/* Prepare dummy workload */
	alloc_2x64mb_sw_rings_mem(bbdev, d, 0);
	/* Set base addresses */
	uint32_t phys_high = (uint32_t)(d->sw_rings_iova >> 32);
	uint32_t phys_low = (uint32_t)(d->sw_rings_iova &
			~(ACC100_SIZE_64MBYTE-1));
	acc100_reg_write(d, HWPfDmaFec5GulDescBaseHiRegVf, phys_high);
	acc100_reg_write(d, HWPfDmaFec5GulDescBaseLoRegVf, phys_low);
	/* Descriptor for a dummy 5GUL code block processing */
	union acc100_dma_desc *desc = NULL;
	desc = d->sw_rings;
	desc->req.data_ptrs[0].address = d->sw_rings_iova +
			ACC100_DESC_FCW_OFFSET;
	desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
	desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
	desc->req.data_ptrs[0].last = 0;
	desc->req.data_ptrs[0].dma_ext = 0;
	desc->req.data_ptrs[1].address = d->sw_rings_iova + 512;
	desc->req.data_ptrs[1].blkid = ACC100_DMA_BLKID_IN;
	desc->req.data_ptrs[1].last = 1;
	desc->req.data_ptrs[1].dma_ext = 0;
	desc->req.data_ptrs[1].blen = 44;
	desc->req.data_ptrs[2].address = d->sw_rings_iova + 1024;
	desc->req.data_ptrs[2].blkid = ACC100_DMA_BLKID_OUT_ENC;
	desc->req.data_ptrs[2].last = 1;
	desc->req.data_ptrs[2].dma_ext = 0;
	desc->req.data_ptrs[2].blen = 5;
	/* Dummy FCW */
	desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
	desc->req.fcw_ld.qm = 1;
	desc->req.fcw_ld.nfiller = 30;
	desc->req.fcw_ld.BG = 2 - 1;
	desc->req.fcw_ld.Zc = 7;
	desc->req.fcw_ld.ncb = 350;
	desc->req.fcw_ld.rm_e = 4;
	desc->req.fcw_ld.itmax = 10;
	desc->req.fcw_ld.gain_i = 1;
	desc->req.fcw_ld.gain_h = 1;
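	/*
	 * Editorial note on the dummy FCW above: it describes a single LDPC
	 * code block on base graph 2 (the BG field is zero-based, hence
	 * 2 - 1) with lifting size Zc = 7, consistent with the BG2 circular
	 * buffer length ncb = 50 * Zc = 350.
	 */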
	int engines_to_restart[ACC100_SIG_UL_5G_LAST + 1] = {0};
	int num_failed_engine = 0;
	/* Detect engines in undefined state */
	for (template_idx = ACC100_SIG_UL_5G;
			template_idx <= ACC100_SIG_UL_5G_LAST;
			template_idx++) {
		/* Check engine power-on status */
		address = HwPfFecUl5gIbDebugReg +
				ACC100_ENGINE_OFFSET * template_idx;
		status = (acc100_reg_read(d, address) >> 4) & 0xF;
		if (status == 0) {
			engines_to_restart[num_failed_engine] = template_idx;
			num_failed_engine++;
		}
	}
	int numQqsAcc = conf->q_ul_5g.num_qgroups;
	int numQgs = conf->q_ul_5g.num_qgroups;
	value = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	/* Force each engine which is in unspecified state */
	for (i = 0; i < num_failed_engine; i++) {
		int failed_engine = engines_to_restart[i];
		printf("Force engine %d\n", failed_engine);
		for (template_idx = ACC100_SIG_UL_5G;
				template_idx <= ACC100_SIG_UL_5G_LAST;
				template_idx++) {
			address = HWPfQmgrGrpTmplateReg4Indx
					+ ACC100_BYTES_IN_WORD * template_idx;
			if (template_idx == failed_engine)
				acc100_reg_write(d, address, value);
			else
				acc100_reg_write(d, address, 0);
		}
		/* Reset descriptor header */
		desc->req.word0 = ACC100_DMA_DESC_TYPE;
		desc->req.word1 = 0;
		desc->req.word2 = 0;
		desc->req.word3 = 0;
		desc->req.numCBs = 1;
		desc->req.m2dlen = 2;
		desc->req.d2mlen = 1;
		/* Enqueue the code block for processing */
		union acc100_enqueue_reg_fmt enq_req;
		enq_req.val = 0;
		enq_req.addr_offset = ACC100_DESC_OFFSET;
		enq_req.num_elem = 1;
		enq_req.req_elem_addr = 0;
		rte_wmb();
		acc100_reg_write(d, HWPfQmgrIngressAq + 0x100, enq_req.val);
		usleep(ACC100_LONG_WAIT * 100);
		if (desc->req.word0 != 2)
			printf("DMA Response %#"PRIx32"\n", desc->req.word0);
	}
	/* Reset LDPC Cores */
	for (i = 0; i < ACC100_ENGINES_MAX; i++)
		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
				ACC100_ENGINE_OFFSET * i,
				ACC100_RESET_HI);
	usleep(ACC100_LONG_WAIT);
	for (i = 0; i < ACC100_ENGINES_MAX; i++)
		acc100_reg_write(d, HWPfFecUl5gCntrlReg +
				ACC100_ENGINE_OFFSET * i,
				ACC100_RESET_LO);
	usleep(ACC100_LONG_WAIT);
	acc100_reg_write(d, HWPfHi5GHardResetReg, ACC100_RESET_HARD);
	usleep(ACC100_LONG_WAIT);
	int numEngines = 0;
	/* Check engine power-on status again */
	for (template_idx = ACC100_SIG_UL_5G;
			template_idx <= ACC100_SIG_UL_5G_LAST;
			template_idx++) {
		address = HwPfFecUl5gIbDebugReg +
				ACC100_ENGINE_OFFSET * template_idx;
		status = (acc100_reg_read(d, address) >> 4) & 0xF;
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		if (status == 1) {
			acc100_reg_write(d, address, value);
			numEngines++;
		} else
			acc100_reg_write(d, address, 0);
	}
	printf("Number of 5GUL engines %d\n", numEngines);

	if (d->sw_rings_base != NULL)
		rte_free(d->sw_rings_base);
	usleep(ACC100_LONG_WAIT);
}
/* Initial configuration of an ACC100 device prior to running configure() */
int
rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)
{
	rte_bbdev_log(INFO, "rte_acc100_configure");
	uint32_t value, address, status;
	int qg_idx, template_idx, vf_idx, acc, i;
	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);

	/* Compile time checks */
	RTE_BUILD_BUG_ON(sizeof(struct acc100_dma_req_desc) != 256);
	RTE_BUILD_BUG_ON(sizeof(union acc100_dma_desc) != 256);
	RTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_td) != 24);
	RTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_te) != 32);

	if (bbdev == NULL) {
		rte_bbdev_log(ERR,
		"Invalid dev_name (%s), or device is not yet initialised",
		dev_name);
		return -ENODEV;
	}
	struct acc100_device *d = bbdev->data->dev_private;

	/* Store configuration */
	rte_memcpy(&d->acc100_conf, conf, sizeof(d->acc100_conf));
	/* PCIe Bridge configuration */
	acc100_reg_write(d, HwPfPcieGpexBridgeControl, ACC100_CFG_PCI_BRIDGE);
	for (i = 1; i < ACC100_GPEX_AXIMAP_NUM; i++)
		acc100_reg_write(d,
				HwPfPcieGpexAxiAddrMappingWindowPexBaseHigh
				+ i * 16, 0);

	/* Prevent blocking AXI read on BRESP for AXI Write */
	address = HwPfPcieGpexAxiPioControl;
	value = ACC100_CFG_PCI_AXI;
	acc100_reg_write(d, address, value);

	/* 5GDL PLL phase shift */
	acc100_reg_write(d, HWPfChaDl5gPllPhshft0, 0x1);

	/* Explicitly releasing AXI as this may be stopped after PF FLR/BME */
	address = HWPfDmaAxiControl;
	value = 1;
	acc100_reg_write(d, address, value);
	/* DDR Configuration */
	address = HWPfDdrBcTim6;
	value = acc100_reg_read(d, address);
	value &= 0xFFFFFFFB; /* Bit 2 */
#ifdef ACC100_DDR_ECC_ENABLE
	value |= 0x4;
#endif
	acc100_reg_write(d, address, value);
	address = HWPfDdrPhyDqsCountNum;
#ifdef ACC100_DDR_ECC_ENABLE
	value = 9;
#else
	value = 8;
#endif
	acc100_reg_write(d, address, value);
	/* Set default descriptor signature */
	address = HWPfDmaDescriptorSignatuture;
	value = 0;
	acc100_reg_write(d, address, value);

	/* Enable the Error Detection in DMA */
	value = ACC100_CFG_DMA_ERROR;
	address = HWPfDmaErrorDetectionEn;
	acc100_reg_write(d, address, value);

	/* AXI Cache configuration */
	value = ACC100_CFG_AXI_CACHE;
	address = HWPfDmaAxcacheReg;
	acc100_reg_write(d, address, value);

	/* Default DMA Configuration (Qmgr Enabled) */
	address = HWPfDmaConfig0Reg;
	value = 0;
	acc100_reg_write(d, address, value);
	address = HWPfDmaQmanen;
	value = 0;
	acc100_reg_write(d, address, value);
	/* Default RLIM/ALEN configuration */
	address = HWPfDmaConfig1Reg;
	value = (1 << 31) + (23 << 8) + (1 << 6) + 7;
	acc100_reg_write(d, address, value);

	/* Configure DMA Qmanager addresses */
	address = HWPfDmaQmgrAddrReg;
	value = HWPfQmgrEgressQueuesTemplate;
	acc100_reg_write(d, address, value);
	/* ===== Qmgr Configuration ===== */
	/* Configuration of the AQueue Depth QMGR_GRP_0_DEPTH_LOG2 for UL */
	int totalQgs = conf->q_ul_4g.num_qgroups +
			conf->q_ul_5g.num_qgroups +
			conf->q_dl_4g.num_qgroups +
			conf->q_dl_5g.num_qgroups;
	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
		address = HWPfQmgrDepthLog2Grp +
				ACC100_BYTES_IN_WORD * qg_idx;
		value = aqDepth(qg_idx, conf);
		acc100_reg_write(d, address, value);
		address = HWPfQmgrTholdGrp +
				ACC100_BYTES_IN_WORD * qg_idx;
		value = (1 << 16) + (1 << (aqDepth(qg_idx, conf) - 1));
		acc100_reg_write(d, address, value);
	}
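	/*
	 * Illustrative example: with an AQ depth log2 of 5 the threshold
	 * word programmed above is (1 << 16) + (1 << 4) = 0x10010.
	 */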
	/* Template Priority in incremental order */
	for (template_idx = 0; template_idx < ACC100_NUM_TMPL;
			template_idx++) {
		address = HWPfQmgrGrpTmplateReg0Indx +
				ACC100_BYTES_IN_WORD * (template_idx % 8);
		value = ACC100_TMPL_PRI_0;
		acc100_reg_write(d, address, value);
		address = HWPfQmgrGrpTmplateReg1Indx +
				ACC100_BYTES_IN_WORD * (template_idx % 8);
		value = ACC100_TMPL_PRI_1;
		acc100_reg_write(d, address, value);
		address = HWPfQmgrGrpTmplateReg2indx +
				ACC100_BYTES_IN_WORD * (template_idx % 8);
		value = ACC100_TMPL_PRI_2;
		acc100_reg_write(d, address, value);
		address = HWPfQmgrGrpTmplateReg3Indx +
				ACC100_BYTES_IN_WORD * (template_idx % 8);
		value = ACC100_TMPL_PRI_3;
		acc100_reg_write(d, address, value);
	}

	address = HWPfQmgrGrpPriority;
	value = ACC100_CFG_QMGR_HI_P;
	acc100_reg_write(d, address, value);
	/* Template Configuration */
	for (template_idx = 0; template_idx < ACC100_NUM_TMPL;
			template_idx++) {
		value = 0;
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		acc100_reg_write(d, address, value);
	}
	/* 4GUL */
	int numQgs = conf->q_ul_4g.num_qgroups;
	int numQqsAcc = 0;
	value = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	for (template_idx = ACC100_SIG_UL_4G;
			template_idx <= ACC100_SIG_UL_4G_LAST;
			template_idx++) {
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		acc100_reg_write(d, address, value);
	}
	/* 5GUL */
	numQqsAcc += numQgs;
	numQgs = conf->q_ul_5g.num_qgroups;
	value = 0;
	int numEngines = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	for (template_idx = ACC100_SIG_UL_5G;
			template_idx <= ACC100_SIG_UL_5G_LAST;
			template_idx++) {
		/* Check engine power-on status */
		address = HwPfFecUl5gIbDebugReg +
				ACC100_ENGINE_OFFSET * template_idx;
		status = (acc100_reg_read(d, address) >> 4) & 0xF;
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		if (status == 1) {
			acc100_reg_write(d, address, value);
			numEngines++;
		} else
			acc100_reg_write(d, address, 0);
#if RTE_ACC100_SINGLE_FEC == 1
		value = 0;
#endif
	}
	printf("Number of 5GUL engines %d\n", numEngines);
	/* 4GDL */
	numQqsAcc += numQgs;
	numQgs = conf->q_dl_4g.num_qgroups;
	value = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	for (template_idx = ACC100_SIG_DL_4G;
			template_idx <= ACC100_SIG_DL_4G_LAST;
			template_idx++) {
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		acc100_reg_write(d, address, value);
#if RTE_ACC100_SINGLE_FEC == 1
		value = 0;
#endif
	}
	/* 5GDL */
	numQqsAcc += numQgs;
	numQgs = conf->q_dl_5g.num_qgroups;
	value = 0;
	for (qg_idx = numQqsAcc; qg_idx < (numQgs + numQqsAcc); qg_idx++)
		value |= (1 << qg_idx);
	for (template_idx = ACC100_SIG_DL_5G;
			template_idx <= ACC100_SIG_DL_5G_LAST;
			template_idx++) {
		address = HWPfQmgrGrpTmplateReg4Indx
				+ ACC100_BYTES_IN_WORD * template_idx;
		acc100_reg_write(d, address, value);
#if RTE_ACC100_SINGLE_FEC == 1
		value = 0;
#endif
	}
	/* Queue Group Function mapping */
	int qman_func_id[5] = {0, 2, 1, 3, 4};
	address = HWPfQmgrGrpFunction0;
	value = 0;
	for (qg_idx = 0; qg_idx < 8; qg_idx++) {
		acc = accFromQgid(qg_idx, conf);
		value |= qman_func_id[acc] << (qg_idx * 4);
	}
	acc100_reg_write(d, address, value);
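	/*
	 * Illustrative example: if queue groups 0-1 map to UL_4G (function
	 * id 0) and queue groups 2-3 map to UL_5G (function id 2), the
	 * nibble-packed word written above is 0x2200.
	 */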
	/* Configuration of the Arbitration QGroup depth to 1 */
	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
		address = HWPfQmgrArbQDepthGrp +
				ACC100_BYTES_IN_WORD * qg_idx;
		value = 0;
		acc100_reg_write(d, address, value);
	}
	/* Enabling AQueues through the Queue hierarchy */
	for (vf_idx = 0; vf_idx < ACC100_NUM_VFS; vf_idx++) {
		for (qg_idx = 0; qg_idx < ACC100_NUM_QGRPS; qg_idx++) {
			value = 0;
			if (vf_idx < conf->num_vf_bundles &&
					qg_idx < totalQgs)
				value = (1 << aqNum(qg_idx, conf)) - 1;
			address = HWPfQmgrAqEnableVf
					+ vf_idx * ACC100_BYTES_IN_WORD;
			value += (qg_idx << 16);
			acc100_reg_write(d, address, value);
		}
	}
	/* This pointer to ARAM (256kB) is shifted by 2 (4B per register) */
	uint32_t aram_address = 0;
	for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {
		for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {
			address = HWPfQmgrVfBaseAddr + vf_idx
					* ACC100_BYTES_IN_WORD + qg_idx
					* ACC100_BYTES_IN_WORD * 64;
			value = aram_address;
			acc100_reg_write(d, address, value);
			/* Offset ARAM Address for next memory bank
			 * - increment of 4B
			 */
			aram_address += aqNum(qg_idx, conf) *
					(1 << aqDepth(qg_idx, conf));
		}
	}
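	/*
	 * Illustrative example: with aqNum() = 16 and aqDepth() = 4 for a
	 * group, each (queue group, VF) pair advances aram_address by
	 * 16 * (1 << 4) = 256 words.
	 */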
	if (aram_address > ACC100_WORDS_IN_ARAM_SIZE) {
		rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d\n",
				aram_address, ACC100_WORDS_IN_ARAM_SIZE);
		return -EINVAL;
	}
	/* ==== HI Configuration ==== */

	/* Prevent Block on Transmit Error */
	address = HWPfHiBlockTransmitOnErrorEn;
	value = 0;
	acc100_reg_write(d, address, value);
	/* Prevent dropping of MSI */
	address = HWPfHiMsiDropEnableReg;
	value = 0;
	acc100_reg_write(d, address, value);
	/* Set the PF Mode register */
	address = HWPfHiPfMode;
	value = (conf->pf_mode_en) ? ACC100_PF_VAL : 0;
	acc100_reg_write(d, address, value);
	/* Enable Error Detection in HW */
	address = HWPfDmaErrorDetectionEn;
	value = 0x3D7;
	acc100_reg_write(d, address, value);

	/* QoS overflow init */
	value = 1;
	address = HWPfQosmonAEvalOverflow0;
	acc100_reg_write(d, address, value);
	address = HWPfQosmonBEvalOverflow0;
	acc100_reg_write(d, address, value);
	/* HARQ DDR Configuration */
	unsigned int ddrSizeInMb = 512; /* Fixed to 512 MB per VF for now */
	for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {
		address = HWPfDmaVfDdrBaseRw + vf_idx
				* 0x10;
		value = ((vf_idx * (ddrSizeInMb / 64)) << 16) +
				(ddrSizeInMb / 64);
		acc100_reg_write(d, address, value);
	}
	usleep(ACC100_LONG_WAIT);
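	/*
	 * Illustrative example: with ddrSizeInMb = 512 each VF window is
	 * 512 / 64 = 8 units of 64MB, so VF 1 is programmed with
	 * value = (8 << 16) + 8 = 0x80008.
	 */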
	/* Workaround in case some 5GUL engines are in an unexpected state */
	if (numEngines < (ACC100_SIG_UL_5G_LAST + 1))
		poweron_cleanup(bbdev, d, conf);

	rte_bbdev_log_debug("PF Tip configuration complete for %s", dev_name);
	return 0;
}
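/*
 * Illustrative sketch (not part of the driver): a PF application would
 * typically fill an rte_acc100_conf and push it to the device once, before
 * any queue setup. The field values and the PCI address below are
 * assumptions for the example.
 *
 *	struct rte_acc100_conf conf = {0};
 *	conf.pf_mode_en = 1;
 *	conf.num_vf_bundles = 1;
 *	conf.q_ul_5g.num_qgroups = 4;
 *	conf.q_ul_5g.num_aqs_per_groups = 16;
 *	conf.q_ul_5g.aq_depth_log2 = 4;
 *	// ... likewise for q_ul_4g / q_dl_4g / q_dl_5g ...
 *	int ret = rte_acc100_configure("0000:06:00.0", &conf);
 */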