baseband/acc100: add LDPC processing functions
[dpdk.git] drivers/baseband/acc100/rte_acc100_pmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_log.h>
9 #include <rte_dev.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_byteorder.h>
13 #include <rte_errno.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_hexdump.h>
16 #include <rte_pci.h>
17 #include <rte_bus_pci.h>
18 #ifdef RTE_BBDEV_OFFLOAD_COST
19 #include <rte_cycles.h>
20 #endif
21
22 #include <rte_bbdev.h>
23 #include <rte_bbdev_pmd.h>
24 #include "rte_acc100_pmd.h"
25
26 #ifdef RTE_LIBRTE_BBDEV_DEBUG
27 RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, DEBUG);
28 #else
29 RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, NOTICE);
30 #endif
31
32 /* Write to MMIO register address */
33 static inline void
34 mmio_write(void *addr, uint32_t value)
35 {
36         *((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
37 }
38
39 /* Write a register of an ACC100 device */
40 static inline void
41 acc100_reg_write(struct acc100_device *d, uint32_t offset, uint32_t payload)
42 {
43         void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
44         mmio_write(reg_addr, payload);
45         usleep(ACC100_LONG_WAIT);
46 }
47
48 /* Read a register of an ACC100 device */
49 static inline uint32_t
50 acc100_reg_read(struct acc100_device *d, uint32_t offset)
51 {
52
53         void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
54         uint32_t ret = *((volatile uint32_t *)(reg_addr));
55         return rte_le_to_cpu_32(ret);
56 }
57
58 /* Basic implementation of log2, valid only for an exact 2^N */
59 static inline uint32_t
60 log2_basic(uint32_t value)
61 {
62         return (value == 0) ? 0 : rte_bsf32(value);
63 }
64
65 /* Calculate memory alignment offset assuming alignment is 2^N */
66 static inline uint32_t
67 calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)
68 {
69         rte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);
70         return (uint32_t)(alignment -
71                         (unaligned_phy_mem & (alignment-1)));
72 }
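/*
 * Worked example (illustration only, not part of the driver): with
 * alignment = 0x4000000 (64 MB) and an IOVA of 0x12345678 the masked
 * remainder is 0x2345678, so the function returns
 * 0x4000000 - 0x2345678 = 0x1CBA988 bytes of padding to reach the next
 * 64 MB boundary. Note that an already aligned address yields a full
 * 'alignment' offset rather than 0.
 */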
73
74 /* Calculate the offset of the enqueue register */
75 static inline uint32_t
76 queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)
77 {
78         if (pf_device)
79                 return ((vf_id << 12) + (qgrp_id << 7) + (aq_id << 3) +
80                                 HWPfQmgrIngressAq);
81         else
82                 return ((qgrp_id << 7) + (aq_id << 3) +
83                                 HWVfQmgrIngressAq);
84 }
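/*
 * Layout sketch (informational): the offset packs the queue coordinates
 * into the register address: atomic queues are 8 B apart (aq_id << 3),
 * queue groups 128 B apart (qgrp_id << 7) and, on the PF only, VF bundles
 * 4 KB apart (vf_id << 12). For example, on the PF with vf_id = 1,
 * qgrp_id = 2 and aq_id = 5 the enqueue register sits at
 * HWPfQmgrIngressAq + 0x1000 + 0x100 + 0x28.
 */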
85
86 enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, NUM_ACC};
87
88 /* Return the queue topology for a Queue Group Index */
89 static inline void
90 qtopFromAcc(struct rte_acc100_queue_topology **qtop, int acc_enum,
91                 struct rte_acc100_conf *acc100_conf)
92 {
93         struct rte_acc100_queue_topology *p_qtop;
94         p_qtop = NULL;
95         switch (acc_enum) {
96         case UL_4G:
97                 p_qtop = &(acc100_conf->q_ul_4g);
98                 break;
99         case UL_5G:
100                 p_qtop = &(acc100_conf->q_ul_5g);
101                 break;
102         case DL_4G:
103                 p_qtop = &(acc100_conf->q_dl_4g);
104                 break;
105         case DL_5G:
106                 p_qtop = &(acc100_conf->q_dl_5g);
107                 break;
108         default:
109                 /* NOTREACHED */
110                 rte_bbdev_log(ERR, "Unexpected error evaluating qtopFromAcc");
111                 break;
112         }
113         *qtop = p_qtop;
114 }
115
116 static void
117 initQTop(struct rte_acc100_conf *acc100_conf)
118 {
119         acc100_conf->q_ul_4g.num_aqs_per_groups = 0;
120         acc100_conf->q_ul_4g.num_qgroups = 0;
121         acc100_conf->q_ul_4g.first_qgroup_index = -1;
122         acc100_conf->q_ul_5g.num_aqs_per_groups = 0;
123         acc100_conf->q_ul_5g.num_qgroups = 0;
124         acc100_conf->q_ul_5g.first_qgroup_index = -1;
125         acc100_conf->q_dl_4g.num_aqs_per_groups = 0;
126         acc100_conf->q_dl_4g.num_qgroups = 0;
127         acc100_conf->q_dl_4g.first_qgroup_index = -1;
128         acc100_conf->q_dl_5g.num_aqs_per_groups = 0;
129         acc100_conf->q_dl_5g.num_qgroups = 0;
130         acc100_conf->q_dl_5g.first_qgroup_index = -1;
131 }
132
133 static inline void
134 updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,
135                 struct acc100_device *d) {
136         uint32_t reg;
137         struct rte_acc100_queue_topology *q_top = NULL;
138         qtopFromAcc(&q_top, acc, acc100_conf);
139         if (unlikely(q_top == NULL))
140                 return;
141         uint16_t aq;
142         q_top->num_qgroups++;
143         if (q_top->first_qgroup_index == -1) {
144                 q_top->first_qgroup_index = qg;
145                 /* Can be optimized to assume all are enabled by default */
146                 reg = acc100_reg_read(d, queue_offset(d->pf_device,
147                                 0, qg, ACC100_NUM_AQS - 1));
148                 if (reg & ACC100_QUEUE_ENABLE) {
149                         q_top->num_aqs_per_groups = ACC100_NUM_AQS;
150                         return;
151                 }
152                 q_top->num_aqs_per_groups = 0;
153                 for (aq = 0; aq < ACC100_NUM_AQS; aq++) {
154                         reg = acc100_reg_read(d, queue_offset(d->pf_device,
155                                         0, qg, aq));
156                         if (reg & ACC100_QUEUE_ENABLE)
157                                 q_top->num_aqs_per_groups++;
158                 }
159         }
160 }
161
162 /* Fetch configuration enabled for the PF/VF using MMIO Read (slow) */
163 static inline void
164 fetch_acc100_config(struct rte_bbdev *dev)
165 {
166         struct acc100_device *d = dev->data->dev_private;
167         struct rte_acc100_conf *acc100_conf = &d->acc100_conf;
168         const struct acc100_registry_addr *reg_addr;
169         uint8_t acc, qg;
170         uint32_t reg, reg_aq, reg_len0, reg_len1;
171         uint32_t reg_mode;
172
173         /* No need to retrieve the configuration if it is already done */
174         if (d->configured)
175                 return;
176
177         /* Choose correct registry addresses for the device type */
178         if (d->pf_device)
179                 reg_addr = &pf_reg_addr;
180         else
181                 reg_addr = &vf_reg_addr;
182
183         d->ddr_size = (1 + acc100_reg_read(d, reg_addr->ddr_range)) << 10;
184
185         /* Single VF Bundle by VF */
186         acc100_conf->num_vf_bundles = 1;
187         initQTop(acc100_conf);
188
189         struct rte_acc100_queue_topology *q_top = NULL;
190         int qman_func_id[ACC100_NUM_ACCS] = {ACC100_ACCMAP_0, ACC100_ACCMAP_1,
191                         ACC100_ACCMAP_2, ACC100_ACCMAP_3, ACC100_ACCMAP_4};
192         reg = acc100_reg_read(d, reg_addr->qman_group_func);
193         for (qg = 0; qg < ACC100_NUM_QGRPS_PER_WORD; qg++) {
194                 reg_aq = acc100_reg_read(d,
195                                 queue_offset(d->pf_device, 0, qg, 0));
196                 if (reg_aq & ACC100_QUEUE_ENABLE) {
197                         uint32_t idx = (reg >> (qg * 4)) & 0x7;
198                         if (idx < ACC100_NUM_ACCS) {
199                                 acc = qman_func_id[idx];
200                                 updateQtop(acc, qg, acc100_conf, d);
201                         }
202                 }
203         }
204
205         /* Check the depth of the AQs */
206         reg_len0 = acc100_reg_read(d, reg_addr->depth_log0_offset);
207         reg_len1 = acc100_reg_read(d, reg_addr->depth_log1_offset);
208         for (acc = 0; acc < NUM_ACC; acc++) {
209                 qtopFromAcc(&q_top, acc, acc100_conf);
210                 if (q_top->first_qgroup_index < ACC100_NUM_QGRPS_PER_WORD)
211                         q_top->aq_depth_log2 = (reg_len0 >>
212                                         (q_top->first_qgroup_index * 4))
213                                         & 0xF;
214                 else
215                         q_top->aq_depth_log2 = (reg_len1 >>
216                                         ((q_top->first_qgroup_index -
217                                         ACC100_NUM_QGRPS_PER_WORD) * 4))
218                                         & 0xF;
219         }
220
221         /* Read PF mode */
222         if (d->pf_device) {
223                 reg_mode = acc100_reg_read(d, HWPfHiPfMode);
224                 acc100_conf->pf_mode_en = (reg_mode == ACC100_PF_VAL) ? 1 : 0;
225         }
226
227         rte_bbdev_log_debug(
228                         "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u\n",
229                         (d->pf_device) ? "PF" : "VF",
230                         (acc100_conf->input_pos_llr_1_bit) ? "POS" : "NEG",
231                         (acc100_conf->output_pos_llr_1_bit) ? "POS" : "NEG",
232                         acc100_conf->q_ul_4g.num_qgroups,
233                         acc100_conf->q_dl_4g.num_qgroups,
234                         acc100_conf->q_ul_5g.num_qgroups,
235                         acc100_conf->q_dl_5g.num_qgroups,
236                         acc100_conf->q_ul_4g.num_aqs_per_groups,
237                         acc100_conf->q_dl_4g.num_aqs_per_groups,
238                         acc100_conf->q_ul_5g.num_aqs_per_groups,
239                         acc100_conf->q_dl_5g.num_aqs_per_groups,
240                         acc100_conf->q_ul_4g.aq_depth_log2,
241                         acc100_conf->q_dl_4g.aq_depth_log2,
242                         acc100_conf->q_ul_5g.aq_depth_log2,
243                         acc100_conf->q_dl_5g.aq_depth_log2);
244 }
245
246 static void
247 free_base_addresses(void **base_addrs, int size)
248 {
249         int i;
250         for (i = 0; i < size; i++)
251                 rte_free(base_addrs[i]);
252 }
253
254 static inline uint32_t
255 get_desc_len(void)
256 {
257         return sizeof(union acc100_dma_desc);
258 }
259
260 /* Allocate the 2 * 64MB block for the sw rings */
261 static int
262 alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc100_device *d,
263                 int socket)
264 {
265         uint32_t sw_ring_size = ACC100_SIZE_64MBYTE;
266         d->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,
267                         2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);
268         if (d->sw_rings_base == NULL) {
269                 rte_bbdev_log(ERR, "Failed to allocate memory for %s:%u",
270                                 dev->device->driver->name,
271                                 dev->data->dev_id);
272                 return -ENOMEM;
273         }
274         uint32_t next_64mb_align_offset = calc_mem_alignment_offset(
275                         d->sw_rings_base, ACC100_SIZE_64MBYTE);
276         d->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);
277         d->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +
278                         next_64mb_align_offset;
279         d->sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
280         d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
281
282         return 0;
283 }
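/*
 * Rationale note: requesting 2 * 64 MB guarantees that a fully 64 MB-aligned
 * 64 MB window exists somewhere inside the zone, wherever the allocator put
 * it. sw_rings and sw_rings_iova are advanced to that boundary, while
 * sw_rings_base keeps the original pointer so the zone can later be released
 * with rte_free().
 */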
284
285 /* Attempt to allocate minimised memory space for sw rings */
286 static void
287 alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc100_device *d,
288                 uint16_t num_queues, int socket)
289 {
290         rte_iova_t sw_rings_base_iova, next_64mb_align_addr_iova;
291         uint32_t next_64mb_align_offset;
292         rte_iova_t sw_ring_iova_end_addr;
293         void *base_addrs[ACC100_SW_RING_MEM_ALLOC_ATTEMPTS];
294         void *sw_rings_base;
295         int i = 0;
296         uint32_t q_sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
297         uint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;
298
299         /* Find an aligned block of memory to store sw rings */
300         while (i < ACC100_SW_RING_MEM_ALLOC_ATTEMPTS) {
301                 /*
302                  * Memory allocated for the sw rings is guaranteed to be aligned
303                  * to q_sw_ring_size provided that the requested size is smaller
304                  * than the page size
305                  */
306                 sw_rings_base = rte_zmalloc_socket(
307                                 dev->device->driver->name,
308                                 dev_sw_ring_size, q_sw_ring_size, socket);
309
310                 if (sw_rings_base == NULL) {
311                         rte_bbdev_log(ERR,
312                                         "Failed to allocate memory for %s:%u",
313                                         dev->device->driver->name,
314                                         dev->data->dev_id);
315                         break;
316                 }
317
318                 sw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);
319                 next_64mb_align_offset = calc_mem_alignment_offset(
320                                 sw_rings_base, ACC100_SIZE_64MBYTE);
321                 next_64mb_align_addr_iova = sw_rings_base_iova +
322                                 next_64mb_align_offset;
323                 sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size;
324
325                 /* Check if the end of the sw ring memory block is before the
326                  * start of next 64MB aligned mem address
327                  */
328                 if (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {
329                         d->sw_rings_iova = sw_rings_base_iova;
330                         d->sw_rings = sw_rings_base;
331                         d->sw_rings_base = sw_rings_base;
332                         d->sw_ring_size = q_sw_ring_size;
333                         d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
334                         break;
335                 }
336                 /* Store the address of the unaligned mem block */
337                 base_addrs[i] = sw_rings_base;
338                 i++;
339         }
340
341         /* Free all unaligned blocks of mem allocated in the loop */
342         free_base_addresses(base_addrs, i);
343 }
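/*
 * Strategy note: each attempt allocates only num_queues * q_sw_ring_size
 * bytes and keeps the block when it does not straddle a 64 MB boundary
 * (its end IOVA stays below the next 64 MB-aligned address). Rejected
 * blocks are deliberately kept allocated until the loop ends so that the
 * next attempt is served from a different address; they are then released
 * in free_base_addresses(). If all ACC100_SW_RING_MEM_ALLOC_ATTEMPTS fail,
 * d->sw_rings stays NULL and the caller falls back to the 2 * 64 MB scheme.
 */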
344
345
346 /* Allocate memory for the software rings and configure the device */
347 static int
348 acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
349 {
350         uint32_t phys_low, phys_high, payload;
351         struct acc100_device *d = dev->data->dev_private;
352         const struct acc100_registry_addr *reg_addr;
353
354         if (d->pf_device && !d->acc100_conf.pf_mode_en) {
355                 rte_bbdev_log(NOTICE,
356                                 "%s has PF mode disabled. This PF can't be used.",
357                                 dev->data->name);
358                 return -ENODEV;
359         }
360
361         alloc_sw_rings_min_mem(dev, d, num_queues, socket_id);
362
363         /* If minimal memory space approach failed, then allocate
364          * the 2 * 64MB block for the sw rings
365          */
366         if (d->sw_rings == NULL)
367                 alloc_2x64mb_sw_rings_mem(dev, d, socket_id);
368
369         if (d->sw_rings == NULL) {
370                 rte_bbdev_log(NOTICE,
371                                 "Failure allocating sw_rings memory");
372                 return -ENODEV;
373         }
374
375         /* Configure ACC100 with the base address for DMA descriptor rings
376          * Same descriptor rings used for UL and DL DMA Engines
377          * Note : Assuming only VF0 bundle is used for PF mode
378          */
379         phys_high = (uint32_t)(d->sw_rings_iova >> 32);
380         phys_low  = (uint32_t)(d->sw_rings_iova & ~(ACC100_SIZE_64MBYTE-1));
381
382         /* Choose correct registry addresses for the device type */
383         if (d->pf_device)
384                 reg_addr = &pf_reg_addr;
385         else
386                 reg_addr = &vf_reg_addr;
387
388         /* Read the populated cfg from ACC100 registers */
389         fetch_acc100_config(dev);
390
391         /* Release AXI from PF */
392         if (d->pf_device)
393                 acc100_reg_write(d, HWPfDmaAxiControl, 1);
394
395         acc100_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);
396         acc100_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);
397         acc100_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);
398         acc100_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);
399         acc100_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);
400         acc100_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);
401         acc100_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);
402         acc100_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);
403
404         /*
405          * Configure Ring Size to the max queue ring size
406          * (used for wrapping purpose)
407          */
408         payload = log2_basic(d->sw_ring_size / 64);
409         acc100_reg_write(d, reg_addr->ring_size, payload);
410
411         /* Configure tail pointer for use when SDONE enabled */
412         d->tail_ptrs = rte_zmalloc_socket(
413                         dev->device->driver->name,
414                         ACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t),
415                         RTE_CACHE_LINE_SIZE, socket_id);
416         if (d->tail_ptrs == NULL) {
417                 rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u",
418                                 dev->device->driver->name,
419                                 dev->data->dev_id);
420                 rte_free(d->sw_rings_base);
421                 return -ENOMEM;
422         }
423         d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs);
424
425         phys_high = (uint32_t)(d->tail_ptr_iova >> 32);
426         phys_low  = (uint32_t)(d->tail_ptr_iova);
427         acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);
428         acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);
429         acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);
430         acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);
431         acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);
432         acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);
433         acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);
434         acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);
435
436         d->harq_layout = rte_zmalloc_socket("HARQ Layout",
437                         ACC100_HARQ_LAYOUT * sizeof(*d->harq_layout),
438                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
439         if (d->harq_layout == NULL) {
440                 rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u",
441                                 dev->device->driver->name,
442                                 dev->data->dev_id);
443                 rte_free(d->sw_rings_base);
444                 return -ENOMEM;
445         }
446
447         /* Mark as configured properly */
448         d->configured = true;
449
450         rte_bbdev_log_debug(
451                         "ACC100 (%s) configured  sw_rings = %p, sw_rings_iova = %#"
452                         PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova);
453
454         return 0;
455 }
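/*
 * Worked example (hedged: assumes a 256 B DMA descriptor and an
 * ACC100_MAX_QUEUE_DEPTH of 1024, neither of which is spelled out here):
 * the per-queue ring would then be 1024 * 256 B = 256 KB, so the ring_size
 * payload programmed above is log2_basic(262144 / 64) = 12.
 */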
456
457 /* Free memory used for software rings */
458 static int
459 acc100_dev_close(struct rte_bbdev *dev)
460 {
461         struct acc100_device *d = dev->data->dev_private;
462         if (d->sw_rings_base != NULL) {
463                 rte_free(d->tail_ptrs);
                rte_free(d->harq_layout);
464                 rte_free(d->sw_rings_base);
465                 d->sw_rings_base = NULL;
466         }
467         /* Ensure all in flight HW transactions are completed */
468         usleep(ACC100_LONG_WAIT);
469         return 0;
470 }
471
472 /**
473  * Report an ACC100 queue index which is free
474  * Return 0 to 16k for a valid queue_idx or -1 when no queue is available
475  * Note : Only supporting VF0 Bundle for PF mode
476  */
477 static int
478 acc100_find_free_queue_idx(struct rte_bbdev *dev,
479                 const struct rte_bbdev_queue_conf *conf)
480 {
481         struct acc100_device *d = dev->data->dev_private;
482         int op_2_acc[5] = {0, UL_4G, DL_4G, UL_5G, DL_5G};
483         int acc = op_2_acc[conf->op_type];
484         struct rte_acc100_queue_topology *qtop = NULL;
485
486         qtopFromAcc(&qtop, acc, &(d->acc100_conf));
487         if (qtop == NULL)
488                 return -1;
489         /* Identify matching QGroup Index which are sorted in priority order */
490         uint16_t group_idx = qtop->first_qgroup_index;
491         group_idx += conf->priority;
492         if (group_idx >= ACC100_NUM_QGRPS ||
493                         conf->priority >= qtop->num_qgroups) {
494                 rte_bbdev_log(INFO, "Invalid Priority on %s, priority %u",
495                                 dev->data->name, conf->priority);
496                 return -1;
497         }
498         /* Find a free AQ_idx  */
499         uint16_t aq_idx;
500         for (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) {
501                 if (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {
502                         /* Mark the Queue as assigned */
503                         d->q_assigned_bit_map[group_idx] |= (1 << aq_idx);
504                         /* Report the AQ Index */
505                         return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx;
506                 }
507         }
508         rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u",
509                         dev->data->name, conf->priority);
510         return -1;
511 }
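/*
 * Bookkeeping note: d->q_assigned_bit_map[] holds one bit per atomic queue
 * in each queue group; the bit is set here when the AQ is handed out and
 * cleared again in acc100_queue_release(). The return value encodes both
 * coordinates as (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx and is decoded
 * back into qgrp_id/vf_id/aq_id in acc100_queue_setup().
 */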
512
513 /* Setup ACC100 queue */
514 static int
515 acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
516                 const struct rte_bbdev_queue_conf *conf)
517 {
518         struct acc100_device *d = dev->data->dev_private;
519         struct acc100_queue *q;
520         int16_t q_idx;
521
522         /* Allocate the queue data structure. */
523         q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
524                         RTE_CACHE_LINE_SIZE, conf->socket);
525         if (q == NULL) {
526                 rte_bbdev_log(ERR, "Failed to allocate queue memory");
527                 return -ENOMEM;
528         }
529         if (d == NULL) {
530                 rte_bbdev_log(ERR, "Undefined device");
                rte_free(q);
531                 return -ENODEV;
532         }
533
534         q->d = d;
535         q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
536         q->ring_addr_iova = d->sw_rings_iova + (d->sw_ring_size * queue_id);
537
538         /* Prepare the Ring with default descriptor format */
539         union acc100_dma_desc *desc = NULL;
540         unsigned int desc_idx, b_idx;
541         int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
542                 ACC100_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
543                 ACC100_FCW_TD_BLEN : ACC100_FCW_LD_BLEN));
544
545         for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
546                 desc = q->ring_addr + desc_idx;
547                 desc->req.word0 = ACC100_DMA_DESC_TYPE;
548                 desc->req.word1 = 0; /**< Timestamp */
549                 desc->req.word2 = 0;
550                 desc->req.word3 = 0;
551                 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
552                 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
553                 desc->req.data_ptrs[0].blen = fcw_len;
554                 desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
555                 desc->req.data_ptrs[0].last = 0;
556                 desc->req.data_ptrs[0].dma_ext = 0;
557                 for (b_idx = 1; b_idx < ACC100_DMA_MAX_NUM_POINTERS - 1;
558                                 b_idx++) {
559                         desc->req.data_ptrs[b_idx].blkid = ACC100_DMA_BLKID_IN;
560                         desc->req.data_ptrs[b_idx].last = 1;
561                         desc->req.data_ptrs[b_idx].dma_ext = 0;
562                         b_idx++;
563                         desc->req.data_ptrs[b_idx].blkid =
564                                         ACC100_DMA_BLKID_OUT_ENC;
565                         desc->req.data_ptrs[b_idx].last = 1;
566                         desc->req.data_ptrs[b_idx].dma_ext = 0;
567                 }
568                 /* Preset some fields of LDPC FCW */
569                 desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
570                 desc->req.fcw_ld.gain_i = 1;
571                 desc->req.fcw_ld.gain_h = 1;
572         }
573
574         q->lb_in = rte_zmalloc_socket(dev->device->driver->name,
575                         RTE_CACHE_LINE_SIZE,
576                         RTE_CACHE_LINE_SIZE, conf->socket);
577         if (q->lb_in == NULL) {
578                 rte_bbdev_log(ERR, "Failed to allocate lb_in memory");
579                 rte_free(q);
580                 return -ENOMEM;
581         }
582         q->lb_in_addr_iova = rte_malloc_virt2iova(q->lb_in);
583         q->lb_out = rte_zmalloc_socket(dev->device->driver->name,
584                         RTE_CACHE_LINE_SIZE,
585                         RTE_CACHE_LINE_SIZE, conf->socket);
586         if (q->lb_out == NULL) {
587                 rte_bbdev_log(ERR, "Failed to allocate lb_out memory");
588                 rte_free(q->lb_in);
589                 rte_free(q);
590                 return -ENOMEM;
591         }
592         q->lb_out_addr_iova = rte_malloc_virt2iova(q->lb_out);
593
594         /*
595          * Software queue ring wraps synchronously with the HW when it reaches
596          * the boundary of the maximum allocated queue size, no matter what the
597          * sw queue size is. This wrapping is guarded by setting the wrap_mask
598          * to represent the maximum queue size as allocated at the time when
599          * the device has been setup (in configure()).
600          *
601          * The queue depth is set to the queue size value (conf->queue_size).
602          * This limits the occupancy of the queue at any point of time, so that
603          * the queue does not get swamped with enqueue requests.
604          */
605         q->sw_ring_depth = conf->queue_size;
606         q->sw_ring_wrap_mask = d->sw_ring_max_depth - 1;
607
608         q->op_type = conf->op_type;
609
610         q_idx = acc100_find_free_queue_idx(dev, conf);
611         if (q_idx == -1) {
612                 rte_free(q->lb_in);
613                 rte_free(q->lb_out);
614                 rte_free(q);
615                 return -1;
616         }
617
618         q->qgrp_id = (q_idx >> ACC100_GRP_ID_SHIFT) & 0xF;
619         q->vf_id = (q_idx >> ACC100_VF_ID_SHIFT)  & 0x3F;
620         q->aq_id = q_idx & 0xF;
621         q->aq_depth = (conf->op_type ==  RTE_BBDEV_OP_TURBO_DEC) ?
622                         (1 << d->acc100_conf.q_ul_4g.aq_depth_log2) :
623                         (1 << d->acc100_conf.q_dl_4g.aq_depth_log2);
624
625         q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
626                         queue_offset(d->pf_device,
627                                         q->vf_id, q->qgrp_id, q->aq_id));
628
629         rte_bbdev_log_debug(
630                         "Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p",
631                         dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,
632                         q->aq_id, q->aq_depth, q->mmio_reg_enqueue);
633
634         dev->data->queues[queue_id].queue_private = q;
635         return 0;
636 }
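/*
 * Application-side usage sketch (assumption: the standard bbdev API; the
 * helper below is illustrative only and not part of the PMD). It shows the
 * path by which the code above is reached: rte_bbdev_setup_queues() ends up
 * in acc100_setup_queues() and rte_bbdev_queue_configure() in
 * acc100_queue_setup().
 */
#if 0
static int
example_configure_ldpc_queue(uint16_t dev_id, int socket)
{
	struct rte_bbdev_queue_conf qconf = {
		.socket = socket,
		.queue_size = 32,	/* bounded by dev_info.queue_size_lim */
		.priority = 0,		/* bounded by max_ul/dl_queue_priority */
		.op_type = RTE_BBDEV_OP_LDPC_DEC,
	};
	int ret = rte_bbdev_setup_queues(dev_id, 1, socket);

	if (ret < 0)
		return ret;
	ret = rte_bbdev_queue_configure(dev_id, 0, &qconf);
	if (ret < 0)
		return ret;
	return rte_bbdev_start(dev_id);
}
#endif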
637
638 /* Release ACC100 queue */
639 static int
640 acc100_queue_release(struct rte_bbdev *dev, uint16_t q_id)
641 {
642         struct acc100_device *d = dev->data->dev_private;
643         struct acc100_queue *q = dev->data->queues[q_id].queue_private;
644
645         if (q != NULL) {
646                 /* Mark the Queue as un-assigned */
647                 d->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -
648                                 (1 << q->aq_id));
649                 rte_free(q->lb_in);
650                 rte_free(q->lb_out);
651                 rte_free(q);
652                 dev->data->queues[q_id].queue_private = NULL;
653         }
654
655         return 0;
656 }
657
658 /* Get ACC100 device info */
659 static void
660 acc100_dev_info_get(struct rte_bbdev *dev,
661                 struct rte_bbdev_driver_info *dev_info)
662 {
663         struct acc100_device *d = dev->data->dev_private;
664
665         static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
666                 {
667                         .type   = RTE_BBDEV_OP_LDPC_ENC,
668                         .cap.ldpc_enc = {
669                                 .capability_flags =
670                                         RTE_BBDEV_LDPC_RATE_MATCH |
671                                         RTE_BBDEV_LDPC_CRC_24B_ATTACH |
672                                         RTE_BBDEV_LDPC_INTERLEAVER_BYPASS,
673                                 .num_buffers_src =
674                                                 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
675                                 .num_buffers_dst =
676                                                 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
677                         }
678                 },
679                 {
680                         .type   = RTE_BBDEV_OP_LDPC_DEC,
681                         .cap.ldpc_dec = {
682                         .capability_flags =
683                                 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
684                                 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
685                                 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
686                                 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
687 #ifdef ACC100_EXT_MEM
688                                 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
689                                 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
690 #endif
691                                 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
692                                 RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS |
693                                 RTE_BBDEV_LDPC_DECODE_BYPASS |
694                                 RTE_BBDEV_LDPC_DEC_SCATTER_GATHER |
695                                 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION |
696                                 RTE_BBDEV_LDPC_LLR_COMPRESSION,
697                         .llr_size = 8,
698                         .llr_decimals = 1,
699                         .num_buffers_src =
700                                         RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
701                         .num_buffers_hard_out =
702                                         RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
703                         .num_buffers_soft_out = 0,
704                         }
705                 },
706                 RTE_BBDEV_END_OF_CAPABILITIES_LIST()
707         };
708
709         static struct rte_bbdev_queue_conf default_queue_conf;
710         default_queue_conf.socket = dev->data->socket_id;
711         default_queue_conf.queue_size = ACC100_MAX_QUEUE_DEPTH;
712
713         dev_info->driver_name = dev->device->driver->name;
714
715         /* Read and save the populated config from ACC100 registers */
716         fetch_acc100_config(dev);
717
718         /* This isn't ideal because it reports the maximum number of queues but
719          * does not provide info on how many can be uplink/downlink or at which
720          * priorities they can run
721          */
722         dev_info->max_num_queues =
723                         d->acc100_conf.q_dl_5g.num_aqs_per_groups *
724                         d->acc100_conf.q_dl_5g.num_qgroups +
725                         d->acc100_conf.q_ul_5g.num_aqs_per_groups *
726                         d->acc100_conf.q_ul_5g.num_qgroups +
727                         d->acc100_conf.q_dl_4g.num_aqs_per_groups *
728                         d->acc100_conf.q_dl_4g.num_qgroups +
729                         d->acc100_conf.q_ul_4g.num_aqs_per_groups *
730                         d->acc100_conf.q_ul_4g.num_qgroups;
731         dev_info->queue_size_lim = ACC100_MAX_QUEUE_DEPTH;
732         dev_info->hardware_accelerated = true;
733         dev_info->max_dl_queue_priority =
734                         d->acc100_conf.q_dl_4g.num_qgroups - 1;
735         dev_info->max_ul_queue_priority =
736                         d->acc100_conf.q_ul_4g.num_qgroups - 1;
737         dev_info->default_queue_conf = default_queue_conf;
738         dev_info->cpu_flag_reqs = NULL;
739         dev_info->min_alignment = 64;
740         dev_info->capabilities = bbdev_capabilities;
741 #ifdef ACC100_EXT_MEM
742         dev_info->harq_buffer_size = d->ddr_size;
743 #else
744         dev_info->harq_buffer_size = 0;
745 #endif
746 }
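/*
 * Worked example (illustration only): if the device exposes two queue groups
 * of 16 AQs each for 5G DL and the same for 5G UL, with no 4G groups,
 * max_num_queues above evaluates to 2 * 16 + 2 * 16 + 0 + 0 = 64.
 */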
747
748
749 static const struct rte_bbdev_ops acc100_bbdev_ops = {
750         .setup_queues = acc100_setup_queues,
751         .close = acc100_dev_close,
752         .info_get = acc100_dev_info_get,
753         .queue_setup = acc100_queue_setup,
754         .queue_release = acc100_queue_release,
755 };
756
757 /* ACC100 PCI PF address map */
758 static struct rte_pci_id pci_id_acc100_pf_map[] = {
759         {
760                 RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_PF_DEVICE_ID)
761         },
762         {.device_id = 0},
763 };
764
765 /* ACC100 PCI VF address map */
766 static struct rte_pci_id pci_id_acc100_vf_map[] = {
767         {
768                 RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_VF_DEVICE_ID)
769         },
770         {.device_id = 0},
771 };
772
773 /* Read flag value 0/1 from bitmap */
774 static inline bool
775 check_bit(uint32_t bitmap, uint32_t bitmask)
776 {
777         return bitmap & bitmask;
778 }
779
780 static inline char *
781 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
782 {
783         if (unlikely(len > rte_pktmbuf_tailroom(m)))
784                 return NULL;
785
786         char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
787         m->data_len = (uint16_t)(m->data_len + len);
788         m_head->pkt_len  = (m_head->pkt_len + len);
789         return tail;
790 }
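/*
 * Helper note: mbuf_append() reserves 'len' bytes of tailroom in segment 'm',
 * accounts for them in both the segment data_len and the head mbuf pkt_len,
 * and returns a pointer to the first reserved byte (or NULL when the
 * tailroom is insufficient).
 */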
791
792 /* Compute value of k0.
793  * Based on 3GPP 38.212 Table 5.4.2.1-2
794  * Starting position of different redundancy versions, k0
795  */
796 static inline uint16_t
797 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
798 {
799         if (rv_index == 0)
800                 return 0;
801         uint16_t n = (bg == 1 ? ACC100_N_ZC_1 : ACC100_N_ZC_2) * z_c;
802         if (n_cb == n) {
803                 if (rv_index == 1)
804                         return (bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * z_c;
805                 else if (rv_index == 2)
806                         return (bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * z_c;
807                 else
808                         return (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c;
809         }
810         /* LBRM case - includes a division by N */
811         if (rv_index == 1)
812                 return (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb)
813                                 / n) * z_c;
814         else if (rv_index == 2)
815                 return (((bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * n_cb)
816                                 / n) * z_c;
817         else
818                 return (((bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * n_cb)
819                                 / n) * z_c;
820 }
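/*
 * Worked example (hedged: assumes the ACC100_K0_* constants carry the
 * numerators of 3GPP 38.212 Table 5.4.2.1-2, i.e. 17/33/56 for BG1 and
 * 13/25/43 for BG2, with ACC100_N_ZC_1 = 66 and ACC100_N_ZC_2 = 50).
 * For BG1, z_c = 224, rv_index = 2 and a full circular buffer
 * (n_cb = 66 * 224) the starting position is k0 = 33 * 224 = 7392.
 * With a limited buffer, e.g. n_cb = 8448, the same case gives
 * k0 = ((33 * 8448) / (66 * 224)) * 224 = 18 * 224 = 4032.
 */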
821
822 /* Fill in a frame control word for LDPC encoding. */
823 static inline void
824 acc100_fcw_le_fill(const struct rte_bbdev_enc_op *op,
825                 struct acc100_fcw_le *fcw, int num_cb)
826 {
827         fcw->qm = op->ldpc_enc.q_m;
828         fcw->nfiller = op->ldpc_enc.n_filler;
829         fcw->BG = (op->ldpc_enc.basegraph - 1);
830         fcw->Zc = op->ldpc_enc.z_c;
831         fcw->ncb = op->ldpc_enc.n_cb;
832         fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,
833                         op->ldpc_enc.rv_index);
834         fcw->rm_e = op->ldpc_enc.cb_params.e;
835         fcw->crc_select = check_bit(op->ldpc_enc.op_flags,
836                         RTE_BBDEV_LDPC_CRC_24B_ATTACH);
837         fcw->bypass_intlv = check_bit(op->ldpc_enc.op_flags,
838                         RTE_BBDEV_LDPC_INTERLEAVER_BYPASS);
839         fcw->mcb_count = num_cb;
840 }
841
842 /* Fill in a frame control word for LDPC decoding. */
843 static inline void
844 acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,
845                 union acc100_harq_layout_data *harq_layout)
846 {
847         uint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;
848         uint16_t harq_index;
849         uint32_t l;
850         bool harq_prun = false;
851
852         fcw->qm = op->ldpc_dec.q_m;
853         fcw->nfiller = op->ldpc_dec.n_filler;
854         fcw->BG = (op->ldpc_dec.basegraph - 1);
855         fcw->Zc = op->ldpc_dec.z_c;
856         fcw->ncb = op->ldpc_dec.n_cb;
857         fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_dec.basegraph,
858                         op->ldpc_dec.rv_index);
859         if (op->ldpc_dec.code_block_mode == 1)
860                 fcw->rm_e = op->ldpc_dec.cb_params.e;
861         else
862                 fcw->rm_e = (op->ldpc_dec.tb_params.r <
863                                 op->ldpc_dec.tb_params.cab) ?
864                                                 op->ldpc_dec.tb_params.ea :
865                                                 op->ldpc_dec.tb_params.eb;
866
867         fcw->hcin_en = check_bit(op->ldpc_dec.op_flags,
868                         RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
869         fcw->hcout_en = check_bit(op->ldpc_dec.op_flags,
870                         RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
871         fcw->crc_select = check_bit(op->ldpc_dec.op_flags,
872                         RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
873         fcw->bypass_dec = check_bit(op->ldpc_dec.op_flags,
874                         RTE_BBDEV_LDPC_DECODE_BYPASS);
875         fcw->bypass_intlv = check_bit(op->ldpc_dec.op_flags,
876                         RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS);
877         if (op->ldpc_dec.q_m == 1) {
878                 fcw->bypass_intlv = 1;
879                 fcw->qm = 2;
880         }
881         fcw->hcin_decomp_mode = check_bit(op->ldpc_dec.op_flags,
882                         RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
883         fcw->hcout_comp_mode = check_bit(op->ldpc_dec.op_flags,
884                         RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
885         fcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,
886                         RTE_BBDEV_LDPC_LLR_COMPRESSION);
887         harq_index = op->ldpc_dec.harq_combined_output.offset /
888                         ACC100_HARQ_OFFSET;
889 #ifdef ACC100_EXT_MEM
890         /* Limit cases when HARQ pruning is valid */
891         harq_prun = ((op->ldpc_dec.harq_combined_output.offset %
892                         ACC100_HARQ_OFFSET) == 0) &&
893                         (op->ldpc_dec.harq_combined_output.offset <= UINT16_MAX
894                         * ACC100_HARQ_OFFSET);
895 #endif
896         if (fcw->hcin_en > 0) {
897                 harq_in_length = op->ldpc_dec.harq_combined_input.length;
898                 if (fcw->hcin_decomp_mode > 0)
899                         harq_in_length = harq_in_length * 8 / 6;
900                 harq_in_length = RTE_ALIGN(harq_in_length, 64);
901                 if ((harq_layout[harq_index].offset > 0) & harq_prun) {
902                         rte_bbdev_log_debug("HARQ IN offset unexpected for now\n");
903                         fcw->hcin_size0 = harq_layout[harq_index].size0;
904                         fcw->hcin_offset = harq_layout[harq_index].offset;
905                         fcw->hcin_size1 = harq_in_length -
906                                         harq_layout[harq_index].offset;
907                 } else {
908                         fcw->hcin_size0 = harq_in_length;
909                         fcw->hcin_offset = 0;
910                         fcw->hcin_size1 = 0;
911                 }
912         } else {
913                 fcw->hcin_size0 = 0;
914                 fcw->hcin_offset = 0;
915                 fcw->hcin_size1 = 0;
916         }
917
918         fcw->itmax = op->ldpc_dec.iter_max;
919         fcw->itstop = check_bit(op->ldpc_dec.op_flags,
920                         RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
921         fcw->synd_precoder = fcw->itstop;
922         /*
923          * These are all implicitly set
924          * fcw->synd_post = 0;
925          * fcw->so_en = 0;
926          * fcw->so_bypass_rm = 0;
927          * fcw->so_bypass_intlv = 0;
928          * fcw->dec_convllr = 0;
929          * fcw->hcout_convllr = 0;
930          * fcw->hcout_size1 = 0;
931          * fcw->so_it = 0;
932          * fcw->hcout_offset = 0;
933          * fcw->negstop_th = 0;
934          * fcw->negstop_it = 0;
935          * fcw->negstop_en = 0;
936          * fcw->gain_i = 1;
937          * fcw->gain_h = 1;
938          */
939         if (fcw->hcout_en > 0) {
940                 parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)
941                         * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
942                 k0_p = (fcw->k0 > parity_offset) ?
943                                 fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;
944                 ncb_p = fcw->ncb - op->ldpc_dec.n_filler;
945                 l = k0_p + fcw->rm_e;
946                 harq_out_length = (uint16_t) fcw->hcin_size0;
947                 harq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);
948                 harq_out_length = (harq_out_length + 0x3F) & 0xFFC0;
949                 if ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) &&
950                                 harq_prun) {
951                         fcw->hcout_size0 = (uint16_t) fcw->hcin_size0;
952                         fcw->hcout_offset = k0_p & 0xFFC0;
953                         fcw->hcout_size1 = harq_out_length - fcw->hcout_offset;
954                 } else {
955                         fcw->hcout_size0 = harq_out_length;
956                         fcw->hcout_size1 = 0;
957                         fcw->hcout_offset = 0;
958                 }
959                 harq_layout[harq_index].offset = fcw->hcout_offset;
960                 harq_layout[harq_index].size0 = fcw->hcout_size0;
961         } else {
962                 fcw->hcout_size0 = 0;
963                 fcw->hcout_size1 = 0;
964                 fcw->hcout_offset = 0;
965         }
966 }
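/*
 * Sizing note: when HARQ combining is enabled the combined input length is
 * re-expanded by 8/6 if 6-bit compression is used (the buffer holds packed
 * 6-bit LLRs) and then rounded up to a 64 B multiple, the granularity used
 * for hcin_size0/hcin_offset. The combined output length is raised to at
 * least k0_p + rm_e, capped at ncb_p (the circular buffer minus filler bits)
 * and rounded to a 64 B multiple before being recorded in the harq_layout
 * table for reuse by later retransmissions of the same HARQ process.
 */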
967
968 /**
969  * Fills descriptor with data pointers of one block type.
970  *
971  * @param desc
972  *   Pointer to DMA descriptor.
973  * @param input
974  *   Pointer to pointer to input data which will be encoded. It can be changed
975  *   and points to next segment in scatter-gather case.
976  * @param offset
977  *   Input offset in rte_mbuf structure. It is used for calculating the point
978  *   where data is starting.
979  * @param cb_len
980  *   Length of currently processed Code Block
981  * @param seg_total_left
982  *   It indicates how many bytes still left in segment (mbuf) for further
983  *   processing.
984  * @param op_flags
985  *   Store information about device capabilities
986  * @param next_triplet
987  *   Index for ACC100 DMA Descriptor triplet
988  *
989  * @return
990  *   Returns index of next triplet on success, other value if lengths of
991  *   pkt and processed cb do not match.
992  *
993  */
994 static inline int
995 acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,
996                 struct rte_mbuf **input, uint32_t *offset, uint32_t cb_len,
997                 uint32_t *seg_total_left, int next_triplet)
998 {
999         uint32_t part_len;
1000         struct rte_mbuf *m = *input;
1001
1002         part_len = (*seg_total_left < cb_len) ? *seg_total_left : cb_len;
1003         cb_len -= part_len;
1004         *seg_total_left -= part_len;
1005
1006         desc->data_ptrs[next_triplet].address =
1007                         rte_pktmbuf_iova_offset(m, *offset);
1008         desc->data_ptrs[next_triplet].blen = part_len;
1009         desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
1010         desc->data_ptrs[next_triplet].last = 0;
1011         desc->data_ptrs[next_triplet].dma_ext = 0;
1012         *offset += part_len;
1013         next_triplet++;
1014
1015         while (cb_len > 0) {
1016                 if (next_triplet < ACC100_DMA_MAX_NUM_POINTERS &&
1017                                 m->next != NULL) {
1018
1019                         m = m->next;
1020                         *seg_total_left = rte_pktmbuf_data_len(m);
1021                         part_len = (*seg_total_left < cb_len) ?
1022                                         *seg_total_left :
1023                                         cb_len;
1024                         desc->data_ptrs[next_triplet].address =
1025                                         rte_pktmbuf_iova_offset(m, 0);
1026                         desc->data_ptrs[next_triplet].blen = part_len;
1027                         desc->data_ptrs[next_triplet].blkid =
1028                                         ACC100_DMA_BLKID_IN;
1029                         desc->data_ptrs[next_triplet].last = 0;
1030                         desc->data_ptrs[next_triplet].dma_ext = 0;
1031                         cb_len -= part_len;
1032                         *seg_total_left -= part_len;
1033                         /* Initializing offset for next segment (mbuf) */
1034                         *offset = part_len;
1035                         next_triplet++;
1036                 } else {
1037                         rte_bbdev_log(ERR,
1038                                 "Some data still left for processing: "
1039                                 "data_left: %u, next_triplet: %u, next_mbuf: %p",
1040                                 cb_len, next_triplet, m->next);
1041                         return -EINVAL;
1042                 }
1043         }
1044         /* Store the new mbuf as it could have changed in the scatter-gather case */
1045         *input = m;
1046
1047         return next_triplet;
1048 }
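/*
 * Scatter-gather note: one address/blen/blkid triplet is emitted per mbuf
 * segment touched by the code block. When the CB spills into the next
 * segment the walk continues there with *offset rebased to the bytes
 * consumed from that segment, and the updated mbuf pointer is handed back
 * so the next CB resumes from the right place. Running out of triplets or
 * segments before cb_len is consumed is reported as -EINVAL.
 */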
1049
1050 /* Fills descriptor with data pointers of one block type.
1051  * Returns index of next triplet on success, other value if lengths of
1052  * output data and processed mbuf do not match.
1053  */
1054 static inline int
1055 acc100_dma_fill_blk_type_out(struct acc100_dma_req_desc *desc,
1056                 struct rte_mbuf *output, uint32_t out_offset,
1057                 uint32_t output_len, int next_triplet, int blk_id)
1058 {
1059         desc->data_ptrs[next_triplet].address =
1060                         rte_pktmbuf_iova_offset(output, out_offset);
1061         desc->data_ptrs[next_triplet].blen = output_len;
1062         desc->data_ptrs[next_triplet].blkid = blk_id;
1063         desc->data_ptrs[next_triplet].last = 0;
1064         desc->data_ptrs[next_triplet].dma_ext = 0;
1065         next_triplet++;
1066
1067         return next_triplet;
1068 }
1069
1070 static inline void
1071 acc100_header_init(struct acc100_dma_req_desc *desc)
1072 {
1073         desc->word0 = ACC100_DMA_DESC_TYPE;
1074         desc->word1 = 0; /**< Timestamp could be disabled */
1075         desc->word2 = 0;
1076         desc->word3 = 0;
1077         desc->numCBs = 1;
1078 }
1079
1080 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1081 /* Check if any input data is unexpectedly left for processing */
1082 static inline int
1083 check_mbuf_total_left(uint32_t mbuf_total_left)
1084 {
1085         if (mbuf_total_left == 0)
1086                 return 0;
1087         rte_bbdev_log(ERR,
1088                 "Some data still left for processing: mbuf_total_left = %u",
1089                 mbuf_total_left);
1090         return -EINVAL;
1091 }
1092 #endif
1093
1094 static inline int
1095 acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,
1096                 struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1097                 struct rte_mbuf *output, uint32_t *in_offset,
1098                 uint32_t *out_offset, uint32_t *out_length,
1099                 uint32_t *mbuf_total_left, uint32_t *seg_total_left)
1100 {
1101         int next_triplet = 1; /* FCW already done */
1102         uint16_t K, in_length_in_bits, in_length_in_bytes;
1103         struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1104
1105         acc100_header_init(desc);
1106
1107         K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1108         in_length_in_bits = K - enc->n_filler;
1109         if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) ||
1110                         (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH))
1111                 in_length_in_bits -= 24;
1112         in_length_in_bytes = in_length_in_bits >> 3;
1113
1114         if (unlikely((*mbuf_total_left == 0) ||
1115                         (*mbuf_total_left < in_length_in_bytes))) {
1116                 rte_bbdev_log(ERR,
1117                                 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1118                                 *mbuf_total_left, in_length_in_bytes);
1119                 return -1;
1120         }
1121
1122         next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
1123                         in_length_in_bytes,
1124                         seg_total_left, next_triplet);
1125         if (unlikely(next_triplet < 0)) {
1126                 rte_bbdev_log(ERR,
1127                                 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1128                                 op);
1129                 return -1;
1130         }
1131         desc->data_ptrs[next_triplet - 1].last = 1;
1132         desc->m2dlen = next_triplet;
1133         *mbuf_total_left -= in_length_in_bytes;
1134
1135         /* Set output length */
1136         /* Integer round up division by 8 */
1137         *out_length = (enc->cb_params.e + 7) >> 3;
1138
1139         next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
1140                         *out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
1141         op->ldpc_enc.output.length += *out_length;
1142         *out_offset += *out_length;
1143         desc->data_ptrs[next_triplet - 1].last = 1;
1144         desc->data_ptrs[next_triplet - 1].dma_ext = 0;
1145         desc->d2mlen = next_triplet - desc->m2dlen;
1146
1147         desc->op_addr = op;
1148
1149         return 0;
1150 }
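/*
 * Worked example (illustration only): for BG1 with z_c = 384, n_filler = 0
 * and RTE_BBDEV_LDPC_CRC_24B_ATTACH set, K = 22 * 384 = 8448 bits, so
 * (8448 - 24) / 8 = 1053 bytes are read from the input mbuf; with
 * cb_params.e = 25344 the rate-matched output triplet is sized to
 * (25344 + 7) / 8 = 3168 bytes.
 */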
1151
1152 static inline int
1153 acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
1154                 struct acc100_dma_req_desc *desc,
1155                 struct rte_mbuf **input, struct rte_mbuf *h_output,
1156                 uint32_t *in_offset, uint32_t *h_out_offset,
1157                 uint32_t *h_out_length, uint32_t *mbuf_total_left,
1158                 uint32_t *seg_total_left,
1159                 struct acc100_fcw_ld *fcw)
1160 {
1161         struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1162         int next_triplet = 1; /* FCW already done */
1163         uint32_t input_length;
1164         uint16_t output_length, crc24_overlap = 0;
1165         uint16_t sys_cols, K, h_p_size, h_np_size;
1166         bool h_comp = check_bit(dec->op_flags,
1167                         RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1168
1169         acc100_header_init(desc);
1170
1171         if (check_bit(op->ldpc_dec.op_flags,
1172                         RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
1173                 crc24_overlap = 24;
1174
1175         /* Compute some LDPC BG lengths */
1176         input_length = dec->cb_params.e;
1177         if (check_bit(op->ldpc_dec.op_flags,
1178                         RTE_BBDEV_LDPC_LLR_COMPRESSION))
1179                 input_length = (input_length * 3 + 3) / 4;
1180         sys_cols = (dec->basegraph == 1) ? 22 : 10;
1181         K = sys_cols * dec->z_c;
1182         output_length = K - dec->n_filler - crc24_overlap;
1183
1184         if (unlikely((*mbuf_total_left == 0) ||
1185                         (*mbuf_total_left < input_length))) {
1186                 rte_bbdev_log(ERR,
1187                                 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1188                                 *mbuf_total_left, input_length);
1189                 return -1;
1190         }
1191
1192         next_triplet = acc100_dma_fill_blk_type_in(desc, input,
1193                         in_offset, input_length,
1194                         seg_total_left, next_triplet);
1195
1196         if (unlikely(next_triplet < 0)) {
1197                 rte_bbdev_log(ERR,
1198                                 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1199                                 op);
1200                 return -1;
1201         }
1202
1203         if (check_bit(op->ldpc_dec.op_flags,
1204                                 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1205                 h_p_size = fcw->hcin_size0 + fcw->hcin_size1;
1206                 if (h_comp)
1207                         h_p_size = (h_p_size * 3 + 3) / 4;
1208                 desc->data_ptrs[next_triplet].address =
1209                                 dec->harq_combined_input.offset;
1210                 desc->data_ptrs[next_triplet].blen = h_p_size;
1211                 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN_HARQ;
1212                 desc->data_ptrs[next_triplet].dma_ext = 1;
1213 #ifndef ACC100_EXT_MEM
1214                 acc100_dma_fill_blk_type_out(
1215                                 desc,
1216                                 op->ldpc_dec.harq_combined_input.data,
1217                                 op->ldpc_dec.harq_combined_input.offset,
1218                                 h_p_size,
1219                                 next_triplet,
1220                                 ACC100_DMA_BLKID_IN_HARQ);
1221 #endif
1222                 next_triplet++;
1223         }
1224
1225         desc->data_ptrs[next_triplet - 1].last = 1;
1226         desc->m2dlen = next_triplet;
1227         *mbuf_total_left -= input_length;
1228
1229         next_triplet = acc100_dma_fill_blk_type_out(desc, h_output,
1230                         *h_out_offset, output_length >> 3, next_triplet,
1231                         ACC100_DMA_BLKID_OUT_HARD);
1232
1233         if (check_bit(op->ldpc_dec.op_flags,
1234                                 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1235                 /* Pruned size of the HARQ */
1236                 h_p_size = fcw->hcout_size0 + fcw->hcout_size1;
1237                 /* Non-Pruned size of the HARQ */
1238                 h_np_size = fcw->hcout_offset > 0 ?
1239                                 fcw->hcout_offset + fcw->hcout_size1 :
1240                                 h_p_size;
1241                 if (h_comp) {
1242                         h_np_size = (h_np_size * 3 + 3) / 4;
1243                         h_p_size = (h_p_size * 3 + 3) / 4;
1244                 }
1245                 dec->harq_combined_output.length = h_np_size;
1246                 desc->data_ptrs[next_triplet].address =
1247                                 dec->harq_combined_output.offset;
1248                 desc->data_ptrs[next_triplet].blen = h_p_size;
1249                 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARQ;
1250                 desc->data_ptrs[next_triplet].dma_ext = 1;
1251 #ifndef ACC100_EXT_MEM
1252                 acc100_dma_fill_blk_type_out(
1253                                 desc,
1254                                 dec->harq_combined_output.data,
1255                                 dec->harq_combined_output.offset,
1256                                 h_p_size,
1257                                 next_triplet,
1258                                 ACC100_DMA_BLKID_OUT_HARQ);
1259 #endif
1260                 next_triplet++;
1261         }
1262
1263         *h_out_length = output_length >> 3;
1264         dec->hard_output.length += *h_out_length;
1265         *h_out_offset += *h_out_length;
1266         desc->data_ptrs[next_triplet - 1].last = 1;
1267         desc->d2mlen = next_triplet - desc->m2dlen;
1268
1269         desc->op_addr = op;
1270
1271         return 0;
1272 }
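/*
 * Worked example (illustration only): with RTE_BBDEV_LDPC_LLR_COMPRESSION
 * set, an e = 10000 soft input shrinks to (10000 * 3 + 3) / 4 = 7500 bytes
 * of packed LLRs read from the mbuf. The same 3/4 factor is applied to the
 * HARQ combined buffers when 6-bit compression is enabled, e.g. a pruned
 * HARQ size of 4608 becomes (4608 * 3 + 3) / 4 = 3456 bytes on the bus.
 */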
1273
1274 static inline void
1275 acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,
1276                 struct acc100_dma_req_desc *desc,
1277                 struct rte_mbuf *input, struct rte_mbuf *h_output,
1278                 uint32_t *in_offset, uint32_t *h_out_offset,
1279                 uint32_t *h_out_length,
1280                 union acc100_harq_layout_data *harq_layout)
1281 {
1282         int next_triplet = 1; /* FCW already done */
1283         desc->data_ptrs[next_triplet].address =
1284                         rte_pktmbuf_iova_offset(input, *in_offset);
1285         next_triplet++;
1286
1287         if (check_bit(op->ldpc_dec.op_flags,
1288                                 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1289                 struct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;
1290                 desc->data_ptrs[next_triplet].address = hi.offset;
1291 #ifndef ACC100_EXT_MEM
1292                 desc->data_ptrs[next_triplet].address =
1293                                 rte_pktmbuf_iova_offset(hi.data, hi.offset);
1294 #endif
1295                 next_triplet++;
1296         }
1297
1298         desc->data_ptrs[next_triplet].address =
1299                         rte_pktmbuf_iova_offset(h_output, *h_out_offset);
1300         *h_out_length = desc->data_ptrs[next_triplet].blen;
1301         next_triplet++;
1302
1303         if (check_bit(op->ldpc_dec.op_flags,
1304                                 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1305                 desc->data_ptrs[next_triplet].address =
1306                                 op->ldpc_dec.harq_combined_output.offset;
1307                 /* Adjust based on previous operation */
1308                 struct rte_bbdev_dec_op *prev_op = desc->op_addr;
1309                 op->ldpc_dec.harq_combined_output.length =
1310                                 prev_op->ldpc_dec.harq_combined_output.length;
1311                 int16_t hq_idx = op->ldpc_dec.harq_combined_output.offset /
1312                                 ACC100_HARQ_OFFSET;
1313                 int16_t prev_hq_idx =
1314                                 prev_op->ldpc_dec.harq_combined_output.offset
1315                                 / ACC100_HARQ_OFFSET;
1316                 harq_layout[hq_idx].val = harq_layout[prev_hq_idx].val;
1317 #ifndef ACC100_EXT_MEM
1318                 struct rte_bbdev_op_data ho =
1319                                 op->ldpc_dec.harq_combined_output;
1320                 desc->data_ptrs[next_triplet].address =
1321                                 rte_pktmbuf_iova_offset(ho.data, ho.offset);
1322 #endif
1323                 next_triplet++;
1324         }
1325
1326         op->ldpc_dec.hard_output.length += *h_out_length;
1327         desc->op_addr = op;
1328 }
1329
1330
1331 /* Enqueue a number of operations to HW and update software rings */
1332 static inline void
1333 acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,
1334                 struct rte_bbdev_stats *queue_stats)
1335 {
1336         union acc100_enqueue_reg_fmt enq_req;
1337 #ifdef RTE_BBDEV_OFFLOAD_COST
1338         uint64_t start_time = 0;
1339         queue_stats->acc_offload_cycles = 0;
1340 #else
1341         RTE_SET_USED(queue_stats);
1342 #endif
1343
1344         enq_req.val = 0;
1345         /* Setting offset, 100b for 256 DMA Desc */
1346         enq_req.addr_offset = ACC100_DESC_OFFSET;
1347
1348         /* Split ops into batches */
1349         do {
1350                 union acc100_dma_desc *desc;
1351                 uint16_t enq_batch_size;
1352                 uint64_t offset;
1353                 rte_iova_t req_elem_addr;
1354
1355                 enq_batch_size = RTE_MIN(n, MAX_ENQ_BATCH_SIZE);
1356
1357                 /* Set flag on last descriptor in a batch */
1358                 desc = q->ring_addr + ((q->sw_ring_head + enq_batch_size - 1) &
1359                                 q->sw_ring_wrap_mask);
1360                 desc->req.last_desc_in_batch = 1;
1361
1362                 /* Calculate the 1st descriptor's address */
1363                 offset = ((q->sw_ring_head & q->sw_ring_wrap_mask) *
1364                                 sizeof(union acc100_dma_desc));
1365                 req_elem_addr = q->ring_addr_iova + offset;
1366
1367                 /* Fill enqueue struct */
1368                 enq_req.num_elem = enq_batch_size;
1369                 /* low 6 bits are not needed */
1370                 enq_req.req_elem_addr = (uint32_t)(req_elem_addr >> 6);
1371
1372 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1373                 rte_memdump(stderr, "Req sdone", desc, sizeof(*desc));
1374 #endif
1375                 rte_bbdev_log_debug(
1376                                 "Enqueue %u reqs (phys %#"PRIx64") to reg %p",
1377                                 enq_batch_size,
1378                                 req_elem_addr,
1379                                 (void *)q->mmio_reg_enqueue);
1380
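                /* Make sure all descriptor stores are visible to the device before ringing the doorbell */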
1381                 rte_wmb();
1382
1383 #ifdef RTE_BBDEV_OFFLOAD_COST
1384                 /* Start time measurement for enqueue function offload. */
1385                 start_time = rte_rdtsc_precise();
1386 #endif
1387                 rte_bbdev_log(DEBUG, "Debug : MMIO Enqueue");
1388                 mmio_write(q->mmio_reg_enqueue, enq_req.val);
1389
1390 #ifdef RTE_BBDEV_OFFLOAD_COST
1391                 queue_stats->acc_offload_cycles +=
1392                                 rte_rdtsc_precise() - start_time;
1393 #endif
1394
1395                 q->aq_enqueued++;
1396                 q->sw_ring_head += enq_batch_size;
1397                 n -= enq_batch_size;
1398
1399         } while (n);
1400
1401
1402 }
1403
1404 /* Enqueue several muxed encode operations on one CB-mode descriptor for ACC100 */
1405 static inline int
1406 enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,
1407                 uint16_t total_enqueued_cbs, int16_t num)
1408 {
1409         union acc100_dma_desc *desc = NULL;
1410         uint32_t out_length;
1411         struct rte_mbuf *output_head, *output;
1412         int i, next_triplet;
1413         uint16_t  in_length_in_bytes;
1414         struct rte_bbdev_op_ldpc_enc *enc = &ops[0]->ldpc_enc;
1415
1416         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1417                         & q->sw_ring_wrap_mask);
1418         desc = q->ring_addr + desc_idx;
1419         acc100_fcw_le_fill(ops[0], &desc->req.fcw_le, num);
1420
1421         /* This could be done at polling time instead */
1422         desc->req.word0 = ACC100_DMA_DESC_TYPE;
1423         desc->req.word1 = 0; /**< Timestamp could be disabled */
1424         desc->req.word2 = 0;
1425         desc->req.word3 = 0;
1426         desc->req.numCBs = num;
1427
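        /* All muxed CBs are assumed to share the input length of the first op */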
1428         in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;
1429         out_length = (enc->cb_params.e + 7) >> 3;
1430         desc->req.m2dlen = 1 + num;
1431         desc->req.d2mlen = num;
1432         next_triplet = 1;
1433
1434         for (i = 0; i < num; i++) {
1435                 desc->req.data_ptrs[next_triplet].address =
1436                         rte_pktmbuf_iova_offset(ops[i]->ldpc_enc.input.data, 0);
1437                 desc->req.data_ptrs[next_triplet].blen = in_length_in_bytes;
1438                 next_triplet++;
1439                 desc->req.data_ptrs[next_triplet].address =
1440                                 rte_pktmbuf_iova_offset(
1441                                 ops[i]->ldpc_enc.output.data, 0);
1442                 desc->req.data_ptrs[next_triplet].blen = out_length;
1443                 next_triplet++;
1444                 ops[i]->ldpc_enc.output.length = out_length;
1445                 output_head = output = ops[i]->ldpc_enc.output.data;
1446                 mbuf_append(output_head, output, out_length);
1447                 output->data_len = out_length;
1448         }
1449
1450         desc->req.op_addr = ops[0];
1451
1452 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1453         rte_memdump(stderr, "FCW", &desc->req.fcw_le,
1454                         sizeof(desc->req.fcw_le) - 8);
1455         rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1456 #endif
1457
1458         /* All requested CBs (muxed ops) were successfully prepared to enqueue */
1459         return num;
1460 }
1461
1462 /* Enqueue one encode operation for ACC100 device in CB mode */
1463 static inline int
1464 enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
1465                 uint16_t total_enqueued_cbs)
1466 {
1467         union acc100_dma_desc *desc = NULL;
1468         int ret;
1469         uint32_t in_offset, out_offset, out_length, mbuf_total_left,
1470                 seg_total_left;
1471         struct rte_mbuf *input, *output_head, *output;
1472
1473         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1474                         & q->sw_ring_wrap_mask);
1475         desc = q->ring_addr + desc_idx;
1476         acc100_fcw_le_fill(op, &desc->req.fcw_le, 1);
1477
1478         input = op->ldpc_enc.input.data;
1479         output_head = output = op->ldpc_enc.output.data;
1480         in_offset = op->ldpc_enc.input.offset;
1481         out_offset = op->ldpc_enc.output.offset;
1482         out_length = 0;
1483         mbuf_total_left = op->ldpc_enc.input.length;
1484         seg_total_left = rte_pktmbuf_data_len(op->ldpc_enc.input.data)
1485                         - in_offset;
1486
1487         ret = acc100_dma_desc_le_fill(op, &desc->req, &input, output,
1488                         &in_offset, &out_offset, &out_length, &mbuf_total_left,
1489                         &seg_total_left);
1490
1491         if (unlikely(ret < 0))
1492                 return ret;
1493
1494         mbuf_append(output_head, output, out_length);
1495
1496 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1497         rte_memdump(stderr, "FCW", &desc->req.fcw_le,
1498                         sizeof(desc->req.fcw_le) - 8);
1499         rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1500
1501         if (check_mbuf_total_left(mbuf_total_left) != 0)
1502                 return -EINVAL;
1503 #endif
1504         /* One CB (one op) was successfully prepared to enqueue */
1505         return 1;
1506 }
1507
1508 /* Enqueue one decode operation for ACC100 device in CB mode */
1509 static inline int
1510 enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
1511                 uint16_t total_enqueued_cbs, bool same_op)
1512 {
1513         int ret;
1514
1515         union acc100_dma_desc *desc;
1516         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1517                         & q->sw_ring_wrap_mask);
1518         desc = q->ring_addr + desc_idx;
1519         struct rte_mbuf *input, *h_output_head, *h_output;
1520         uint32_t in_offset, h_out_offset, mbuf_total_left, h_out_length = 0;
1521         input = op->ldpc_dec.input.data;
1522         h_output_head = h_output = op->ldpc_dec.hard_output.data;
1523         in_offset = op->ldpc_dec.input.offset;
1524         h_out_offset = op->ldpc_dec.hard_output.offset;
1525         mbuf_total_left = op->ldpc_dec.input.length;
1526 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1527         if (unlikely(input == NULL)) {
1528                 rte_bbdev_log(ERR, "Invalid mbuf pointer");
1529                 return -EFAULT;
1530         }
1531 #endif
1532         union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
1533
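        /* For an op identical to the previous one, reuse that descriptor content and only refresh its data pointers */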
1534         if (same_op) {
1535                 union acc100_dma_desc *prev_desc;
1536                 desc_idx = ((q->sw_ring_head + total_enqueued_cbs - 1)
1537                                 & q->sw_ring_wrap_mask);
1538                 prev_desc = q->ring_addr + desc_idx;
1539                 uint8_t *prev_ptr = (uint8_t *) prev_desc;
1540                 uint8_t *new_ptr = (uint8_t *) desc;
1541                 /* Copy first 4 words and BDESCs */
1542                 rte_memcpy(new_ptr, prev_ptr, ACC100_5GUL_SIZE_0);
1543                 rte_memcpy(new_ptr + ACC100_5GUL_OFFSET_0,
1544                                 prev_ptr + ACC100_5GUL_OFFSET_0,
1545                                 ACC100_5GUL_SIZE_1);
1546                 desc->req.op_addr = prev_desc->req.op_addr;
1547                 /* Copy FCW */
1548                 rte_memcpy(new_ptr + ACC100_DESC_FCW_OFFSET,
1549                                 prev_ptr + ACC100_DESC_FCW_OFFSET,
1550                                 ACC100_FCW_LD_BLEN);
1551                 acc100_dma_desc_ld_update(op, &desc->req, input, h_output,
1552                                 &in_offset, &h_out_offset,
1553                                 &h_out_length, harq_layout);
1554         } else {
1555                 struct acc100_fcw_ld *fcw;
1556                 uint32_t seg_total_left;
1557                 fcw = &desc->req.fcw_ld;
1558                 acc100_fcw_ld_fill(op, fcw, harq_layout);
1559
1560                 /* Special handling when overusing mbuf */
1561                 if (fcw->rm_e < ACC100_MAX_E_MBUF)
1562                         seg_total_left = rte_pktmbuf_data_len(input)
1563                                         - in_offset;
1564                 else
1565                         seg_total_left = fcw->rm_e;
1566
1567                 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input, h_output,
1568                                 &in_offset, &h_out_offset,
1569                                 &h_out_length, &mbuf_total_left,
1570                                 &seg_total_left, fcw);
1571                 if (unlikely(ret < 0))
1572                         return ret;
1573         }
1574
1575         /* Hard output */
1576         mbuf_append(h_output_head, h_output, h_out_length);
1577 #ifndef ACC100_EXT_MEM
1578         if (op->ldpc_dec.harq_combined_output.length > 0) {
1579                 /* Push the HARQ output into host memory */
1580                 struct rte_mbuf *hq_output_head, *hq_output;
1581                 hq_output_head = op->ldpc_dec.harq_combined_output.data;
1582                 hq_output = op->ldpc_dec.harq_combined_output.data;
1583                 mbuf_append(hq_output_head, hq_output,
1584                                 op->ldpc_dec.harq_combined_output.length);
1585         }
1586 #endif
1587
1588 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1589         rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
1590                         sizeof(desc->req.fcw_ld) - 8);
1591         rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1592 #endif
1593
1594         /* One CB (one op) was successfully prepared to enqueue */
1595         return 1;
1596 }
1597
1598
1599 /* Enqueue one decode operation for ACC100 device in TB mode */
1600 static inline int
1601 enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
1602                 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
1603 {
1604         union acc100_dma_desc *desc = NULL;
1605         int ret;
1606         uint8_t r, c;
1607         uint32_t in_offset, h_out_offset,
1608                 h_out_length, mbuf_total_left, seg_total_left;
1609         struct rte_mbuf *input, *h_output_head, *h_output;
1610         uint16_t current_enqueued_cbs = 0;
1611
1612         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1613                         & q->sw_ring_wrap_mask);
1614         desc = q->ring_addr + desc_idx;
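        /* Each DMA descriptor is 256 bytes; the FCW sits at ACC100_DESC_FCW_OFFSET within it */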
1615         uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
1616         union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
1617         acc100_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout);
1618
1619         input = op->ldpc_dec.input.data;
1620         h_output_head = h_output = op->ldpc_dec.hard_output.data;
1621         in_offset = op->ldpc_dec.input.offset;
1622         h_out_offset = op->ldpc_dec.hard_output.offset;
1623         h_out_length = 0;
1624         mbuf_total_left = op->ldpc_dec.input.length;
1625         c = op->ldpc_dec.tb_params.c;
1626         r = op->ldpc_dec.tb_params.r;
1627
1628         while (mbuf_total_left > 0 && r < c) {
1629
1630                 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
1631
1632                 /* Set up DMA descriptor */
1633                 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
1634                                 & q->sw_ring_wrap_mask);
1635                 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
1636                 desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
1637                 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input,
1638                                 h_output, &in_offset, &h_out_offset,
1639                                 &h_out_length,
1640                                 &mbuf_total_left, &seg_total_left,
1641                                 &desc->req.fcw_ld);
1642
1643                 if (unlikely(ret < 0))
1644                         return ret;
1645
1646                 /* Hard output */
1647                 mbuf_append(h_output_head, h_output, h_out_length);
1648
1649                 /* Set total number of CBs in TB */
1650                 desc->req.cbs_in_tb = cbs_in_tb;
1651 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1652                 rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
1653                                 sizeof(desc->req.fcw_ld) - 8);
1654                 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1655 #endif
1656
1657                 if (seg_total_left == 0) {
1658                         /* Go to the next mbuf */
1659                         input = input->next;
1660                         in_offset = 0;
1661                         h_output = h_output->next;
1662                         h_out_offset = 0;
1663                 }
1664                 total_enqueued_cbs++;
1665                 current_enqueued_cbs++;
1666                 r++;
1667         }
1668
1669         if (unlikely(desc == NULL))
1670                 return current_enqueued_cbs;
1671
1672 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1673         if (check_mbuf_total_left(mbuf_total_left) != 0)
1674                 return -EINVAL;
1675 #endif
1676         /* Set SDone on last CB descriptor for TB mode */
1677         desc->req.sdone_enable = 1;
1678         desc->req.irq_enable = q->irq_enable;
1679
1680         return current_enqueued_cbs;
1681 }
1682
1683
1684 /* Calculates number of CBs in processed encoder TB based on 'r' and input
1685  * length.
1686  */
1687 static inline uint8_t
1688 get_num_cbs_in_tb_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)
1689 {
1690         uint8_t c, c_neg, r, crc24_bits = 0;
1691         uint16_t k, k_neg, k_pos;
1692         uint8_t cbs_in_tb = 0;
1693         int32_t length;
1694
1695         length = turbo_enc->input.length;
1696         r = turbo_enc->tb_params.r;
1697         c = turbo_enc->tb_params.c;
1698         c_neg = turbo_enc->tb_params.c_neg;
1699         k_neg = turbo_enc->tb_params.k_neg;
1700         k_pos = turbo_enc->tb_params.k_pos;
1701         crc24_bits = 0;
1702         if (check_bit(turbo_enc->op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
1703                 crc24_bits = 24;
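        /* Walk the CBs, subtracting each CB payload in bytes (K minus attached CRC) from the TB length */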
1704         while (length > 0 && r < c) {
1705                 k = (r < c_neg) ? k_neg : k_pos;
1706                 length -= (k - crc24_bits) >> 3;
1707                 r++;
1708                 cbs_in_tb++;
1709         }
1710
1711         return cbs_in_tb;
1712 }
1713
1714 /* Calculates number of CBs in processed decoder TB based on 'r' and input
1715  * length.
1716  */
1717 static inline uint16_t
1718 get_num_cbs_in_tb_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)
1719 {
1720         uint8_t c, c_neg, r = 0;
1721         uint16_t kw, k, k_neg, k_pos, cbs_in_tb = 0;
1722         int32_t length;
1723
1724         length = turbo_dec->input.length;
1725         r = turbo_dec->tb_params.r;
1726         c = turbo_dec->tb_params.c;
1727         c_neg = turbo_dec->tb_params.c_neg;
1728         k_neg = turbo_dec->tb_params.k_neg;
1729         k_pos = turbo_dec->tb_params.k_pos;
1730         while (length > 0 && r < c) {
1731                 k = (r < c_neg) ? k_neg : k_pos;
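                /* Each CB consumes Kw = 3 * ((K + 4) rounded up to a multiple of 32) bytes of soft input */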
1732                 kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
1733                 length -= kw;
1734                 r++;
1735                 cbs_in_tb++;
1736         }
1737
1738         return cbs_in_tb;
1739 }
1740
1741 /* Calculates number of CBs in processed LDPC decoder TB based on 'r' and input
1742  * length.
1743  */
1744 static inline uint16_t
1745 get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)
1746 {
1747         uint16_t r, cbs_in_tb = 0;
1748         int32_t length = ldpc_dec->input.length;
1749         r = ldpc_dec->tb_params.r;
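        /* The first 'cab' CBs consume 'ea' bytes of input each, the remaining ones 'eb' */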
1750         while (length > 0 && r < ldpc_dec->tb_params.c) {
1751                 length -=  (r < ldpc_dec->tb_params.cab) ?
1752                                 ldpc_dec->tb_params.ea :
1753                                 ldpc_dec->tb_params.eb;
1754                 r++;
1755                 cbs_in_tb++;
1756         }
1757         return cbs_in_tb;
1758 }
1759
1760 /* Check we can mux encode operations with common FCW */
1761 static inline bool
1762 check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {
1763         uint16_t i;
1764         if (num <= 1)
1765                 return false;
1766         for (i = 1; i < num; ++i) {
1767                 /* Only mux compatible code blocks */
1768                 if (memcmp((uint8_t *)(&ops[i]->ldpc_enc) + ACC100_ENC_OFFSET,
1769                                 (uint8_t *)(&ops[0]->ldpc_enc) +
1770                                 ACC100_ENC_OFFSET,
1771                                 ACC100_CMP_ENC_SIZE) != 0)
1772                         return false;
1773         }
1774         return true;
1775 }
1776
1777 /* Enqueue encode operations for ACC100 device in CB mode. */
1778 static inline uint16_t
1779 acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
1780                 struct rte_bbdev_enc_op **ops, uint16_t num)
1781 {
1782         struct acc100_queue *q = q_data->queue_private;
1783         int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
1784         uint16_t i = 0;
1785         union acc100_dma_desc *desc;
1786         int ret, desc_idx = 0;
1787         int16_t enq, left = num;
1788
1789         while (left > 0) {
1790                 if (unlikely(avail < 1))
1791                         break;
1792                 avail--;
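                /* Up to ACC100_MUX_5GDL_DESC compatible ops may be muxed on a single descriptor */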
1793                 enq = RTE_MIN(left, ACC100_MUX_5GDL_DESC);
1794                 if (check_mux(&ops[i], enq)) {
1795                         ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],
1796                                         desc_idx, enq);
1797                         if (ret < 0)
1798                                 break;
1799                         i += enq;
1800                 } else {
1801                         ret = enqueue_ldpc_enc_one_op_cb(q, ops[i], desc_idx);
1802                         if (ret < 0)
1803                                 break;
1804                         i++;
1805                 }
1806                 desc_idx++;
1807                 left = num - i;
1808         }
1809
1810         if (unlikely(i == 0))
1811                 return 0; /* Nothing to enqueue */
1812
1813         /* Set SDone in last CB in enqueued ops for CB mode */
1814         desc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1)
1815                         & q->sw_ring_wrap_mask);
1816         desc->req.sdone_enable = 1;
1817         desc->req.irq_enable = q->irq_enable;
1818
1819         acc100_dma_enqueue(q, desc_idx, &q_data->queue_stats);
1820
1821         /* Update stats */
1822         q_data->queue_stats.enqueued_count += i;
1823         q_data->queue_stats.enqueue_err_count += num - i;
1824
1825         return i;
1826 }
1827
1828 /* Enqueue encode operations for ACC100 device. */
1829 static uint16_t
1830 acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1831                 struct rte_bbdev_enc_op **ops, uint16_t num)
1832 {
1833         if (unlikely(num == 0))
1834                 return 0;
1835         return acc100_enqueue_ldpc_enc_cb(q_data, ops, num);
1836 }
1837
1838 /* Check we can mux decode operations with common FCW */
1839 static inline bool
1840 cmp_ldpc_dec_op(struct rte_bbdev_dec_op **ops) {
1841         /* Only mux compatible code blocks */
1842         if (memcmp((uint8_t *)(&ops[0]->ldpc_dec) + ACC100_DEC_OFFSET,
1843                         (uint8_t *)(&ops[1]->ldpc_dec) +
1844                         ACC100_DEC_OFFSET, ACC100_CMP_DEC_SIZE) != 0) {
1845                 return false;
1846         } else
1847                 return true;
1848 }
1849
1850
1851 /* Enqueue decode operations for ACC100 device in TB mode */
1852 static uint16_t
1853 acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,
1854                 struct rte_bbdev_dec_op **ops, uint16_t num)
1855 {
1856         struct acc100_queue *q = q_data->queue_private;
1857         int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
1858         uint16_t i, enqueued_cbs = 0;
1859         uint8_t cbs_in_tb;
1860         int ret;
1861
1862         for (i = 0; i < num; ++i) {
1863                 cbs_in_tb = get_num_cbs_in_tb_ldpc_dec(&ops[i]->ldpc_dec);
1864                 /* Check if there is enough space for further processing */
1865                 if (unlikely(avail - cbs_in_tb < 0))
1866                         break;
1867                 avail -= cbs_in_tb;
1868
1869                 ret = enqueue_ldpc_dec_one_op_tb(q, ops[i],
1870                                 enqueued_cbs, cbs_in_tb);
1871                 if (ret < 0)
1872                         break;
1873                 enqueued_cbs += ret;
1874         }
1875
1876         acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
1877
1878         /* Update stats */
1879         q_data->queue_stats.enqueued_count += i;
1880         q_data->queue_stats.enqueue_err_count += num - i;
1881         return i;
1882 }
1883
1884 /* Enqueue decode operations for ACC100 device in CB mode */
1885 static uint16_t
1886 acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
1887                 struct rte_bbdev_dec_op **ops, uint16_t num)
1888 {
1889         struct acc100_queue *q = q_data->queue_private;
1890         int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
1891         uint16_t i;
1892         union acc100_dma_desc *desc;
1893         int ret;
1894         bool same_op = false;
1895         for (i = 0; i < num; ++i) {
1896                 /* Check if there is enough space for further processing */
1897                 if (unlikely(avail < 1))
1898                         break;
1899                 avail -= 1;
1900
1901                 if (i > 0)
1902                         same_op = cmp_ldpc_dec_op(&ops[i-1]);
1903                 rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d",
1904                         i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index,
1905                         ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count,
1906                         ops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c,
1907                         ops[i]->ldpc_dec.n_cb, ops[i]->ldpc_dec.q_m,
1908                         ops[i]->ldpc_dec.n_filler, ops[i]->ldpc_dec.cb_params.e,
1909                         same_op);
1910                 ret = enqueue_ldpc_dec_one_op_cb(q, ops[i], i, same_op);
1911                 if (ret < 0)
1912                         break;
1913         }
1914
1915         if (unlikely(i == 0))
1916                 return 0; /* Nothing to enqueue */
1917
1918         /* Set SDone in last CB in enqueued ops for CB mode */
1919         desc = q->ring_addr + ((q->sw_ring_head + i - 1)
1920                         & q->sw_ring_wrap_mask);
1921
1922         desc->req.sdone_enable = 1;
1923         desc->req.irq_enable = q->irq_enable;
1924
1925         acc100_dma_enqueue(q, i, &q_data->queue_stats);
1926
1927         /* Update stats */
1928         q_data->queue_stats.enqueued_count += i;
1929         q_data->queue_stats.enqueue_err_count += num - i;
1930         return i;
1931 }
1932
1933 /* Enqueue decode operations for ACC100 device. */
1934 static uint16_t
1935 acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1936                 struct rte_bbdev_dec_op **ops, uint16_t num)
1937 {
1938         struct acc100_queue *q = q_data->queue_private;
1939         int32_t aq_avail = q->aq_depth +
1940                         (q->aq_dequeued - q->aq_enqueued) / 128;
1941
1942         if (unlikely((aq_avail == 0) || (num == 0)))
1943                 return 0;
1944
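        /* code_block_mode 0 means transport block mode */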
1945         if (ops[0]->ldpc_dec.code_block_mode == 0)
1946                 return acc100_enqueue_ldpc_dec_tb(q_data, ops, num);
1947         else
1948                 return acc100_enqueue_ldpc_dec_cb(q_data, ops, num);
1949 }
1950
1951
1952 /* Dequeue one CB-mode encode descriptor (may carry several muxed CBs) from ACC100 */
1953 static inline int
1954 dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
1955                 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
1956 {
1957         union acc100_dma_desc *desc, atom_desc;
1958         union acc100_dma_rsp_desc rsp;
1959         struct rte_bbdev_enc_op *op;
1960         int i;
1961
1962         desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
1963                         & q->sw_ring_wrap_mask);
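        /* Load the 64-bit descriptor header atomically for a consistent view of the done bits */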
1964         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
1965                         __ATOMIC_RELAXED);
1966
1967         /* Check fdone bit */
1968         if (!(atom_desc.rsp.val & ACC100_FDONE))
1969                 return -1;
1970
1971         rsp.val = atom_desc.rsp.val;
1972         rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
1973
1974         /* Dequeue */
1975         op = desc->req.op_addr;
1976
1977         /* Clearing status, it will be set based on response */
1978         op->status = 0;
1979
1980         op->status |= ((rsp.input_err)
1981                         ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
1982         op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
1983         op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
1984
1985         if (desc->req.last_desc_in_batch) {
1986                 (*aq_dequeued)++;
1987                 desc->req.last_desc_in_batch = 0;
1988         }
1989         desc->rsp.val = ACC100_DMA_DESC_TYPE;
1990         desc->rsp.add_info_0 = 0; /* Reserved bits */
1991         desc->rsp.add_info_1 = 0; /* Reserved bits */
1992
1993         /* Flag that the muxing causes loss of opaque data */
1994         op->opaque_data = (void *)-1;
1995         for (i = 0 ; i < desc->req.numCBs; i++)
1996                 ref_op[i] = op;
1997
1998         /* All CBs muxed on this descriptor were successfully dequeued */
1999         return desc->req.numCBs;
2000 }
2001
2002 /* Dequeue one encode operation from ACC100 device in TB mode */
2003 static inline int
2004 dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
2005                 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
2006 {
2007         union acc100_dma_desc *desc, *last_desc, atom_desc;
2008         union acc100_dma_rsp_desc rsp;
2009         struct rte_bbdev_enc_op *op;
2010         uint8_t i = 0;
2011         uint16_t current_dequeued_cbs = 0, cbs_in_tb;
2012
2013         desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
2014                         & q->sw_ring_wrap_mask);
2015         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2016                         __ATOMIC_RELAXED);
2017
2018         /* Check fdone bit */
2019         if (!(atom_desc.rsp.val & ACC100_FDONE))
2020                 return -1;
2021
2022         /* Get number of CBs in dequeued TB */
2023         cbs_in_tb = desc->req.cbs_in_tb;
2024         /* Get last CB */
2025         last_desc = q->ring_addr + ((q->sw_ring_tail
2026                         + total_dequeued_cbs + cbs_in_tb - 1)
2027                         & q->sw_ring_wrap_mask);
2028         /* Check if last CB in TB is ready to dequeue (and thus
2029          * the whole TB) - checking sdone bit. If not return.
2030          */
2031         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
2032                         __ATOMIC_RELAXED);
2033         if (!(atom_desc.rsp.val & ACC100_SDONE))
2034                 return -1;
2035
2036         /* Dequeue */
2037         op = desc->req.op_addr;
2038
2039         /* Clearing status, it will be set based on response */
2040         op->status = 0;
2041
2042         while (i < cbs_in_tb) {
2043                 desc = q->ring_addr + ((q->sw_ring_tail
2044                                 + total_dequeued_cbs)
2045                                 & q->sw_ring_wrap_mask);
2046                 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2047                                 __ATOMIC_RELAXED);
2048                 rsp.val = atom_desc.rsp.val;
2049                 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
2050                                 rsp.val);
2051
2052                 op->status |= ((rsp.input_err)
2053                                 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
2054                 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2055                 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2056
2057                 if (desc->req.last_desc_in_batch) {
2058                         (*aq_dequeued)++;
2059                         desc->req.last_desc_in_batch = 0;
2060                 }
2061                 desc->rsp.val = ACC100_DMA_DESC_TYPE;
2062                 desc->rsp.add_info_0 = 0;
2063                 desc->rsp.add_info_1 = 0;
2064                 total_dequeued_cbs++;
2065                 current_dequeued_cbs++;
2066                 i++;
2067         }
2068
2069         *ref_op = op;
2070
2071         return current_dequeued_cbs;
2072 }
2073
2074 /* Dequeue one decode operation from ACC100 device in CB mode */
2075 static inline int
2076 dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
2077                 struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
2078                 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
2079 {
2080         union acc100_dma_desc *desc, atom_desc;
2081         union acc100_dma_rsp_desc rsp;
2082         struct rte_bbdev_dec_op *op;
2083
2084         desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2085                         & q->sw_ring_wrap_mask);
2086         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2087                         __ATOMIC_RELAXED);
2088
2089         /* Check fdone bit */
2090         if (!(atom_desc.rsp.val & ACC100_FDONE))
2091                 return -1;
2092
2093         rsp.val = atom_desc.rsp.val;
2094         rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
2095
2096         /* Dequeue */
2097         op = desc->req.op_addr;
2098
2099         /* Clearing status, it will be set based on response */
2100         op->status = 0;
2101         op->status |= ((rsp.input_err)
2102                         ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
2103         op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2104         op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2105         if (op->status != 0)
2106                 q_data->queue_stats.dequeue_err_count++;
2107
2108         /* Report CRC status only when no other error is set */
2109         if (!op->status)
2110                 op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
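        /* The turbo iteration count is reported by the device in half-iterations */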
2111         op->turbo_dec.iter_count = (uint8_t) rsp.iter_cnt / 2;
2112         /* Check if this is the last desc in batch (Atomic Queue) */
2113         if (desc->req.last_desc_in_batch) {
2114                 (*aq_dequeued)++;
2115                 desc->req.last_desc_in_batch = 0;
2116         }
2117         desc->rsp.val = ACC100_DMA_DESC_TYPE;
2118         desc->rsp.add_info_0 = 0;
2119         desc->rsp.add_info_1 = 0;
2120         *ref_op = op;
2121
2122         /* One CB (op) was successfully dequeued */
2123         return 1;
2124 }
2125
2126 /* Dequeue one decode operation from ACC100 device in CB mode */
2127 static inline int
2128 dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
2129                 struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
2130                 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
2131 {
2132         union acc100_dma_desc *desc, atom_desc;
2133         union acc100_dma_rsp_desc rsp;
2134         struct rte_bbdev_dec_op *op;
2135
2136         desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2137                         & q->sw_ring_wrap_mask);
2138         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2139                         __ATOMIC_RELAXED);
2140
2141         /* Check fdone bit */
2142         if (!(atom_desc.rsp.val & ACC100_FDONE))
2143                 return -1;
2144
2145         rsp.val = atom_desc.rsp.val;
2146
2147         /* Dequeue */
2148         op = desc->req.op_addr;
2149
2150         /* Clearing status, it will be set based on response */
2151         op->status = 0;
2152         op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;
2153         op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;
2154         op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;
2155         if (op->status != 0)
2156                 q_data->queue_stats.dequeue_err_count++;
2157
2158         op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
2159         if (op->ldpc_dec.hard_output.length > 0 && !rsp.synd_ok)
2160                 op->status |= 1 << RTE_BBDEV_SYNDROME_ERROR;
2161         op->ldpc_dec.iter_count = (uint8_t) rsp.iter_cnt;
2162
2163         /* Check if this is the last desc in batch (Atomic Queue) */
2164         if (desc->req.last_desc_in_batch) {
2165                 (*aq_dequeued)++;
2166                 desc->req.last_desc_in_batch = 0;
2167         }
2168
2169         desc->rsp.val = ACC100_DMA_DESC_TYPE;
2170         desc->rsp.add_info_0 = 0;
2171         desc->rsp.add_info_1 = 0;
2172
2173         *ref_op = op;
2174
2175         /* One CB (op) was successfully dequeued */
2176         return 1;
2177 }
2178
2179 /* Dequeue one decode operation from ACC100 device in TB mode. */
2180 static inline int
2181 dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
2182                 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
2183 {
2184         union acc100_dma_desc *desc, *last_desc, atom_desc;
2185         union acc100_dma_rsp_desc rsp;
2186         struct rte_bbdev_dec_op *op;
2187         uint8_t cbs_in_tb = 1, cb_idx = 0;
2188
2189         desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2190                         & q->sw_ring_wrap_mask);
2191         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2192                         __ATOMIC_RELAXED);
2193
2194         /* Check fdone bit */
2195         if (!(atom_desc.rsp.val & ACC100_FDONE))
2196                 return -1;
2197
2198         /* Dequeue */
2199         op = desc->req.op_addr;
2200
2201         /* Get number of CBs in dequeued TB */
2202         cbs_in_tb = desc->req.cbs_in_tb;
2203         /* Get last CB */
2204         last_desc = q->ring_addr + ((q->sw_ring_tail
2205                         + dequeued_cbs + cbs_in_tb - 1)
2206                         & q->sw_ring_wrap_mask);
2207         /* Check if last CB in TB is ready to dequeue (and thus
2208          * the whole TB) - checking sdone bit. If not return.
2209          */
2210         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
2211                         __ATOMIC_RELAXED);
2212         if (!(atom_desc.rsp.val & ACC100_SDONE))
2213                 return -1;
2214
2215         /* Clearing status, it will be set based on response */
2216         op->status = 0;
2217
2218         /* Read remaining CBs if any exist */
2219         while (cb_idx < cbs_in_tb) {
2220                 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2221                                 & q->sw_ring_wrap_mask);
2222                 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2223                                 __ATOMIC_RELAXED);
2224                 rsp.val = atom_desc.rsp.val;
2225                 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
2226                                 rsp.val);
2227
2228                 op->status |= ((rsp.input_err)
2229                                 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
2230                 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2231                 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2232
2233                 /* Report CRC status only when no other error is set */
2234                 if (!op->status)
2235                         op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
2236                 op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
2237                                 op->turbo_dec.iter_count);
2238
2239                 /* Check if this is the last desc in batch (Atomic Queue) */
2240                 if (desc->req.last_desc_in_batch) {
2241                         (*aq_dequeued)++;
2242                         desc->req.last_desc_in_batch = 0;
2243                 }
2244                 desc->rsp.val = ACC100_DMA_DESC_TYPE;
2245                 desc->rsp.add_info_0 = 0;
2246                 desc->rsp.add_info_1 = 0;
2247                 dequeued_cbs++;
2248                 cb_idx++;
2249         }
2250
2251         *ref_op = op;
2252
2253         return cb_idx;
2254 }
2255
2256 /* Dequeue LDPC encode operations from ACC100 device. */
2257 static uint16_t
2258 acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
2259                 struct rte_bbdev_enc_op **ops, uint16_t num)
2260 {
2261         struct acc100_queue *q = q_data->queue_private;
2262         uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
2263         uint32_t aq_dequeued = 0;
2264         uint16_t dequeue_num, i, dequeued_cbs = 0, dequeued_descs = 0;
2265         int ret;
2266
2267 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2268         if (unlikely(ops == NULL || q == NULL))
2269                 return 0;
2270 #endif
2271
2272         dequeue_num = RTE_MIN(avail, num);
2273
2274         for (i = 0; i < dequeue_num; i++) {
2275                 ret = dequeue_enc_one_op_cb(q, &ops[dequeued_cbs],
2276                                 dequeued_descs, &aq_dequeued);
2277                 if (ret < 0)
2278                         break;
2279                 dequeued_cbs += ret;
2280                 dequeued_descs++;
2281                 if (dequeued_cbs >= num)
2282                         break;
2283         }
2284
2285         q->aq_dequeued += aq_dequeued;
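        /* The ring tail advances per descriptor; muxed descriptors may have returned several CBs */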
2286         q->sw_ring_tail += dequeued_descs;
2287
2288         /* Update dequeue stats */
2289         q_data->queue_stats.dequeued_count += dequeued_cbs;
2290
2291         return dequeued_cbs;
2292 }
2293
2294 /* Dequeue decode operations from ACC100 device. */
2295 static uint16_t
2296 acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
2297                 struct rte_bbdev_dec_op **ops, uint16_t num)
2298 {
2299         struct acc100_queue *q = q_data->queue_private;
2300         uint16_t dequeue_num;
2301         uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
2302         uint32_t aq_dequeued = 0;
2303         uint16_t i;
2304         uint16_t dequeued_cbs = 0;
2305         struct rte_bbdev_dec_op *op;
2306         int ret;
2307
2308 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2309         if (unlikely(ops == NULL || q == NULL))
2310                 return 0;
2311 #endif
2312
2313         dequeue_num = RTE_MIN(avail, num);
2314
2315         for (i = 0; i < dequeue_num; ++i) {
2316                 op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2317                         & q->sw_ring_wrap_mask))->req.op_addr;
2318                 if (op->ldpc_dec.code_block_mode == 0)
2319                         ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
2320                                         &aq_dequeued);
2321                 else
2322                         ret = dequeue_ldpc_dec_one_op_cb(
2323                                         q_data, q, &ops[i], dequeued_cbs,
2324                                         &aq_dequeued);
2325
2326                 if (ret < 0)
2327                         break;
2328                 dequeued_cbs += ret;
2329         }
2330
2331         q->aq_dequeued += aq_dequeued;
2332         q->sw_ring_tail += dequeued_cbs;
2333
2334         /* Update dequeue stats */
2335         q_data->queue_stats.dequeued_count += i;
2336
2337         return i;
2338 }
2339
2340 /* Initialization Function */
2341 static void
2342 acc100_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
2343 {
2344         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2345
2346         dev->dev_ops = &acc100_bbdev_ops;
2347         dev->enqueue_ldpc_enc_ops = acc100_enqueue_ldpc_enc;
2348         dev->enqueue_ldpc_dec_ops = acc100_enqueue_ldpc_dec;
2349         dev->dequeue_ldpc_enc_ops = acc100_dequeue_ldpc_enc;
2350         dev->dequeue_ldpc_dec_ops = acc100_dequeue_ldpc_dec;
2351
2352         ((struct acc100_device *) dev->data->dev_private)->pf_device =
2353                         !strcmp(drv->driver.name,
2354                                         RTE_STR(ACC100PF_DRIVER_NAME));
2355         ((struct acc100_device *) dev->data->dev_private)->mmio_base =
2356                         pci_dev->mem_resource[0].addr;
2357
2358         rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"",
2359                         drv->driver.name, dev->data->name,
2360                         (void *)pci_dev->mem_resource[0].addr,
2361                         pci_dev->mem_resource[0].phys_addr);
2362 }
2363
2364 static int acc100_pci_probe(struct rte_pci_driver *pci_drv,
2365         struct rte_pci_device *pci_dev)
2366 {
2367         struct rte_bbdev *bbdev = NULL;
2368         char dev_name[RTE_BBDEV_NAME_MAX_LEN];
2369
2370         if (pci_dev == NULL) {
2371                 rte_bbdev_log(ERR, "NULL PCI device");
2372                 return -EINVAL;
2373         }
2374
2375         rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
2376
2377         /* Allocate memory to be used privately by drivers */
2378         bbdev = rte_bbdev_allocate(pci_dev->device.name);
2379         if (bbdev == NULL)
2380                 return -ENODEV;
2381
2382         /* allocate device private memory */
2383         bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
2384                         sizeof(struct acc100_device), RTE_CACHE_LINE_SIZE,
2385                         pci_dev->device.numa_node);
2386
2387         if (bbdev->data->dev_private == NULL) {
2388                 rte_bbdev_log(CRIT,
2389                                 "Allocation of %zu bytes for device \"%s\" failed",
2390                                 sizeof(struct acc100_device), dev_name);
2391                 rte_bbdev_release(bbdev);
2392                 return -ENOMEM;
2393         }
2394
2395         /* Fill HW specific part of device structure */
2396         bbdev->device = &pci_dev->device;
2397         bbdev->intr_handle = &pci_dev->intr_handle;
2398         bbdev->data->socket_id = pci_dev->device.numa_node;
2399
2400         /* Invoke ACC100 device initialization function */
2401         acc100_bbdev_init(bbdev, pci_drv);
2402
2403         rte_bbdev_log_debug("Initialised bbdev %s (id = %u)",
2404                         dev_name, bbdev->data->dev_id);
2405         return 0;
2406 }
2407
2408 static int acc100_pci_remove(struct rte_pci_device *pci_dev)
2409 {
2410         struct rte_bbdev *bbdev;
2411         int ret;
2412         uint8_t dev_id;
2413
2414         if (pci_dev == NULL)
2415                 return -EINVAL;
2416
2417         /* Find device */
2418         bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
2419         if (bbdev == NULL) {
2420                 rte_bbdev_log(CRIT,
2421                                 "Couldn't find HW dev \"%s\" to uninitialise it",
2422                                 pci_dev->device.name);
2423                 return -ENODEV;
2424         }
2425         dev_id = bbdev->data->dev_id;
2426
2427         /* free device private memory before close */
2428         rte_free(bbdev->data->dev_private);
2429
2430         /* Close device */
2431         ret = rte_bbdev_close(dev_id);
2432         if (ret < 0)
2433                 rte_bbdev_log(ERR,
2434                                 "Device %i failed to close during uninit: %i",
2435                                 dev_id, ret);
2436
2437         /* release bbdev from library */
2438         rte_bbdev_release(bbdev);
2439
2440         rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
2441
2442         return 0;
2443 }
2444
2445 static struct rte_pci_driver acc100_pci_pf_driver = {
2446                 .probe = acc100_pci_probe,
2447                 .remove = acc100_pci_remove,
2448                 .id_table = pci_id_acc100_pf_map,
2449                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING
2450 };
2451
2452 static struct rte_pci_driver acc100_pci_vf_driver = {
2453                 .probe = acc100_pci_probe,
2454                 .remove = acc100_pci_remove,
2455                 .id_table = pci_id_acc100_vf_map,
2456                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING
2457 };
2458
2459 RTE_PMD_REGISTER_PCI(ACC100PF_DRIVER_NAME, acc100_pci_pf_driver);
2460 RTE_PMD_REGISTER_PCI_TABLE(ACC100PF_DRIVER_NAME, pci_id_acc100_pf_map);
2461 RTE_PMD_REGISTER_PCI(ACC100VF_DRIVER_NAME, acc100_pci_vf_driver);
2462 RTE_PMD_REGISTER_PCI_TABLE(ACC100VF_DRIVER_NAME, pci_id_acc100_vf_map);