1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_log.h>
9 #include <rte_dev.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_byteorder.h>
13 #include <rte_errno.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_hexdump.h>
16 #include <rte_pci.h>
17 #include <rte_bus_pci.h>
18 #ifdef RTE_BBDEV_OFFLOAD_COST
19 #include <rte_cycles.h>
20 #endif
21
22 #include <rte_bbdev.h>
23 #include <rte_bbdev_pmd.h>
24 #include "rte_acc100_pmd.h"
25
26 #ifdef RTE_LIBRTE_BBDEV_DEBUG
27 RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, DEBUG);
28 #else
29 RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, NOTICE);
30 #endif
31
32 /* Write to MMIO register address */
33 static inline void
34 mmio_write(void *addr, uint32_t value)
35 {
36         *((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
37 }
38
39 /* Write a register of an ACC100 device */
40 static inline void
41 acc100_reg_write(struct acc100_device *d, uint32_t offset, uint32_t payload)
42 {
43         void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
44         mmio_write(reg_addr, payload);
45         usleep(ACC100_LONG_WAIT);
46 }
47
48 /* Read a register of an ACC100 device */
49 static inline uint32_t
50 acc100_reg_read(struct acc100_device *d, uint32_t offset)
51 {
52
53         void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
54         uint32_t ret = *((volatile uint32_t *)(reg_addr));
55         return rte_le_to_cpu_32(ret);
56 }
57
58 /* Basic Implementation of Log2 for exact 2^N */
59 static inline uint32_t
60 log2_basic(uint32_t value)
61 {
62         return (value == 0) ? 0 : rte_bsf32(value);
63 }
64
65 /* Calculate memory alignment offset assuming alignment is 2^N */
66 static inline uint32_t
67 calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)
68 {
69         rte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);
70         return (uint32_t)(alignment -
71                         (unaligned_phy_mem & (alignment-1)));
72 }
73
74 /* Calculate the offset of the enqueue register */
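/*
 * The per-queue MMIO offset packs the VF id (PF view only) starting at bit 12,
 * the queue group id at bit 7 and the atomic queue id at bit 3, on top of the
 * PF or VF Qmgr ingress AQ base register.
 */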
75 static inline uint32_t
76 queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)
77 {
78         if (pf_device)
79                 return ((vf_id << 12) + (qgrp_id << 7) + (aq_id << 3) +
80                                 HWPfQmgrIngressAq);
81         else
82                 return ((qgrp_id << 7) + (aq_id << 3) +
83                                 HWVfQmgrIngressAq);
84 }
85
86 enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, NUM_ACC};
87
88 /* Return the queue topology for a Queue Group Index */
89 static inline void
90 qtopFromAcc(struct rte_acc100_queue_topology **qtop, int acc_enum,
91                 struct rte_acc100_conf *acc100_conf)
92 {
93         struct rte_acc100_queue_topology *p_qtop;
94         p_qtop = NULL;
95         switch (acc_enum) {
96         case UL_4G:
97                 p_qtop = &(acc100_conf->q_ul_4g);
98                 break;
99         case UL_5G:
100                 p_qtop = &(acc100_conf->q_ul_5g);
101                 break;
102         case DL_4G:
103                 p_qtop = &(acc100_conf->q_dl_4g);
104                 break;
105         case DL_5G:
106                 p_qtop = &(acc100_conf->q_dl_5g);
107                 break;
108         default:
109                 /* NOTREACHED */
110                 rte_bbdev_log(ERR, "Unexpected error evaluating qtopFromAcc");
111                 break;
112         }
113         *qtop = p_qtop;
114 }
115
116 static void
117 initQTop(struct rte_acc100_conf *acc100_conf)
118 {
119         acc100_conf->q_ul_4g.num_aqs_per_groups = 0;
120         acc100_conf->q_ul_4g.num_qgroups = 0;
121         acc100_conf->q_ul_4g.first_qgroup_index = -1;
122         acc100_conf->q_ul_5g.num_aqs_per_groups = 0;
123         acc100_conf->q_ul_5g.num_qgroups = 0;
124         acc100_conf->q_ul_5g.first_qgroup_index = -1;
125         acc100_conf->q_dl_4g.num_aqs_per_groups = 0;
126         acc100_conf->q_dl_4g.num_qgroups = 0;
127         acc100_conf->q_dl_4g.first_qgroup_index = -1;
128         acc100_conf->q_dl_5g.num_aqs_per_groups = 0;
129         acc100_conf->q_dl_5g.num_qgroups = 0;
130         acc100_conf->q_dl_5g.first_qgroup_index = -1;
131 }
132
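/*
 * Account for a newly discovered queue group of the given accelerator type
 * and, on its first occurrence, probe how many atomic queues are enabled
 * within that group.
 */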
133 static inline void
134 updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,
135                 struct acc100_device *d) {
136         uint32_t reg;
137         struct rte_acc100_queue_topology *q_top = NULL;
138         qtopFromAcc(&q_top, acc, acc100_conf);
139         if (unlikely(q_top == NULL))
140                 return;
141         uint16_t aq;
142         q_top->num_qgroups++;
143         if (q_top->first_qgroup_index == -1) {
144                 q_top->first_qgroup_index = qg;
145                 /* Can be optimized to assume all are enabled by default */
146                 reg = acc100_reg_read(d, queue_offset(d->pf_device,
147                                 0, qg, ACC100_NUM_AQS - 1));
148                 if (reg & ACC100_QUEUE_ENABLE) {
149                         q_top->num_aqs_per_groups = ACC100_NUM_AQS;
150                         return;
151                 }
152                 q_top->num_aqs_per_groups = 0;
153                 for (aq = 0; aq < ACC100_NUM_AQS; aq++) {
154                         reg = acc100_reg_read(d, queue_offset(d->pf_device,
155                                         0, qg, aq));
156                         if (reg & ACC100_QUEUE_ENABLE)
157                                 q_top->num_aqs_per_groups++;
158                 }
159         }
160 }
161
162 /* Fetch configuration enabled for the PF/VF using MMIO Read (slow) */
163 static inline void
164 fetch_acc100_config(struct rte_bbdev *dev)
165 {
166         struct acc100_device *d = dev->data->dev_private;
167         struct rte_acc100_conf *acc100_conf = &d->acc100_conf;
168         const struct acc100_registry_addr *reg_addr;
169         uint8_t acc, qg;
170         uint32_t reg, reg_aq, reg_len0, reg_len1;
171         uint32_t reg_mode;
172
173         /* Nothing to do if the configuration has already been fetched */
174         if (d->configured)
175                 return;
176
177         /* Choose correct registry addresses for the device type */
178         if (d->pf_device)
179                 reg_addr = &pf_reg_addr;
180         else
181                 reg_addr = &vf_reg_addr;
182
183         d->ddr_size = (1 + acc100_reg_read(d, reg_addr->ddr_range)) << 10;
184
185         /* A single VF bundle per VF */
186         acc100_conf->num_vf_bundles = 1;
187         initQTop(acc100_conf);
188
189         struct rte_acc100_queue_topology *q_top = NULL;
190         int qman_func_id[ACC100_NUM_ACCS] = {ACC100_ACCMAP_0, ACC100_ACCMAP_1,
191                         ACC100_ACCMAP_2, ACC100_ACCMAP_3, ACC100_ACCMAP_4};
192         reg = acc100_reg_read(d, reg_addr->qman_group_func);
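        /*
         * Each queue group owns a 4-bit nibble in the qman_group_func
         * register; the low 3 bits of the nibble index the qman_func_id
         * table to select which accelerator (4G/5G UL/DL) the group maps to.
         */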
193         for (qg = 0; qg < ACC100_NUM_QGRPS_PER_WORD; qg++) {
194                 reg_aq = acc100_reg_read(d,
195                                 queue_offset(d->pf_device, 0, qg, 0));
196                 if (reg_aq & ACC100_QUEUE_ENABLE) {
197                         uint32_t idx = (reg >> (qg * 4)) & 0x7;
198                         if (idx < ACC100_NUM_ACCS) {
199                                 acc = qman_func_id[idx];
200                                 updateQtop(acc, qg, acc100_conf, d);
201                         }
202                 }
203         }
204
205         /* Check the depth of the AQs */
206         reg_len0 = acc100_reg_read(d, reg_addr->depth_log0_offset);
207         reg_len1 = acc100_reg_read(d, reg_addr->depth_log1_offset);
208         for (acc = 0; acc < NUM_ACC; acc++) {
209                 qtopFromAcc(&q_top, acc, acc100_conf);
210                 if (q_top->first_qgroup_index < ACC100_NUM_QGRPS_PER_WORD)
211                         q_top->aq_depth_log2 = (reg_len0 >>
212                                         (q_top->first_qgroup_index * 4))
213                                         & 0xF;
214                 else
215                         q_top->aq_depth_log2 = (reg_len1 >>
216                                         ((q_top->first_qgroup_index -
217                                         ACC100_NUM_QGRPS_PER_WORD) * 4))
218                                         & 0xF;
219         }
220
221         /* Read PF mode */
222         if (d->pf_device) {
223                 reg_mode = acc100_reg_read(d, HWPfHiPfMode);
224                 acc100_conf->pf_mode_en = (reg_mode == ACC100_PF_VAL) ? 1 : 0;
225         }
226
227         rte_bbdev_log_debug(
228                         "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u\n",
229                         (d->pf_device) ? "PF" : "VF",
230                         (acc100_conf->input_pos_llr_1_bit) ? "POS" : "NEG",
231                         (acc100_conf->output_pos_llr_1_bit) ? "POS" : "NEG",
232                         acc100_conf->q_ul_4g.num_qgroups,
233                         acc100_conf->q_dl_4g.num_qgroups,
234                         acc100_conf->q_ul_5g.num_qgroups,
235                         acc100_conf->q_dl_5g.num_qgroups,
236                         acc100_conf->q_ul_4g.num_aqs_per_groups,
237                         acc100_conf->q_dl_4g.num_aqs_per_groups,
238                         acc100_conf->q_ul_5g.num_aqs_per_groups,
239                         acc100_conf->q_dl_5g.num_aqs_per_groups,
240                         acc100_conf->q_ul_4g.aq_depth_log2,
241                         acc100_conf->q_dl_4g.aq_depth_log2,
242                         acc100_conf->q_ul_5g.aq_depth_log2,
243                         acc100_conf->q_dl_5g.aq_depth_log2);
244 }
245
246 static void
247 free_base_addresses(void **base_addrs, int size)
248 {
249         int i;
250         for (i = 0; i < size; i++)
251                 rte_free(base_addrs[i]);
252 }
253
254 static inline uint32_t
255 get_desc_len(void)
256 {
257         return sizeof(union acc100_dma_desc);
258 }
259
260 /* Allocate the 2 * 64MB block for the sw rings */
261 static int
262 alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc100_device *d,
263                 int socket)
264 {
265         uint32_t sw_ring_size = ACC100_SIZE_64MBYTE;
266         d->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,
267                         2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);
268         if (d->sw_rings_base == NULL) {
269                 rte_bbdev_log(ERR, "Failed to allocate memory for %s:%u",
270                                 dev->device->driver->name,
271                                 dev->data->dev_id);
272                 return -ENOMEM;
273         }
274         uint32_t next_64mb_align_offset = calc_mem_alignment_offset(
275                         d->sw_rings_base, ACC100_SIZE_64MBYTE);
276         d->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);
277         d->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +
278                         next_64mb_align_offset;
279         d->sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
280         d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
281
282         return 0;
283 }
284
285 /* Attempt to allocate minimised memory space for sw rings */
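/*
 * The ring base address is later programmed with its low 26 bits masked off
 * (see acc100_setup_queues), so the whole ring area must not cross a 64MB
 * boundary; this helper retries the allocation until it gets a block that
 * ends before the next 64MB-aligned address.
 */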
286 static void
287 alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc100_device *d,
288                 uint16_t num_queues, int socket)
289 {
290         rte_iova_t sw_rings_base_iova, next_64mb_align_addr_iova;
291         uint32_t next_64mb_align_offset;
292         rte_iova_t sw_ring_iova_end_addr;
293         void *base_addrs[ACC100_SW_RING_MEM_ALLOC_ATTEMPTS];
294         void *sw_rings_base;
295         int i = 0;
296         uint32_t q_sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();
297         uint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;
298
299         /* Find an aligned block of memory to store sw rings */
300         while (i < ACC100_SW_RING_MEM_ALLOC_ATTEMPTS) {
301                 /*
302                  * sw_ring allocated memory is guaranteed to be aligned to
303                  * q_sw_ring_size, provided that the requested size is
304                  * less than the page size
305                  */
306                 sw_rings_base = rte_zmalloc_socket(
307                                 dev->device->driver->name,
308                                 dev_sw_ring_size, q_sw_ring_size, socket);
309
310                 if (sw_rings_base == NULL) {
311                         rte_bbdev_log(ERR,
312                                         "Failed to allocate memory for %s:%u",
313                                         dev->device->driver->name,
314                                         dev->data->dev_id);
315                         break;
316                 }
317
318                 sw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);
319                 next_64mb_align_offset = calc_mem_alignment_offset(
320                                 sw_rings_base, ACC100_SIZE_64MBYTE);
321                 next_64mb_align_addr_iova = sw_rings_base_iova +
322                                 next_64mb_align_offset;
323                 sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size;
324
325                 /* Check if the end of the sw ring memory block is before the
326                  * start of next 64MB aligned mem address
327                  */
328                 if (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {
329                         d->sw_rings_iova = sw_rings_base_iova;
330                         d->sw_rings = sw_rings_base;
331                         d->sw_rings_base = sw_rings_base;
332                         d->sw_ring_size = q_sw_ring_size;
333                         d->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;
334                         break;
335                 }
336                 /* Store the address of the unaligned mem block */
337                 base_addrs[i] = sw_rings_base;
338                 i++;
339         }
340
341         /* Free all unaligned blocks of mem allocated in the loop */
342         free_base_addresses(base_addrs, i);
343 }
344
345
346 /* Allocate memory for all software rings and program the ring base registers */
347 static int
348 acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
349 {
350         uint32_t phys_low, phys_high, payload;
351         struct acc100_device *d = dev->data->dev_private;
352         const struct acc100_registry_addr *reg_addr;
353
354         if (d->pf_device && !d->acc100_conf.pf_mode_en) {
355                 rte_bbdev_log(NOTICE,
356                                 "%s has PF mode disabled. This PF can't be used.",
357                                 dev->data->name);
358                 return -ENODEV;
359         }
360
361         alloc_sw_rings_min_mem(dev, d, num_queues, socket_id);
362
363         /* If minimal memory space approach failed, then allocate
364          * the 2 * 64MB block for the sw rings
365          */
366         if (d->sw_rings == NULL)
367                 alloc_2x64mb_sw_rings_mem(dev, d, socket_id);
368
369         if (d->sw_rings == NULL) {
370                 rte_bbdev_log(NOTICE,
371                                 "Failure allocating sw_rings memory");
372                 return -ENODEV;
373         }
374
375         /* Configure ACC100 with the base address for DMA descriptor rings
376          * Same descriptor rings used for UL and DL DMA Engines
377          * Note : Assuming only VF0 bundle is used for PF mode
378          * Note: assuming only the VF0 bundle is used in PF mode
379         phys_high = (uint32_t)(d->sw_rings_iova >> 32);
380         phys_low  = (uint32_t)(d->sw_rings_iova & ~(ACC100_SIZE_64MBYTE-1));
381
382         /* Choose correct registry addresses for the device type */
383         if (d->pf_device)
384                 reg_addr = &pf_reg_addr;
385         else
386                 reg_addr = &vf_reg_addr;
387
388         /* Read the populated cfg from ACC100 registers */
389         fetch_acc100_config(dev);
390
391         /* Release AXI from PF */
392         if (d->pf_device)
393                 acc100_reg_write(d, HWPfDmaAxiControl, 1);
394
395         acc100_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);
396         acc100_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);
397         acc100_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);
398         acc100_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);
399         acc100_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);
400         acc100_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);
401         acc100_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);
402         acc100_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);
403
404         /*
405          * Configure Ring Size to the max queue ring size
406          * (used for wrapping purpose)
407          */
408         payload = log2_basic(d->sw_ring_size / 64);
409         acc100_reg_write(d, reg_addr->ring_size, payload);
410
411         /* Configure tail pointer for use when SDONE enabled */
412         d->tail_ptrs = rte_zmalloc_socket(
413                         dev->device->driver->name,
414                         ACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t),
415                         RTE_CACHE_LINE_SIZE, socket_id);
416         if (d->tail_ptrs == NULL) {
417                 rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u",
418                                 dev->device->driver->name,
419                                 dev->data->dev_id);
420                 rte_free(d->sw_rings_base);
421                 return -ENOMEM;
422         }
423         d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs);
424
425         phys_high = (uint32_t)(d->tail_ptr_iova >> 32);
426         phys_low  = (uint32_t)(d->tail_ptr_iova);
427         acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);
428         acc100_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);
429         acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);
430         acc100_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);
431         acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);
432         acc100_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);
433         acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);
434         acc100_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);
435
436         d->harq_layout = rte_zmalloc_socket("HARQ Layout",
437                         ACC100_HARQ_LAYOUT * sizeof(*d->harq_layout),
438                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
439         if (d->harq_layout == NULL) {
440                 rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u",
441                                 dev->device->driver->name,
442                                 dev->data->dev_id);
443                 rte_free(d->sw_rings_base);
444                 return -ENOMEM;
445         }
446
447         /* Mark as configured properly */
448         d->configured = true;
449
450         rte_bbdev_log_debug(
451                         "ACC100 (%s) configured sw_rings = %p, sw_rings_iova = %#"
452                         PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova);
453
454         return 0;
455 }
456
457 /* Free memory used for software rings */
458 static int
459 acc100_dev_close(struct rte_bbdev *dev)
460 {
461         struct acc100_device *d = dev->data->dev_private;
462         if (d->sw_rings_base != NULL) {
463                 rte_free(d->tail_ptrs);
464                 rte_free(d->sw_rings_base);
465                 d->sw_rings_base = NULL;
466         }
467         /* Ensure all in flight HW transactions are completed */
468         usleep(ACC100_LONG_WAIT);
469         return 0;
470 }
471
472 /**
473  * Report an ACC100 queue index which is free
474  * Return 0 to 16k for a valid queue_idx or -1 when no queue is available
475  * Note: only the VF0 bundle is supported in PF mode
476  */
477 static int
478 acc100_find_free_queue_idx(struct rte_bbdev *dev,
479                 const struct rte_bbdev_queue_conf *conf)
480 {
481         struct acc100_device *d = dev->data->dev_private;
482         int op_2_acc[5] = {0, UL_4G, DL_4G, UL_5G, DL_5G};
483         int acc = op_2_acc[conf->op_type];
484         struct rte_acc100_queue_topology *qtop = NULL;
485
486         qtopFromAcc(&qtop, acc, &(d->acc100_conf));
487         if (qtop == NULL)
488                 return -1;
489         /* Identify the matching queue group index; groups are sorted in priority order */
490         uint16_t group_idx = qtop->first_qgroup_index;
491         group_idx += conf->priority;
492         if (group_idx >= ACC100_NUM_QGRPS ||
493                         conf->priority >= qtop->num_qgroups) {
494                 rte_bbdev_log(INFO, "Invalid Priority on %s, priority %u",
495                                 dev->data->name, conf->priority);
496                 return -1;
497         }
498         /* Find a free AQ_idx  */
499         uint16_t aq_idx;
500         for (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) {
501                 if (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {
502                         /* Mark the Queue as assigned */
503                         d->q_assigned_bit_map[group_idx] |= (1 << aq_idx);
504                         /* Report the AQ Index */
505                         return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx;
506                 }
507         }
508         rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u",
509                         dev->data->name, conf->priority);
510         return -1;
511 }
512
513 /* Setup ACC100 queue */
514 static int
515 acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
516                 const struct rte_bbdev_queue_conf *conf)
517 {
518         struct acc100_device *d = dev->data->dev_private;
519         struct acc100_queue *q;
520         int16_t q_idx;
521
522         if (d == NULL) {
523                 rte_bbdev_log(ERR, "Undefined device");
524                 return -ENODEV;
525         }
526         /* Allocate the queue data structure. */
527         q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
528                         RTE_CACHE_LINE_SIZE, conf->socket);
529         if (q == NULL) {
530                 rte_bbdev_log(ERR, "Failed to allocate queue memory");
531                 return -ENOMEM;
532         }
533
534         q->d = d;
535         q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
536         q->ring_addr_iova = d->sw_rings_iova + (d->sw_ring_size * queue_id);
537
538         /* Prepare the Ring with default descriptor format */
539         union acc100_dma_desc *desc = NULL;
540         unsigned int desc_idx, b_idx;
541         int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
542                 ACC100_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
543                 ACC100_FCW_TD_BLEN : ACC100_FCW_LD_BLEN));
544
545         for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
546                 desc = q->ring_addr + desc_idx;
547                 desc->req.word0 = ACC100_DMA_DESC_TYPE;
548                 desc->req.word1 = 0; /**< Timestamp */
549                 desc->req.word2 = 0;
550                 desc->req.word3 = 0;
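                /*
                 * desc_idx << 8 steps through the ring in 256-byte descriptor
                 * strides; the frame control word of each descriptor is kept
                 * at ACC100_DESC_FCW_OFFSET inside it.
                 */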
551                 uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
552                 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
553                 desc->req.data_ptrs[0].blen = fcw_len;
554                 desc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;
555                 desc->req.data_ptrs[0].last = 0;
556                 desc->req.data_ptrs[0].dma_ext = 0;
557                 for (b_idx = 1; b_idx < ACC100_DMA_MAX_NUM_POINTERS - 1;
558                                 b_idx++) {
559                         desc->req.data_ptrs[b_idx].blkid = ACC100_DMA_BLKID_IN;
560                         desc->req.data_ptrs[b_idx].last = 1;
561                         desc->req.data_ptrs[b_idx].dma_ext = 0;
562                         b_idx++;
563                         desc->req.data_ptrs[b_idx].blkid =
564                                         ACC100_DMA_BLKID_OUT_ENC;
565                         desc->req.data_ptrs[b_idx].last = 1;
566                         desc->req.data_ptrs[b_idx].dma_ext = 0;
567                 }
568                 /* Preset some fields of LDPC FCW */
569                 desc->req.fcw_ld.FCWversion = ACC100_FCW_VER;
570                 desc->req.fcw_ld.gain_i = 1;
571                 desc->req.fcw_ld.gain_h = 1;
572         }
573
574         q->lb_in = rte_zmalloc_socket(dev->device->driver->name,
575                         RTE_CACHE_LINE_SIZE,
576                         RTE_CACHE_LINE_SIZE, conf->socket);
577         if (q->lb_in == NULL) {
578                 rte_bbdev_log(ERR, "Failed to allocate lb_in memory");
579                 rte_free(q);
580                 return -ENOMEM;
581         }
582         q->lb_in_addr_iova = rte_malloc_virt2iova(q->lb_in);
583         q->lb_out = rte_zmalloc_socket(dev->device->driver->name,
584                         RTE_CACHE_LINE_SIZE,
585                         RTE_CACHE_LINE_SIZE, conf->socket);
586         if (q->lb_out == NULL) {
587                 rte_bbdev_log(ERR, "Failed to allocate lb_out memory");
588                 rte_free(q->lb_in);
589                 rte_free(q);
590                 return -ENOMEM;
591         }
592         q->lb_out_addr_iova = rte_malloc_virt2iova(q->lb_out);
593
594         /*
595          * Software queue ring wraps synchronously with the HW when it reaches
596          * the boundary of the maximum allocated queue size, no matter what the
597          * sw queue size is. This wrapping is guarded by setting the wrap_mask
598          * to represent the maximum queue size as allocated at the time when
599          * the device has been setup (in configure()).
600          *
601          * The queue depth is set to the queue size value (conf->queue_size).
602          * This limits the occupancy of the queue at any point of time, so that
603          * the queue does not get swamped with enqueue requests.
604          */
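        /*
         * For instance, if the device was set up with a maximum ring depth of
         * 1024 descriptors, wrap_mask is 0x3FF and a queue configured with
         * queue_size 128 still wraps its descriptor index modulo 1024.
         */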
605         q->sw_ring_depth = conf->queue_size;
606         q->sw_ring_wrap_mask = d->sw_ring_max_depth - 1;
607
608         q->op_type = conf->op_type;
609
610         q_idx = acc100_find_free_queue_idx(dev, conf);
611         if (q_idx == -1) {
612                 rte_free(q->lb_in);
613                 rte_free(q->lb_out);
614                 rte_free(q);
615                 return -1;
616         }
617
618         q->qgrp_id = (q_idx >> ACC100_GRP_ID_SHIFT) & 0xF;
619         q->vf_id = (q_idx >> ACC100_VF_ID_SHIFT)  & 0x3F;
620         q->aq_id = q_idx & 0xF;
621         q->aq_depth = (conf->op_type ==  RTE_BBDEV_OP_TURBO_DEC) ?
622                         (1 << d->acc100_conf.q_ul_4g.aq_depth_log2) :
623                         (1 << d->acc100_conf.q_dl_4g.aq_depth_log2);
624
625         q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
626                         queue_offset(d->pf_device,
627                                         q->vf_id, q->qgrp_id, q->aq_id));
628
629         rte_bbdev_log_debug(
630                         "Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p",
631                         dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,
632                         q->aq_id, q->aq_depth, q->mmio_reg_enqueue);
633
634         dev->data->queues[queue_id].queue_private = q;
635         return 0;
636 }
637
638 /* Release ACC100 queue */
639 static int
640 acc100_queue_release(struct rte_bbdev *dev, uint16_t q_id)
641 {
642         struct acc100_device *d = dev->data->dev_private;
643         struct acc100_queue *q = dev->data->queues[q_id].queue_private;
644
645         if (q != NULL) {
646                 /* Mark the Queue as un-assigned */
647                 d->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -
648                                 (1 << q->aq_id));
649                 rte_free(q->lb_in);
650                 rte_free(q->lb_out);
651                 rte_free(q);
652                 dev->data->queues[q_id].queue_private = NULL;
653         }
654
655         return 0;
656 }
657
658 /* Get ACC100 device info */
659 static void
660 acc100_dev_info_get(struct rte_bbdev *dev,
661                 struct rte_bbdev_driver_info *dev_info)
662 {
663         struct acc100_device *d = dev->data->dev_private;
664
665         static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
666                 {
667                         .type   = RTE_BBDEV_OP_LDPC_ENC,
668                         .cap.ldpc_enc = {
669                                 .capability_flags =
670                                         RTE_BBDEV_LDPC_RATE_MATCH |
671                                         RTE_BBDEV_LDPC_CRC_24B_ATTACH |
672                                         RTE_BBDEV_LDPC_INTERLEAVER_BYPASS,
673                                 .num_buffers_src =
674                                                 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
675                                 .num_buffers_dst =
676                                                 RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
677                         }
678                 },
679                 {
680                         .type   = RTE_BBDEV_OP_LDPC_DEC,
681                         .cap.ldpc_dec = {
682                         .capability_flags =
683                                 RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
684                                 RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
685                                 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
686                                 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
687 #ifdef ACC100_EXT_MEM
688                                 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
689                                 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
690                                 RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
691 #endif
692                                 RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
693                                 RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS |
694                                 RTE_BBDEV_LDPC_DECODE_BYPASS |
695                                 RTE_BBDEV_LDPC_DEC_SCATTER_GATHER |
696                                 RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION |
697                                 RTE_BBDEV_LDPC_LLR_COMPRESSION,
698                         .llr_size = 8,
699                         .llr_decimals = 1,
700                         .num_buffers_src =
701                                         RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
702                         .num_buffers_hard_out =
703                                         RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
704                         .num_buffers_soft_out = 0,
705                         }
706                 },
707                 RTE_BBDEV_END_OF_CAPABILITIES_LIST()
708         };
709
710         static struct rte_bbdev_queue_conf default_queue_conf;
711         default_queue_conf.socket = dev->data->socket_id;
712         default_queue_conf.queue_size = ACC100_MAX_QUEUE_DEPTH;
713
714         dev_info->driver_name = dev->device->driver->name;
715
716         /* Read and save the populated config from ACC100 registers */
717         fetch_acc100_config(dev);
718
719         /* This isn't ideal because it reports the maximum number of queues but
720          * does not provide info on how many can be uplink/downlink or how
721          * they are split across priorities
722          */
723         dev_info->max_num_queues =
724                         d->acc100_conf.q_dl_5g.num_aqs_per_groups *
725                         d->acc100_conf.q_dl_5g.num_qgroups +
726                         d->acc100_conf.q_ul_5g.num_aqs_per_groups *
727                         d->acc100_conf.q_ul_5g.num_qgroups +
728                         d->acc100_conf.q_dl_4g.num_aqs_per_groups *
729                         d->acc100_conf.q_dl_4g.num_qgroups +
730                         d->acc100_conf.q_ul_4g.num_aqs_per_groups *
731                         d->acc100_conf.q_ul_4g.num_qgroups;
732         dev_info->queue_size_lim = ACC100_MAX_QUEUE_DEPTH;
733         dev_info->hardware_accelerated = true;
734         dev_info->max_dl_queue_priority =
735                         d->acc100_conf.q_dl_4g.num_qgroups - 1;
736         dev_info->max_ul_queue_priority =
737                         d->acc100_conf.q_ul_4g.num_qgroups - 1;
738         dev_info->default_queue_conf = default_queue_conf;
739         dev_info->cpu_flag_reqs = NULL;
740         dev_info->min_alignment = 64;
741         dev_info->capabilities = bbdev_capabilities;
742 #ifdef ACC100_EXT_MEM
743         dev_info->harq_buffer_size = d->ddr_size;
744 #else
745         dev_info->harq_buffer_size = 0;
746 #endif
747 }
748
749
750 static const struct rte_bbdev_ops acc100_bbdev_ops = {
751         .setup_queues = acc100_setup_queues,
752         .close = acc100_dev_close,
753         .info_get = acc100_dev_info_get,
754         .queue_setup = acc100_queue_setup,
755         .queue_release = acc100_queue_release,
756 };
757
758 /* ACC100 PCI PF address map */
759 static struct rte_pci_id pci_id_acc100_pf_map[] = {
760         {
761                 RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_PF_DEVICE_ID)
762         },
763         {.device_id = 0},
764 };
765
766 /* ACC100 PCI VF address map */
767 static struct rte_pci_id pci_id_acc100_vf_map[] = {
768         {
769                 RTE_PCI_DEVICE(RTE_ACC100_VENDOR_ID, RTE_ACC100_VF_DEVICE_ID)
770         },
771         {.device_id = 0},
772 };
773
774 /* Read flag value 0/1 from bitmap */
775 static inline bool
776 check_bit(uint32_t bitmap, uint32_t bitmask)
777 {
778         return bitmap & bitmask;
779 }
780
781 static inline char *
782 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
783 {
784         if (unlikely(len > rte_pktmbuf_tailroom(m)))
785                 return NULL;
786
787         char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
788         m->data_len = (uint16_t)(m->data_len + len);
789         m_head->pkt_len  = (m_head->pkt_len + len);
790         return tail;
791 }
792
793 /* Compute value of k0.
794  * Based on 3GPP 38.212 Table 5.4.2.1-2
795  * Starting position of different redundancy versions, k0
796  */
797 static inline uint16_t
798 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
799 {
800         if (rv_index == 0)
801                 return 0;
802         uint16_t n = (bg == 1 ? ACC100_N_ZC_1 : ACC100_N_ZC_2) * z_c;
803         if (n_cb == n) {
804                 if (rv_index == 1)
805                         return (bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * z_c;
806                 else if (rv_index == 2)
807                         return (bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * z_c;
808                 else
809                         return (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c;
810         }
811         /* LBRM case - includes a division by N */
812         if (rv_index == 1)
813                 return (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb)
814                                 / n) * z_c;
815         else if (rv_index == 2)
816                 return (((bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * n_cb)
817                                 / n) * z_c;
818         else
819                 return (((bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * n_cb)
820                                 / n) * z_c;
821 }
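/*
 * Worked example (a sketch, assuming the ACC100_K0_*_* constants encode the
 * TS 38.212 Table 5.4.2.1-2 numerators, i.e. 17/33/56 for BG1 and 13/25/43
 * for BG2): with BG1, z_c = 384 and a full circular buffer n_cb = 66 * 384,
 * rv_index = 2 takes the n_cb == n branch and yields k0 = 33 * 384 = 12672.
 */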
822
823 /* Fill in a frame control word for LDPC encoding. */
824 static inline void
825 acc100_fcw_le_fill(const struct rte_bbdev_enc_op *op,
826                 struct acc100_fcw_le *fcw, int num_cb)
827 {
828         fcw->qm = op->ldpc_enc.q_m;
829         fcw->nfiller = op->ldpc_enc.n_filler;
830         fcw->BG = (op->ldpc_enc.basegraph - 1);
831         fcw->Zc = op->ldpc_enc.z_c;
832         fcw->ncb = op->ldpc_enc.n_cb;
833         fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,
834                         op->ldpc_enc.rv_index);
835         fcw->rm_e = op->ldpc_enc.cb_params.e;
836         fcw->crc_select = check_bit(op->ldpc_enc.op_flags,
837                         RTE_BBDEV_LDPC_CRC_24B_ATTACH);
838         fcw->bypass_intlv = check_bit(op->ldpc_enc.op_flags,
839                         RTE_BBDEV_LDPC_INTERLEAVER_BYPASS);
840         fcw->mcb_count = num_cb;
841 }
842
843 /* Fill in a frame control word for LDPC decoding. */
844 static inline void
845 acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,
846                 union acc100_harq_layout_data *harq_layout)
847 {
848         uint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;
849         uint16_t harq_index;
850         uint32_t l;
851         bool harq_prun = false;
852
853         fcw->qm = op->ldpc_dec.q_m;
854         fcw->nfiller = op->ldpc_dec.n_filler;
855         fcw->BG = (op->ldpc_dec.basegraph - 1);
856         fcw->Zc = op->ldpc_dec.z_c;
857         fcw->ncb = op->ldpc_dec.n_cb;
858         fcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_dec.basegraph,
859                         op->ldpc_dec.rv_index);
860         if (op->ldpc_dec.code_block_mode == 1)
861                 fcw->rm_e = op->ldpc_dec.cb_params.e;
862         else
863                 fcw->rm_e = (op->ldpc_dec.tb_params.r <
864                                 op->ldpc_dec.tb_params.cab) ?
865                                                 op->ldpc_dec.tb_params.ea :
866                                                 op->ldpc_dec.tb_params.eb;
867
868         fcw->hcin_en = check_bit(op->ldpc_dec.op_flags,
869                         RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
870         fcw->hcout_en = check_bit(op->ldpc_dec.op_flags,
871                         RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
872         fcw->crc_select = check_bit(op->ldpc_dec.op_flags,
873                         RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
874         fcw->bypass_dec = check_bit(op->ldpc_dec.op_flags,
875                         RTE_BBDEV_LDPC_DECODE_BYPASS);
876         fcw->bypass_intlv = check_bit(op->ldpc_dec.op_flags,
877                         RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS);
878         if (op->ldpc_dec.q_m == 1) {
879                 fcw->bypass_intlv = 1;
880                 fcw->qm = 2;
881         }
882         fcw->hcin_decomp_mode = check_bit(op->ldpc_dec.op_flags,
883                         RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
884         fcw->hcout_comp_mode = check_bit(op->ldpc_dec.op_flags,
885                         RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
886         fcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,
887                         RTE_BBDEV_LDPC_LLR_COMPRESSION);
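        /*
         * harq_index selects the entry in the per-device harq_layout table;
         * pruning is only attempted when the HARQ output offset is an exact
         * multiple of ACC100_HARQ_OFFSET and small enough for a 16-bit index.
         */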
888         harq_index = op->ldpc_dec.harq_combined_output.offset /
889                         ACC100_HARQ_OFFSET;
890 #ifdef ACC100_EXT_MEM
891         /* Limit cases when HARQ pruning is valid */
892         harq_prun = ((op->ldpc_dec.harq_combined_output.offset %
893                         ACC100_HARQ_OFFSET) == 0) &&
894                         (op->ldpc_dec.harq_combined_output.offset <= UINT16_MAX
895                         * ACC100_HARQ_OFFSET);
896 #endif
897         if (fcw->hcin_en > 0) {
898                 harq_in_length = op->ldpc_dec.harq_combined_input.length;
899                 if (fcw->hcin_decomp_mode > 0)
900                         harq_in_length = harq_in_length * 8 / 6;
901                 harq_in_length = RTE_ALIGN(harq_in_length, 64);
902                 if ((harq_layout[harq_index].offset > 0) && harq_prun) {
903                         rte_bbdev_log_debug("HARQ IN offset unexpected for now\n");
904                         fcw->hcin_size0 = harq_layout[harq_index].size0;
905                         fcw->hcin_offset = harq_layout[harq_index].offset;
906                         fcw->hcin_size1 = harq_in_length -
907                                         harq_layout[harq_index].offset;
908                 } else {
909                         fcw->hcin_size0 = harq_in_length;
910                         fcw->hcin_offset = 0;
911                         fcw->hcin_size1 = 0;
912                 }
913         } else {
914                 fcw->hcin_size0 = 0;
915                 fcw->hcin_offset = 0;
916                 fcw->hcin_size1 = 0;
917         }
918
919         fcw->itmax = op->ldpc_dec.iter_max;
920         fcw->itstop = check_bit(op->ldpc_dec.op_flags,
921                         RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
922         fcw->synd_precoder = fcw->itstop;
923         /*
924          * These are all implicitly set
925          * fcw->synd_post = 0;
926          * fcw->so_en = 0;
927          * fcw->so_bypass_rm = 0;
928          * fcw->so_bypass_intlv = 0;
929          * fcw->dec_convllr = 0;
930          * fcw->hcout_convllr = 0;
931          * fcw->hcout_size1 = 0;
932          * fcw->so_it = 0;
933          * fcw->hcout_offset = 0;
934          * fcw->negstop_th = 0;
935          * fcw->negstop_it = 0;
936          * fcw->negstop_en = 0;
937          * fcw->gain_i = 1;
938          * fcw->gain_h = 1;
939          */
940         if (fcw->hcout_en > 0) {
941                 parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)
942                         * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
943                 k0_p = (fcw->k0 > parity_offset) ?
944                                 fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;
945                 ncb_p = fcw->ncb - op->ldpc_dec.n_filler;
946                 l = k0_p + fcw->rm_e;
947                 harq_out_length = (uint16_t) fcw->hcin_size0;
948                 harq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);
949                 harq_out_length = (harq_out_length + 0x3F) & 0xFFC0;
950                 if ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) &&
951                                 harq_prun) {
952                         fcw->hcout_size0 = (uint16_t) fcw->hcin_size0;
953                         fcw->hcout_offset = k0_p & 0xFFC0;
954                         fcw->hcout_size1 = harq_out_length - fcw->hcout_offset;
955                 } else {
956                         fcw->hcout_size0 = harq_out_length;
957                         fcw->hcout_size1 = 0;
958                         fcw->hcout_offset = 0;
959                 }
960                 harq_layout[harq_index].offset = fcw->hcout_offset;
961                 harq_layout[harq_index].size0 = fcw->hcout_size0;
962         } else {
963                 fcw->hcout_size0 = 0;
964                 fcw->hcout_size1 = 0;
965                 fcw->hcout_offset = 0;
966         }
967 }
968
969 /**
970  * Fills descriptor with data pointers of one block type.
971  *
972  * @param desc
973  *   Pointer to DMA descriptor.
974  * @param input
975  * Pointer to pointer to input data which will be encoded. It can be changed
976  * to point to the next segment in the scatter-gather case.
977  * @param offset
978  * Input offset in the rte_mbuf structure. It is used for calculating the point
979  * where the data starts.
980  * @param cb_len
981  *   Length of currently processed Code Block
982  * @param seg_total_left
983  * It indicates how many bytes are still left in the segment (mbuf) for further
984  *   processing.
985  * @param op_flags
986  *   Store information about device capabilities
987  * @param next_triplet
988  *   Index for ACC100 DMA Descriptor triplet
989  *
990  * @return
991  *   Returns index of next triplet on success, other value if lengths of
992  *   pkt and processed cb do not match.
993  *
994  */
995 static inline int
996 acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,
997                 struct rte_mbuf **input, uint32_t *offset, uint32_t cb_len,
998                 uint32_t *seg_total_left, int next_triplet)
999 {
1000         uint32_t part_len;
1001         struct rte_mbuf *m = *input;
1002
1003         part_len = (*seg_total_left < cb_len) ? *seg_total_left : cb_len;
1004         cb_len -= part_len;
1005         *seg_total_left -= part_len;
1006
1007         desc->data_ptrs[next_triplet].address =
1008                         rte_pktmbuf_iova_offset(m, *offset);
1009         desc->data_ptrs[next_triplet].blen = part_len;
1010         desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
1011         desc->data_ptrs[next_triplet].last = 0;
1012         desc->data_ptrs[next_triplet].dma_ext = 0;
1013         *offset += part_len;
1014         next_triplet++;
1015
1016         while (cb_len > 0) {
1017                 if (next_triplet < ACC100_DMA_MAX_NUM_POINTERS &&
1018                                 m->next != NULL) {
1019
1020                         m = m->next;
1021                         *seg_total_left = rte_pktmbuf_data_len(m);
1022                         part_len = (*seg_total_left < cb_len) ?
1023                                         *seg_total_left :
1024                                         cb_len;
1025                         desc->data_ptrs[next_triplet].address =
1026                                         rte_pktmbuf_iova_offset(m, 0);
1027                         desc->data_ptrs[next_triplet].blen = part_len;
1028                         desc->data_ptrs[next_triplet].blkid =
1029                                         ACC100_DMA_BLKID_IN;
1030                         desc->data_ptrs[next_triplet].last = 0;
1031                         desc->data_ptrs[next_triplet].dma_ext = 0;
1032                         cb_len -= part_len;
1033                         *seg_total_left -= part_len;
1034                         /* Initializing offset for next segment (mbuf) */
1035                         *offset = part_len;
1036                         next_triplet++;
1037                 } else {
1038                         rte_bbdev_log(ERR,
1039                                 "Some data still left for processing: "
1040                                 "data_left: %u, next_triplet: %u, next_mbuf: %p",
1041                                 cb_len, next_triplet, m->next);
1042                         return -EINVAL;
1043                 }
1044         }
1045         /* Store the new mbuf as it could have changed in the scatter-gather case */
1046         *input = m;
1047
1048         return next_triplet;
1049 }
1050
1051 /* Fills descriptor with data pointers of one block type.
1052  * Returns index of next triplet on success, other value if lengths of
1053  * output data and processed mbuf do not match.
1054  */
1055 static inline int
1056 acc100_dma_fill_blk_type_out(struct acc100_dma_req_desc *desc,
1057                 struct rte_mbuf *output, uint32_t out_offset,
1058                 uint32_t output_len, int next_triplet, int blk_id)
1059 {
1060         desc->data_ptrs[next_triplet].address =
1061                         rte_pktmbuf_iova_offset(output, out_offset);
1062         desc->data_ptrs[next_triplet].blen = output_len;
1063         desc->data_ptrs[next_triplet].blkid = blk_id;
1064         desc->data_ptrs[next_triplet].last = 0;
1065         desc->data_ptrs[next_triplet].dma_ext = 0;
1066         next_triplet++;
1067
1068         return next_triplet;
1069 }
1070
1071 static inline void
1072 acc100_header_init(struct acc100_dma_req_desc *desc)
1073 {
1074         desc->word0 = ACC100_DMA_DESC_TYPE;
1075         desc->word1 = 0; /**< Timestamp could be disabled */
1076         desc->word2 = 0;
1077         desc->word3 = 0;
1078         desc->numCBs = 1;
1079 }
1080
1081 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1082 /* Check if any input data is unexpectedly left for processing */
1083 static inline int
1084 check_mbuf_total_left(uint32_t mbuf_total_left)
1085 {
1086         if (mbuf_total_left == 0)
1087                 return 0;
1088         rte_bbdev_log(ERR,
1089                 "Some data still left for processing: mbuf_total_left = %u",
1090                 mbuf_total_left);
1091         return -EINVAL;
1092 }
1093 #endif
1094
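/*
 * Fill the DMA request descriptor for one LDPC encode code block: a chain of
 * input pointer triplets for the CB data and one output triplet for the
 * rate-matched codeword.
 */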
1095 static inline int
1096 acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,
1097                 struct acc100_dma_req_desc *desc, struct rte_mbuf **input,
1098                 struct rte_mbuf *output, uint32_t *in_offset,
1099                 uint32_t *out_offset, uint32_t *out_length,
1100                 uint32_t *mbuf_total_left, uint32_t *seg_total_left)
1101 {
1102         int next_triplet = 1; /* FCW already done */
1103         uint16_t K, in_length_in_bits, in_length_in_bytes;
1104         struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1105
1106         acc100_header_init(desc);
1107
1108         K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1109         in_length_in_bits = K - enc->n_filler;
1110         if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) ||
1111                         (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH))
1112                 in_length_in_bits -= 24;
1113         in_length_in_bytes = in_length_in_bits >> 3;
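        /*
         * E.g. for BG1 with z_c = 384, K = 22 * 384 = 8448 information bits;
         * when CRC24A/B attachment is requested the device appends the CRC,
         * so the mbuf is expected to carry (K - n_filler - 24) / 8 bytes.
         */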
1114
1115         if (unlikely((*mbuf_total_left == 0) ||
1116                         (*mbuf_total_left < in_length_in_bytes))) {
1117                 rte_bbdev_log(ERR,
1118                                 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1119                                 *mbuf_total_left, in_length_in_bytes);
1120                 return -1;
1121         }
1122
1123         next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,
1124                         in_length_in_bytes,
1125                         seg_total_left, next_triplet);
1126         if (unlikely(next_triplet < 0)) {
1127                 rte_bbdev_log(ERR,
1128                                 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1129                                 op);
1130                 return -1;
1131         }
1132         desc->data_ptrs[next_triplet - 1].last = 1;
1133         desc->m2dlen = next_triplet;
1134         *mbuf_total_left -= in_length_in_bytes;
1135
1136         /* Set output length */
1137         /* Integer round up division by 8 */
1138         *out_length = (enc->cb_params.e + 7) >> 3;
1139
1140         next_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,
1141                         *out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);
1142         op->ldpc_enc.output.length += *out_length;
1143         *out_offset += *out_length;
1144         desc->data_ptrs[next_triplet - 1].last = 1;
1145         desc->data_ptrs[next_triplet - 1].dma_ext = 0;
1146         desc->d2mlen = next_triplet - desc->m2dlen;
1147
1148         desc->op_addr = op;
1149
1150         return 0;
1151 }
1152
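/*
 * Fill the DMA request descriptor for one LDPC decode code block: input LLRs,
 * optional HARQ combine input/output pointers and the hard output buffer.
 */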
1153 static inline int
1154 acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
1155                 struct acc100_dma_req_desc *desc,
1156                 struct rte_mbuf **input, struct rte_mbuf *h_output,
1157                 uint32_t *in_offset, uint32_t *h_out_offset,
1158                 uint32_t *h_out_length, uint32_t *mbuf_total_left,
1159                 uint32_t *seg_total_left,
1160                 struct acc100_fcw_ld *fcw)
1161 {
1162         struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1163         int next_triplet = 1; /* FCW already done */
1164         uint32_t input_length;
1165         uint16_t output_length, crc24_overlap = 0;
1166         uint16_t sys_cols, K, h_p_size, h_np_size;
1167         bool h_comp = check_bit(dec->op_flags,
1168                         RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);
1169
1170         acc100_header_init(desc);
1171
1172         if (check_bit(op->ldpc_dec.op_flags,
1173                         RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
1174                 crc24_overlap = 24;
1175
1176         /* Compute some LDPC BG lengths */
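        /*
         * With LLR compression enabled the input shrinks to 3/4 of its
         * nominal size (consistent with 8-bit LLRs packed down to 6 bits);
         * the hard output is K minus filler bits minus any dropped CRC24B.
         */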
1177         input_length = dec->cb_params.e;
1178         if (check_bit(op->ldpc_dec.op_flags,
1179                         RTE_BBDEV_LDPC_LLR_COMPRESSION))
1180                 input_length = (input_length * 3 + 3) / 4;
1181         sys_cols = (dec->basegraph == 1) ? 22 : 10;
1182         K = sys_cols * dec->z_c;
1183         output_length = K - dec->n_filler - crc24_overlap;
1184
1185         if (unlikely((*mbuf_total_left == 0) ||
1186                         (*mbuf_total_left < input_length))) {
1187                 rte_bbdev_log(ERR,
1188                                 "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1189                                 *mbuf_total_left, input_length);
1190                 return -1;
1191         }
1192
1193         next_triplet = acc100_dma_fill_blk_type_in(desc, input,
1194                         in_offset, input_length,
1195                         seg_total_left, next_triplet);
1196
1197         if (unlikely(next_triplet < 0)) {
1198                 rte_bbdev_log(ERR,
1199                                 "Mismatch between data to process and mbuf data length in bbdev_op: %p",
1200                                 op);
1201                 return -1;
1202         }
1203
1204         if (check_bit(op->ldpc_dec.op_flags,
1205                                 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1206                 h_p_size = fcw->hcin_size0 + fcw->hcin_size1;
1207                 if (h_comp)
1208                         h_p_size = (h_p_size * 3 + 3) / 4;
1209                 desc->data_ptrs[next_triplet].address =
1210                                 dec->harq_combined_input.offset;
1211                 desc->data_ptrs[next_triplet].blen = h_p_size;
1212                 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN_HARQ;
1213                 desc->data_ptrs[next_triplet].dma_ext = 1;
1214 #ifndef ACC100_EXT_MEM
1215                 acc100_dma_fill_blk_type_out(
1216                                 desc,
1217                                 op->ldpc_dec.harq_combined_input.data,
1218                                 op->ldpc_dec.harq_combined_input.offset,
1219                                 h_p_size,
1220                                 next_triplet,
1221                                 ACC100_DMA_BLKID_IN_HARQ);
1222 #endif
1223                 next_triplet++;
1224         }
1225
1226         desc->data_ptrs[next_triplet - 1].last = 1;
1227         desc->m2dlen = next_triplet;
1228         *mbuf_total_left -= input_length;
1229
1230         next_triplet = acc100_dma_fill_blk_type_out(desc, h_output,
1231                         *h_out_offset, output_length >> 3, next_triplet,
1232                         ACC100_DMA_BLKID_OUT_HARD);
1233
1234         if (check_bit(op->ldpc_dec.op_flags,
1235                                 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1236                 /* Pruned size of the HARQ */
1237                 h_p_size = fcw->hcout_size0 + fcw->hcout_size1;
1238                 /* Non-Pruned size of the HARQ */
1239                 h_np_size = fcw->hcout_offset > 0 ?
1240                                 fcw->hcout_offset + fcw->hcout_size1 :
1241                                 h_p_size;
1242                 if (h_comp) {
1243                         h_np_size = (h_np_size * 3 + 3) / 4;
1244                         h_p_size = (h_p_size * 3 + 3) / 4;
1245                 }
1246                 dec->harq_combined_output.length = h_np_size;
1247                 desc->data_ptrs[next_triplet].address =
1248                                 dec->harq_combined_output.offset;
1249                 desc->data_ptrs[next_triplet].blen = h_p_size;
1250                 desc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARQ;
1251                 desc->data_ptrs[next_triplet].dma_ext = 1;
1252 #ifndef ACC100_EXT_MEM
1253                 acc100_dma_fill_blk_type_out(
1254                                 desc,
1255                                 dec->harq_combined_output.data,
1256                                 dec->harq_combined_output.offset,
1257                                 h_p_size,
1258                                 next_triplet,
1259                                 ACC100_DMA_BLKID_OUT_HARQ);
1260 #endif
1261                 next_triplet++;
1262         }
1263
1264         *h_out_length = output_length >> 3;
1265         dec->hard_output.length += *h_out_length;
1266         *h_out_offset += *h_out_length;
1267         desc->data_ptrs[next_triplet - 1].last = 1;
1268         desc->d2mlen = next_triplet - desc->m2dlen;
1269
1270         desc->op_addr = op;
1271
1272         return 0;
1273 }
1274
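/*
 * Lightweight refresh of an already-initialized descriptor: only the data
 * pointers (input, HARQ input/output, hard output) are rewritten, reusing
 * the lengths and HARQ layout left by the previous operation on this entry.
 */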
1275 static inline void
1276 acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,
1277                 struct acc100_dma_req_desc *desc,
1278                 struct rte_mbuf *input, struct rte_mbuf *h_output,
1279                 uint32_t *in_offset, uint32_t *h_out_offset,
1280                 uint32_t *h_out_length,
1281                 union acc100_harq_layout_data *harq_layout)
1282 {
1283         int next_triplet = 1; /* FCW already done */
1284         desc->data_ptrs[next_triplet].address =
1285                         rte_pktmbuf_iova_offset(input, *in_offset);
1286         next_triplet++;
1287
1288         if (check_bit(op->ldpc_dec.op_flags,
1289                                 RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1290                 struct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;
1291                 desc->data_ptrs[next_triplet].address = hi.offset;
1292 #ifndef ACC100_EXT_MEM
1293                 desc->data_ptrs[next_triplet].address =
1294                                 rte_pktmbuf_iova_offset(hi.data, hi.offset);
1295 #endif
1296                 next_triplet++;
1297         }
1298
1299         desc->data_ptrs[next_triplet].address =
1300                         rte_pktmbuf_iova_offset(h_output, *h_out_offset);
1301         *h_out_length = desc->data_ptrs[next_triplet].blen;
1302         next_triplet++;
1303
1304         if (check_bit(op->ldpc_dec.op_flags,
1305                                 RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1306                 desc->data_ptrs[next_triplet].address =
1307                                 op->ldpc_dec.harq_combined_output.offset;
1308                 /* Adjust based on previous operation */
1309                 struct rte_bbdev_dec_op *prev_op = desc->op_addr;
1310                 op->ldpc_dec.harq_combined_output.length =
1311                                 prev_op->ldpc_dec.harq_combined_output.length;
1312                 int16_t hq_idx = op->ldpc_dec.harq_combined_output.offset /
1313                                 ACC100_HARQ_OFFSET;
1314                 int16_t prev_hq_idx =
1315                                 prev_op->ldpc_dec.harq_combined_output.offset
1316                                 / ACC100_HARQ_OFFSET;
1317                 harq_layout[hq_idx].val = harq_layout[prev_hq_idx].val;
1318 #ifndef ACC100_EXT_MEM
1319                 struct rte_bbdev_op_data ho =
1320                                 op->ldpc_dec.harq_combined_output;
1321                 desc->data_ptrs[next_triplet].address =
1322                                 rte_pktmbuf_iova_offset(ho.data, ho.offset);
1323 #endif
1324                 next_triplet++;
1325         }
1326
1327         op->ldpc_dec.hard_output.length += *h_out_length;
1328         desc->op_addr = op;
1329 }
1330
1331
1332 /* Enqueue a number of operations to HW and update software rings */
1333 static inline void
1334 acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,
1335                 struct rte_bbdev_stats *queue_stats)
1336 {
1337         union acc100_enqueue_reg_fmt enq_req;
1338 #ifdef RTE_BBDEV_OFFLOAD_COST
1339         uint64_t start_time = 0;
1340         queue_stats->acc_offload_cycles = 0;
1341 #else
1342         RTE_SET_USED(queue_stats);
1343 #endif
1344
1345         enq_req.val = 0;
1346         /* Setting offset, 100b for 256 DMA Desc */
1347         enq_req.addr_offset = ACC100_DESC_OFFSET;
1348
1349         /* Split ops into batches */
1350         do {
1351                 union acc100_dma_desc *desc;
1352                 uint16_t enq_batch_size;
1353                 uint64_t offset;
1354                 rte_iova_t req_elem_addr;
1355
1356                 enq_batch_size = RTE_MIN(n, MAX_ENQ_BATCH_SIZE);
1357
1358                 /* Set flag on last descriptor in a batch */
1359                 desc = q->ring_addr + ((q->sw_ring_head + enq_batch_size - 1) &
1360                                 q->sw_ring_wrap_mask);
1361                 desc->req.last_desc_in_batch = 1;
1362
1363                 /* Calculate the 1st descriptor's address */
1364                 offset = ((q->sw_ring_head & q->sw_ring_wrap_mask) *
1365                                 sizeof(union acc100_dma_desc));
1366                 req_elem_addr = q->ring_addr_iova + offset;
1367
1368                 /* Fill enqueue struct */
1369                 enq_req.num_elem = enq_batch_size;
1370                 /* low 6 bits are not needed */
1371                 enq_req.req_elem_addr = (uint32_t)(req_elem_addr >> 6);
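                /*
                 * Illustrative example (hypothetical values): assuming the
                 * 256-byte descriptors suggested by the addr_offset comment
                 * above, a ring base IOVA of 0x10000000 and
                 * (sw_ring_head & wrap_mask) == 5 give offset 0x500, so the
                 * register field holds (0x10000000 + 0x500) >> 6.
                 */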
1372
1373 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1374                 rte_memdump(stderr, "Req sdone", desc, sizeof(*desc));
1375 #endif
1376                 rte_bbdev_log_debug(
1377                                 "Enqueue %u reqs (phys %#"PRIx64") to reg %p",
1378                                 enq_batch_size,
1379                                 req_elem_addr,
1380                                 (void *)q->mmio_reg_enqueue);
1381
1382                 rte_wmb();
1383
1384 #ifdef RTE_BBDEV_OFFLOAD_COST
1385                 /* Start time measurement for enqueue function offload. */
1386                 start_time = rte_rdtsc_precise();
1387 #endif
1388                 rte_bbdev_log(DEBUG, "Debug : MMIO Enqueue");
1389                 mmio_write(q->mmio_reg_enqueue, enq_req.val);
1390
1391 #ifdef RTE_BBDEV_OFFLOAD_COST
1392                 queue_stats->acc_offload_cycles +=
1393                                 rte_rdtsc_precise() - start_time;
1394 #endif
1395
1396                 q->aq_enqueued++;
1397                 q->sw_ring_head += enq_batch_size;
1398                 n -= enq_batch_size;
1399
1400         } while (n);
1401
1402
1403 }
1404
1405 /* Enqueue a group of LDPC encode operations into one descriptor in CB mode */
1406 static inline int
1407 enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,
1408                 uint16_t total_enqueued_cbs, int16_t num)
1409 {
1410         union acc100_dma_desc *desc = NULL;
1411         uint32_t out_length;
1412         struct rte_mbuf *output_head, *output;
1413         int i, next_triplet;
1414         uint16_t  in_length_in_bytes;
1415         struct rte_bbdev_op_ldpc_enc *enc = &ops[0]->ldpc_enc;
1416
1417         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1418                         & q->sw_ring_wrap_mask);
1419         desc = q->ring_addr + desc_idx;
1420         acc100_fcw_le_fill(ops[0], &desc->req.fcw_le, num);
1421
1422         /* This could be done at polling */
1423         acc100_header_init(&desc->req);
1424         desc->req.numCBs = num;
1425
1426         in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;
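        /*
         * E (cb_params.e) is the rate-matched output length in bits and is
         * rounded up to bytes below; e.g. e = 1001 gives out_length = 126.
         */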
1427         out_length = (enc->cb_params.e + 7) >> 3;
1428         desc->req.m2dlen = 1 + num;
1429         desc->req.d2mlen = num;
1430         next_triplet = 1;
1431
1432         for (i = 0; i < num; i++) {
1433                 desc->req.data_ptrs[next_triplet].address =
1434                         rte_pktmbuf_iova_offset(ops[i]->ldpc_enc.input.data, 0);
1435                 desc->req.data_ptrs[next_triplet].blen = in_length_in_bytes;
1436                 next_triplet++;
1437                 desc->req.data_ptrs[next_triplet].address =
1438                                 rte_pktmbuf_iova_offset(
1439                                 ops[i]->ldpc_enc.output.data, 0);
1440                 desc->req.data_ptrs[next_triplet].blen = out_length;
1441                 next_triplet++;
1442                 ops[i]->ldpc_enc.output.length = out_length;
1443                 output_head = output = ops[i]->ldpc_enc.output.data;
1444                 mbuf_append(output_head, output, out_length);
1445                 output->data_len = out_length;
1446         }
1447
1448         desc->req.op_addr = ops[0];
1449
1450 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1451         rte_memdump(stderr, "FCW", &desc->req.fcw_le,
1452                         sizeof(desc->req.fcw_le) - 8);
1453         rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1454 #endif
1455
1456         /* One CB (one op) was successfully prepared to enqueue */
1457         return num;
1458 }
1459
1460 /* Enqueue one encode operation for ACC100 device in CB mode */
1461 static inline int
1462 enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,
1463                 uint16_t total_enqueued_cbs)
1464 {
1465         union acc100_dma_desc *desc = NULL;
1466         int ret;
1467         uint32_t in_offset, out_offset, out_length, mbuf_total_left,
1468                 seg_total_left;
1469         struct rte_mbuf *input, *output_head, *output;
1470
1471         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1472                         & q->sw_ring_wrap_mask);
1473         desc = q->ring_addr + desc_idx;
1474         acc100_fcw_le_fill(op, &desc->req.fcw_le, 1);
1475
1476         input = op->ldpc_enc.input.data;
1477         output_head = output = op->ldpc_enc.output.data;
1478         in_offset = op->ldpc_enc.input.offset;
1479         out_offset = op->ldpc_enc.output.offset;
1480         out_length = 0;
1481         mbuf_total_left = op->ldpc_enc.input.length;
1482         seg_total_left = rte_pktmbuf_data_len(op->ldpc_enc.input.data)
1483                         - in_offset;
1484
1485         ret = acc100_dma_desc_le_fill(op, &desc->req, &input, output,
1486                         &in_offset, &out_offset, &out_length, &mbuf_total_left,
1487                         &seg_total_left);
1488
1489         if (unlikely(ret < 0))
1490                 return ret;
1491
1492         mbuf_append(output_head, output, out_length);
1493
1494 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1495         rte_memdump(stderr, "FCW", &desc->req.fcw_le,
1496                         sizeof(desc->req.fcw_le) - 8);
1497         rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1498
1499         if (check_mbuf_total_left(mbuf_total_left) != 0)
1500                 return -EINVAL;
1501 #endif
1502         /* One CB (one op) was successfully prepared to enqueue */
1503         return 1;
1504 }
1505
1506 static inline int
1507 harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
1508                 uint16_t total_enqueued_cbs) {
1509         struct acc100_fcw_ld *fcw;
1510         union acc100_dma_desc *desc;
1511         int next_triplet = 1;
1512         struct rte_mbuf *hq_output_head, *hq_output;
1513         uint16_t harq_in_length = op->ldpc_dec.harq_combined_input.length;
1514         if (harq_in_length == 0) {
1515                 rte_bbdev_log(ERR, "Loopback requested with null HARQ input size\n");
1516                 return -EINVAL;
1517         }
1518
1519         int h_comp = check_bit(op->ldpc_dec.op_flags,
1520                         RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION
1521                         ) ? 1 : 0;
1522         if (h_comp == 1)
1523                 harq_in_length = harq_in_length * 8 / 6;
1524         harq_in_length = RTE_ALIGN(harq_in_length, 64);
1525         uint16_t harq_dma_length_in = (h_comp == 0) ?
1526                         harq_in_length :
1527                         harq_in_length * 6 / 8;
1528         uint16_t harq_dma_length_out = harq_dma_length_in;
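        /*
         * Illustrative arithmetic only: with 6-bit compression a stored HARQ
         * length of 768 bytes expands to 768 * 8 / 6 = 1024 (already 64-byte
         * aligned), while the DMA transfer length folds back to
         * 1024 * 6 / 8 = 768 bytes.
         */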
1529         bool ddr_mem_in = check_bit(op->ldpc_dec.op_flags,
1530                         RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE);
1531         union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
1532         uint16_t harq_index = (ddr_mem_in ?
1533                         op->ldpc_dec.harq_combined_input.offset :
1534                         op->ldpc_dec.harq_combined_output.offset)
1535                         / ACC100_HARQ_OFFSET;
1536
1537         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1538                         & q->sw_ring_wrap_mask);
1539         desc = q->ring_addr + desc_idx;
1540         fcw = &desc->req.fcw_ld;
1541         /* Set the FCW from loopback into DDR */
1542         memset(fcw, 0, sizeof(struct acc100_fcw_ld));
1543         fcw->FCWversion = ACC100_FCW_VER;
1544         fcw->qm = 2;
1545         fcw->Zc = 384;
1546         if (harq_in_length < 16 * ACC100_N_ZC_1)
1547                 fcw->Zc = 16;
1548         fcw->ncb = fcw->Zc * ACC100_N_ZC_1;
1549         fcw->rm_e = 2;
1550         fcw->hcin_en = 1;
1551         fcw->hcout_en = 1;
1552
1553         rte_bbdev_log(DEBUG, "Loopback IN %d Index %d offset %d length %d %d\n",
1554                         ddr_mem_in, harq_index,
1555                         harq_layout[harq_index].offset, harq_in_length,
1556                         harq_dma_length_in);
1557
1558         if (ddr_mem_in && (harq_layout[harq_index].offset > 0)) {
1559                 fcw->hcin_size0 = harq_layout[harq_index].size0;
1560                 fcw->hcin_offset = harq_layout[harq_index].offset;
1561                 fcw->hcin_size1 = harq_in_length - fcw->hcin_offset;
1562                 harq_dma_length_in = (fcw->hcin_size0 + fcw->hcin_size1);
1563                 if (h_comp == 1)
1564                         harq_dma_length_in = harq_dma_length_in * 6 / 8;
1565         } else {
1566                 fcw->hcin_size0 = harq_in_length;
1567         }
1568         harq_layout[harq_index].val = 0;
1569         rte_bbdev_log(DEBUG, "Loopback FCW Config %d %d %d\n",
1570                         fcw->hcin_size0, fcw->hcin_offset, fcw->hcin_size1);
1571         fcw->hcout_size0 = harq_in_length;
1572         fcw->hcin_decomp_mode = h_comp;
1573         fcw->hcout_comp_mode = h_comp;
1574         fcw->gain_i = 1;
1575         fcw->gain_h = 1;
1576
1577         /* Set the prefix of descriptor. This could be done at polling */
1578         acc100_header_init(&desc->req);
1579
1580         /* Null LLR input for Decoder */
1581         desc->req.data_ptrs[next_triplet].address =
1582                         q->lb_in_addr_iova;
1583         desc->req.data_ptrs[next_triplet].blen = 2;
1584         desc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;
1585         desc->req.data_ptrs[next_triplet].last = 0;
1586         desc->req.data_ptrs[next_triplet].dma_ext = 0;
1587         next_triplet++;
1588
1589         /* HARQ combine input from either memory interface (host or DDR) */
1590         if (!ddr_mem_in) {
1591                 next_triplet = acc100_dma_fill_blk_type_out(&desc->req,
1592                                 op->ldpc_dec.harq_combined_input.data,
1593                                 op->ldpc_dec.harq_combined_input.offset,
1594                                 harq_dma_length_in,
1595                                 next_triplet,
1596                                 ACC100_DMA_BLKID_IN_HARQ);
1597         } else {
1598                 desc->req.data_ptrs[next_triplet].address =
1599                                 op->ldpc_dec.harq_combined_input.offset;
1600                 desc->req.data_ptrs[next_triplet].blen =
1601                                 harq_dma_length_in;
1602                 desc->req.data_ptrs[next_triplet].blkid =
1603                                 ACC100_DMA_BLKID_IN_HARQ;
1604                 desc->req.data_ptrs[next_triplet].dma_ext = 1;
1605                 next_triplet++;
1606         }
1607         desc->req.data_ptrs[next_triplet - 1].last = 1;
1608         desc->req.m2dlen = next_triplet;
1609
1610         /* Dropped decoder hard output */
1611         desc->req.data_ptrs[next_triplet].address =
1612                         q->lb_out_addr_iova;
1613         desc->req.data_ptrs[next_triplet].blen = ACC100_BYTES_IN_WORD;
1614         desc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARD;
1615         desc->req.data_ptrs[next_triplet].last = 0;
1616         desc->req.data_ptrs[next_triplet].dma_ext = 0;
1617         next_triplet++;
1618
1619         /* HARQ combine output to either memory interface (host or DDR) */
1620         if (check_bit(op->ldpc_dec.op_flags,
1621                         RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE
1622                         )) {
1623                 desc->req.data_ptrs[next_triplet].address =
1624                                 op->ldpc_dec.harq_combined_output.offset;
1625                 desc->req.data_ptrs[next_triplet].blen =
1626                                 harq_dma_length_out;
1627                 desc->req.data_ptrs[next_triplet].blkid =
1628                                 ACC100_DMA_BLKID_OUT_HARQ;
1629                 desc->req.data_ptrs[next_triplet].dma_ext = 1;
1630                 next_triplet++;
1631         } else {
1632                 hq_output_head = op->ldpc_dec.harq_combined_output.data;
1633                 hq_output = op->ldpc_dec.harq_combined_output.data;
1634                 next_triplet = acc100_dma_fill_blk_type_out(
1635                                 &desc->req,
1636                                 op->ldpc_dec.harq_combined_output.data,
1637                                 op->ldpc_dec.harq_combined_output.offset,
1638                                 harq_dma_length_out,
1639                                 next_triplet,
1640                                 ACC100_DMA_BLKID_OUT_HARQ);
1641                 /* HARQ output */
1642                 mbuf_append(hq_output_head, hq_output, harq_dma_length_out);
1643                 op->ldpc_dec.harq_combined_output.length =
1644                                 harq_dma_length_out;
1645         }
1646         desc->req.data_ptrs[next_triplet - 1].last = 1;
1647         desc->req.d2mlen = next_triplet - desc->req.m2dlen;
1648         desc->req.op_addr = op;
1649
1650         /* One CB (one op) was successfully prepared to enqueue */
1651         return 1;
1652 }
1653
1654 /* Enqueue one decode operation for ACC100 device in CB mode */
1655 static inline int
1656 enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
1657                 uint16_t total_enqueued_cbs, bool same_op)
1658 {
1659         int ret;
1660         if (unlikely(check_bit(op->ldpc_dec.op_flags,
1661                         RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK))) {
1662                 ret = harq_loopback(q, op, total_enqueued_cbs);
1663                 return ret;
1664         }
1665
1666         union acc100_dma_desc *desc;
1667         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1668                         & q->sw_ring_wrap_mask);
1669         desc = q->ring_addr + desc_idx;
1670         struct rte_mbuf *input, *h_output_head, *h_output;
1671         uint32_t in_offset, h_out_offset, mbuf_total_left, h_out_length = 0;
1672         input = op->ldpc_dec.input.data;
1673         h_output_head = h_output = op->ldpc_dec.hard_output.data;
1674         in_offset = op->ldpc_dec.input.offset;
1675         h_out_offset = op->ldpc_dec.hard_output.offset;
1676         mbuf_total_left = op->ldpc_dec.input.length;
1677 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1678         if (unlikely(input == NULL)) {
1679                 rte_bbdev_log(ERR, "Invalid mbuf pointer");
1680                 return -EFAULT;
1681         }
1682 #endif
1683         union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
1684
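        /*
         * When consecutive operations share identical decode parameters
         * (same_op), the previous descriptor and FCW are copied and only the
         * per-operation pointers are refreshed, avoiding a full FCW rebuild
         * for every code block.
         */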
1685         if (same_op) {
1686                 union acc100_dma_desc *prev_desc;
1687                 desc_idx = ((q->sw_ring_head + total_enqueued_cbs - 1)
1688                                 & q->sw_ring_wrap_mask);
1689                 prev_desc = q->ring_addr + desc_idx;
1690                 uint8_t *prev_ptr = (uint8_t *) prev_desc;
1691                 uint8_t *new_ptr = (uint8_t *) desc;
1692                 /* Copy first 4 words and BDESCs */
1693                 rte_memcpy(new_ptr, prev_ptr, ACC100_5GUL_SIZE_0);
1694                 rte_memcpy(new_ptr + ACC100_5GUL_OFFSET_0,
1695                                 prev_ptr + ACC100_5GUL_OFFSET_0,
1696                                 ACC100_5GUL_SIZE_1);
1697                 desc->req.op_addr = prev_desc->req.op_addr;
1698                 /* Copy FCW */
1699                 rte_memcpy(new_ptr + ACC100_DESC_FCW_OFFSET,
1700                                 prev_ptr + ACC100_DESC_FCW_OFFSET,
1701                                 ACC100_FCW_LD_BLEN);
1702                 acc100_dma_desc_ld_update(op, &desc->req, input, h_output,
1703                                 &in_offset, &h_out_offset,
1704                                 &h_out_length, harq_layout);
1705         } else {
1706                 struct acc100_fcw_ld *fcw;
1707                 uint32_t seg_total_left;
1708                 fcw = &desc->req.fcw_ld;
1709                 acc100_fcw_ld_fill(op, fcw, harq_layout);
1710
1711                 /* Special handling when overusing mbuf */
1712                 if (fcw->rm_e < ACC100_MAX_E_MBUF)
1713                         seg_total_left = rte_pktmbuf_data_len(input)
1714                                         - in_offset;
1715                 else
1716                         seg_total_left = fcw->rm_e;
1717
1718                 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input, h_output,
1719                                 &in_offset, &h_out_offset,
1720                                 &h_out_length, &mbuf_total_left,
1721                                 &seg_total_left, fcw);
1722                 if (unlikely(ret < 0))
1723                         return ret;
1724         }
1725
1726         /* Hard output */
1727         mbuf_append(h_output_head, h_output, h_out_length);
1728 #ifndef ACC100_EXT_MEM
1729         if (op->ldpc_dec.harq_combined_output.length > 0) {
1730                 /* Push the HARQ output into host memory */
1731                 struct rte_mbuf *hq_output_head, *hq_output;
1732                 hq_output_head = op->ldpc_dec.harq_combined_output.data;
1733                 hq_output = op->ldpc_dec.harq_combined_output.data;
1734                 mbuf_append(hq_output_head, hq_output,
1735                                 op->ldpc_dec.harq_combined_output.length);
1736         }
1737 #endif
1738
1739 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1740         rte_memdump(stderr, "FCW", &desc->req.fcw_ld,
1741                         sizeof(desc->req.fcw_ld) - 8);
1742         rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1743 #endif
1744
1745         /* One CB (one op) was successfully prepared to enqueue */
1746         return 1;
1747 }
1748
1749
1750 /* Enqueue one decode operation for ACC100 device in TB mode */
1751 static inline int
1752 enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,
1753                 uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)
1754 {
1755         union acc100_dma_desc *desc = NULL;
1756         int ret;
1757         uint8_t r, c;
1758         uint32_t in_offset, h_out_offset,
1759                 h_out_length, mbuf_total_left, seg_total_left;
1760         struct rte_mbuf *input, *h_output_head, *h_output;
1761         uint16_t current_enqueued_cbs = 0;
1762
1763         uint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)
1764                         & q->sw_ring_wrap_mask);
1765         desc = q->ring_addr + desc_idx;
1766         uint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;
1767         union acc100_harq_layout_data *harq_layout = q->d->harq_layout;
1768         acc100_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout);
1769
1770         input = op->ldpc_dec.input.data;
1771         h_output_head = h_output = op->ldpc_dec.hard_output.data;
1772         in_offset = op->ldpc_dec.input.offset;
1773         h_out_offset = op->ldpc_dec.hard_output.offset;
1774         h_out_length = 0;
1775         mbuf_total_left = op->ldpc_dec.input.length;
1776         c = op->ldpc_dec.tb_params.c;
1777         r = op->ldpc_dec.tb_params.r;
1778
1779         while (mbuf_total_left > 0 && r < c) {
1780
1781                 seg_total_left = rte_pktmbuf_data_len(input) - in_offset;
1782
1783                 /* Set up DMA descriptor */
1784                 desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)
1785                                 & q->sw_ring_wrap_mask);
1786                 desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
1787                 desc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;
1788                 ret = acc100_dma_desc_ld_fill(op, &desc->req, &input,
1789                                 h_output, &in_offset, &h_out_offset,
1790                                 &h_out_length,
1791                                 &mbuf_total_left, &seg_total_left,
1792                                 &desc->req.fcw_ld);
1793
1794                 if (unlikely(ret < 0))
1795                         return ret;
1796
1797                 /* Hard output */
1798                 mbuf_append(h_output_head, h_output, h_out_length);
1799
1800                 /* Set total number of CBs in TB */
1801                 desc->req.cbs_in_tb = cbs_in_tb;
1802 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1803                 rte_memdump(stderr, "FCW", &desc->req.fcw_td,
1804                                 sizeof(desc->req.fcw_td) - 8);
1805                 rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc));
1806 #endif
1807
1808                 if (seg_total_left == 0) {
1809                         /* Go to the next mbuf */
1810                         input = input->next;
1811                         in_offset = 0;
1812                         h_output = h_output->next;
1813                         h_out_offset = 0;
1814                 }
1815                 total_enqueued_cbs++;
1816                 current_enqueued_cbs++;
1817                 r++;
1818         }
1819
1820         if (unlikely(desc == NULL))
1821                 return current_enqueued_cbs;
1822
1823 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1824         if (check_mbuf_total_left(mbuf_total_left) != 0)
1825                 return -EINVAL;
1826 #endif
1827         /* Set SDone on last CB descriptor for TB mode */
1828         desc->req.sdone_enable = 1;
1829         desc->req.irq_enable = q->irq_enable;
1830
1831         return current_enqueued_cbs;
1832 }
1833
1834
1835 /* Calculates number of CBs in processed encoder TB based on 'r' and input
1836  * length.
1837  */
1838 static inline uint8_t
1839 get_num_cbs_in_tb_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)
1840 {
1841         uint8_t c, c_neg, r, crc24_bits = 0;
1842         uint16_t k, k_neg, k_pos;
1843         uint8_t cbs_in_tb = 0;
1844         int32_t length;
1845
1846         length = turbo_enc->input.length;
1847         r = turbo_enc->tb_params.r;
1848         c = turbo_enc->tb_params.c;
1849         c_neg = turbo_enc->tb_params.c_neg;
1850         k_neg = turbo_enc->tb_params.k_neg;
1851         k_pos = turbo_enc->tb_params.k_pos;
1852         crc24_bits = 0;
1853         if (check_bit(turbo_enc->op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))
1854                 crc24_bits = 24;
1855         while (length > 0 && r < c) {
1856                 k = (r < c_neg) ? k_neg : k_pos;
1857                 length -= (k - crc24_bits) >> 3;
1858                 r++;
1859                 cbs_in_tb++;
1860         }
1861
1862         return cbs_in_tb;
1863 }
1864
1865 /* Calculates number of CBs in processed decoder TB based on 'r' and input
1866  * length.
1867  */
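/*
 * Worked example of the formula below (illustrative values): for k = 6144,
 * kw = RTE_ALIGN_CEIL(6148, 32) * 3 = 6176 * 3 = 18528, so each such CB
 * accounts for 18528 bytes of the input length.
 */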
1868 static inline uint16_t
1869 get_num_cbs_in_tb_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)
1870 {
1871         uint8_t c, c_neg, r = 0;
1872         uint16_t kw, k, k_neg, k_pos, cbs_in_tb = 0;
1873         int32_t length;
1874
1875         length = turbo_dec->input.length;
1876         r = turbo_dec->tb_params.r;
1877         c = turbo_dec->tb_params.c;
1878         c_neg = turbo_dec->tb_params.c_neg;
1879         k_neg = turbo_dec->tb_params.k_neg;
1880         k_pos = turbo_dec->tb_params.k_pos;
1881         while (length > 0 && r < c) {
1882                 k = (r < c_neg) ? k_neg : k_pos;
1883                 kw = RTE_ALIGN_CEIL(k + 4, 32) * 3;
1884                 length -= kw;
1885                 r++;
1886                 cbs_in_tb++;
1887         }
1888
1889         return cbs_in_tb;
1890 }
1891
1892 /* Calculates number of CBs in processed decoder TB based on 'r' and input
1893  * length.
1894  */
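/*
 * Worked example of the loop below (illustrative values): with tb_params
 * r = 0, c = 4, cab = 2, ea = 2000, eb = 1800 and input.length = 7600, the
 * loop subtracts 2000, 2000, 1800 and 1800 in turn and returns 4 CBs.
 */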
1895 static inline uint16_t
1896 get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)
1897 {
1898         uint16_t r, cbs_in_tb = 0;
1899         int32_t length = ldpc_dec->input.length;
1900         r = ldpc_dec->tb_params.r;
1901         while (length > 0 && r < ldpc_dec->tb_params.c) {
1902                 length -=  (r < ldpc_dec->tb_params.cab) ?
1903                                 ldpc_dec->tb_params.ea :
1904                                 ldpc_dec->tb_params.eb;
1905                 r++;
1906                 cbs_in_tb++;
1907         }
1908         return cbs_in_tb;
1909 }
1910
1911 /* Check if we can mux encode operations with a common FCW */
1912 static inline bool
1913 check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {
1914         uint16_t i;
1915         if (num <= 1)
1916                 return false;
1917         for (i = 1; i < num; ++i) {
1918                 /* Only mux compatible code blocks */
1919                 if (memcmp((uint8_t *)(&ops[i]->ldpc_enc) + ACC100_ENC_OFFSET,
1920                                 (uint8_t *)(&ops[0]->ldpc_enc) +
1921                                 ACC100_ENC_OFFSET,
1922                                 ACC100_CMP_ENC_SIZE) != 0)
1923                         return false;
1924         }
1925         return true;
1926 }
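/*
 * When check_mux() passes, up to ACC100_MUX_5GDL_DESC compatible encode
 * operations are packed into a single descriptor by
 * enqueue_ldpc_enc_n_op_cb(), so one descriptor and one FCW serve the whole
 * group (see acc100_enqueue_ldpc_enc_cb below).
 */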
1927
1928 /* Enqueue encode operations for ACC100 device in CB mode. */
1929 static inline uint16_t
1930 acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
1931                 struct rte_bbdev_enc_op **ops, uint16_t num)
1932 {
1933         struct acc100_queue *q = q_data->queue_private;
1934         int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
1935         uint16_t i = 0;
1936         union acc100_dma_desc *desc;
1937         int ret, desc_idx = 0;
1938         int16_t enq, left = num;
1939
1940         while (left > 0) {
1941                 if (unlikely(avail < 1))
1942                         break;
1943                 avail--;
1944                 enq = RTE_MIN(left, ACC100_MUX_5GDL_DESC);
1945                 if (check_mux(&ops[i], enq)) {
1946                         ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],
1947                                         desc_idx, enq);
1948                         if (ret < 0)
1949                                 break;
1950                         i += enq;
1951                 } else {
1952                         ret = enqueue_ldpc_enc_one_op_cb(q, ops[i], desc_idx);
1953                         if (ret < 0)
1954                                 break;
1955                         i++;
1956                 }
1957                 desc_idx++;
1958                 left = num - i;
1959         }
1960
1961         if (unlikely(i == 0))
1962                 return 0; /* Nothing to enqueue */
1963
1964         /* Set SDone in last CB in enqueued ops for CB mode */
1965         desc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1)
1966                         & q->sw_ring_wrap_mask);
1967         desc->req.sdone_enable = 1;
1968         desc->req.irq_enable = q->irq_enable;
1969
1970         acc100_dma_enqueue(q, desc_idx, &q_data->queue_stats);
1971
1972         /* Update stats */
1973         q_data->queue_stats.enqueued_count += i;
1974         q_data->queue_stats.enqueue_err_count += num - i;
1975
1976         return i;
1977 }
1978
1979 /* Enqueue encode operations for ACC100 device. */
1980 static uint16_t
1981 acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1982                 struct rte_bbdev_enc_op **ops, uint16_t num)
1983 {
1984         if (unlikely(num == 0))
1985                 return 0;
1986         return acc100_enqueue_ldpc_enc_cb(q_data, ops, num);
1987 }
1988
1989 /* Check if we can mux decode operations with a common FCW */
1990 static inline bool
1991 cmp_ldpc_dec_op(struct rte_bbdev_dec_op **ops) {
1992         /* Only mux compatible code blocks */
1993         if (memcmp((uint8_t *)(&ops[0]->ldpc_dec) + ACC100_DEC_OFFSET,
1994                         (uint8_t *)(&ops[1]->ldpc_dec) +
1995                         ACC100_DEC_OFFSET, ACC100_CMP_DEC_SIZE) != 0) {
1996                 return false;
1997         } else
1998                 return true;
1999 }
2000
2001
2002 /* Enqueue decode operations for ACC100 device in TB mode */
2003 static uint16_t
2004 acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,
2005                 struct rte_bbdev_dec_op **ops, uint16_t num)
2006 {
2007         struct acc100_queue *q = q_data->queue_private;
2008         int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
2009         uint16_t i, enqueued_cbs = 0;
2010         uint8_t cbs_in_tb;
2011         int ret;
2012
2013         for (i = 0; i < num; ++i) {
2014                 cbs_in_tb = get_num_cbs_in_tb_ldpc_dec(&ops[i]->ldpc_dec);
2015                 /* Check if there is enough free space for further processing */
2016                 if (unlikely(avail - cbs_in_tb < 0))
2017                         break;
2018                 avail -= cbs_in_tb;
2019
2020                 ret = enqueue_ldpc_dec_one_op_tb(q, ops[i],
2021                                 enqueued_cbs, cbs_in_tb);
2022                 if (ret < 0)
2023                         break;
2024                 enqueued_cbs += ret;
2025         }
2026
2027         acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);
2028
2029         /* Update stats */
2030         q_data->queue_stats.enqueued_count += i;
2031         q_data->queue_stats.enqueue_err_count += num - i;
2032         return i;
2033 }
2034
2035 /* Enqueue decode operations for ACC100 device in CB mode */
2036 static uint16_t
2037 acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
2038                 struct rte_bbdev_dec_op **ops, uint16_t num)
2039 {
2040         struct acc100_queue *q = q_data->queue_private;
2041         int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;
2042         uint16_t i;
2043         union acc100_dma_desc *desc;
2044         int ret;
2045         bool same_op = false;
2046         for (i = 0; i < num; ++i) {
2047                 /* Check if there is enough free space for further processing */
2048                 if (unlikely(avail < 1))
2049                         break;
2050                 avail -= 1;
2051
2052                 if (i > 0)
2053                         same_op = cmp_ldpc_dec_op(&ops[i-1]);
2054                 rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d\n",
2055                         i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index,
2056                         ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count,
2057                         ops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c,
2058                         ops[i]->ldpc_dec.n_cb, ops[i]->ldpc_dec.q_m,
2059                         ops[i]->ldpc_dec.n_filler, ops[i]->ldpc_dec.cb_params.e,
2060                         same_op);
2061                 ret = enqueue_ldpc_dec_one_op_cb(q, ops[i], i, same_op);
2062                 if (ret < 0)
2063                         break;
2064         }
2065
2066         if (unlikely(i == 0))
2067                 return 0; /* Nothing to enqueue */
2068
2069         /* Set SDone in last CB in enqueued ops for CB mode */
2070         desc = q->ring_addr + ((q->sw_ring_head + i - 1)
2071                         & q->sw_ring_wrap_mask);
2072
2073         desc->req.sdone_enable = 1;
2074         desc->req.irq_enable = q->irq_enable;
2075
2076         acc100_dma_enqueue(q, i, &q_data->queue_stats);
2077
2078         /* Update stats */
2079         q_data->queue_stats.enqueued_count += i;
2080         q_data->queue_stats.enqueue_err_count += num - i;
2081         return i;
2082 }
2083
2084 /* Enqueue decode operations for ACC100 device. */
2085 static uint16_t
2086 acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
2087                 struct rte_bbdev_dec_op **ops, uint16_t num)
2088 {
2089         struct acc100_queue *q = q_data->queue_private;
2090         int32_t aq_avail = q->aq_depth +
2091                         (q->aq_dequeued - q->aq_enqueued) / 128;
2092
2093         if (unlikely((aq_avail == 0) || (num == 0)))
2094                 return 0;
2095
2096         if (ops[0]->ldpc_dec.code_block_mode == 0)
2097                 return acc100_enqueue_ldpc_dec_tb(q_data, ops, num);
2098         else
2099                 return acc100_enqueue_ldpc_dec_cb(q_data, ops, num);
2100 }
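/*
 * Application-side sketch (illustrative only; dev_id, queue_id and the op
 * array are assumptions, not defined in this driver). The enqueue/dequeue
 * callbacks in this file are reached through the public bbdev API, e.g.:
 *
 *     uint16_t n = rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id, ops, burst);
 *     ...
 *     uint16_t done = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id, ops, n);
 */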
2101
2102
2103 /* Dequeue one encode descriptor (one or more muxed CBs) from ACC100 device in CB mode */
2104 static inline int
2105 dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
2106                 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
2107 {
2108         union acc100_dma_desc *desc, atom_desc;
2109         union acc100_dma_rsp_desc rsp;
2110         struct rte_bbdev_enc_op *op;
2111         int i;
2112
2113         desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
2114                         & q->sw_ring_wrap_mask);
2115         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2116                         __ATOMIC_RELAXED);
2117
2118         /* Check fdone bit */
2119         if (!(atom_desc.rsp.val & ACC100_FDONE))
2120                 return -1;
2121
2122         rsp.val = atom_desc.rsp.val;
2123         rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
2124
2125         /* Dequeue */
2126         op = desc->req.op_addr;
2127
2128         /* Clearing status, it will be set based on response */
2129         op->status = 0;
2130
2131         op->status |= ((rsp.input_err)
2132                         ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
2133         op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2134         op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2135
2136         if (desc->req.last_desc_in_batch) {
2137                 (*aq_dequeued)++;
2138                 desc->req.last_desc_in_batch = 0;
2139         }
2140         desc->rsp.val = ACC100_DMA_DESC_TYPE;
2141         desc->rsp.add_info_0 = 0; /*Reserved bits */
2142         desc->rsp.add_info_1 = 0; /*Reserved bits */
2143
2144         /* Flag that the muxing causes loss of opaque data */
2145         op->opaque_data = (void *)-1;
2146         for (i = 0 ; i < desc->req.numCBs; i++)
2147                 ref_op[i] = op;
2148
2149         /* One CB (op) was successfully dequeued */
2150         return desc->req.numCBs;
2151 }
2152
2153 /* Dequeue one encode operation from ACC100 device in TB mode */
2154 static inline int
2155 dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,
2156                 uint16_t total_dequeued_cbs, uint32_t *aq_dequeued)
2157 {
2158         union acc100_dma_desc *desc, *last_desc, atom_desc;
2159         union acc100_dma_rsp_desc rsp;
2160         struct rte_bbdev_enc_op *op;
2161         uint8_t i = 0;
2162         uint16_t current_dequeued_cbs = 0, cbs_in_tb;
2163
2164         desc = q->ring_addr + ((q->sw_ring_tail + total_dequeued_cbs)
2165                         & q->sw_ring_wrap_mask);
2166         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2167                         __ATOMIC_RELAXED);
2168
2169         /* Check fdone bit */
2170         if (!(atom_desc.rsp.val & ACC100_FDONE))
2171                 return -1;
2172
2173         /* Get number of CBs in dequeued TB */
2174         cbs_in_tb = desc->req.cbs_in_tb;
2175         /* Get last CB */
2176         last_desc = q->ring_addr + ((q->sw_ring_tail
2177                         + total_dequeued_cbs + cbs_in_tb - 1)
2178                         & q->sw_ring_wrap_mask);
2179         /* Check if the last CB in the TB is ready to dequeue (and thus
2180          * the whole TB is) by checking its SDone bit. If not, return.
2181          */
2182         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
2183                         __ATOMIC_RELAXED);
2184         if (!(atom_desc.rsp.val & ACC100_SDONE))
2185                 return -1;
2186
2187         /* Dequeue */
2188         op = desc->req.op_addr;
2189
2190         /* Clearing status, it will be set based on response */
2191         op->status = 0;
2192
2193         while (i < cbs_in_tb) {
2194                 desc = q->ring_addr + ((q->sw_ring_tail
2195                                 + total_dequeued_cbs)
2196                                 & q->sw_ring_wrap_mask);
2197                 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2198                                 __ATOMIC_RELAXED);
2199                 rsp.val = atom_desc.rsp.val;
2200                 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
2201                                 rsp.val);
2202
2203                 op->status |= ((rsp.input_err)
2204                                 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
2205                 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2206                 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2207
2208                 if (desc->req.last_desc_in_batch) {
2209                         (*aq_dequeued)++;
2210                         desc->req.last_desc_in_batch = 0;
2211                 }
2212                 desc->rsp.val = ACC100_DMA_DESC_TYPE;
2213                 desc->rsp.add_info_0 = 0;
2214                 desc->rsp.add_info_1 = 0;
2215                 total_dequeued_cbs++;
2216                 current_dequeued_cbs++;
2217                 i++;
2218         }
2219
2220         *ref_op = op;
2221
2222         return current_dequeued_cbs;
2223 }
2224
2225 /* Dequeue one decode operation from ACC100 device in CB mode */
2226 static inline int
2227 dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
2228                 struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
2229                 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
2230 {
2231         union acc100_dma_desc *desc, atom_desc;
2232         union acc100_dma_rsp_desc rsp;
2233         struct rte_bbdev_dec_op *op;
2234
2235         desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2236                         & q->sw_ring_wrap_mask);
2237         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2238                         __ATOMIC_RELAXED);
2239
2240         /* Check fdone bit */
2241         if (!(atom_desc.rsp.val & ACC100_FDONE))
2242                 return -1;
2243
2244         rsp.val = atom_desc.rsp.val;
2245         rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val);
2246
2247         /* Dequeue */
2248         op = desc->req.op_addr;
2249
2250         /* Clearing status, it will be set based on response */
2251         op->status = 0;
2252         op->status |= ((rsp.input_err)
2253                         ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
2254         op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2255         op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2256         if (op->status != 0)
2257                 q_data->queue_stats.dequeue_err_count++;
2258
2259         /* Report CRC error status only when no other error is present */
2260         if (!op->status)
2261                 op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
2262         op->turbo_dec.iter_count = (uint8_t) rsp.iter_cnt / 2;
2263         /* Check if this is the last desc in batch (Atomic Queue) */
2264         if (desc->req.last_desc_in_batch) {
2265                 (*aq_dequeued)++;
2266                 desc->req.last_desc_in_batch = 0;
2267         }
2268         desc->rsp.val = ACC100_DMA_DESC_TYPE;
2269         desc->rsp.add_info_0 = 0;
2270         desc->rsp.add_info_1 = 0;
2271         *ref_op = op;
2272
2273         /* One CB (op) was successfully dequeued */
2274         return 1;
2275 }
2276
2277 /* Dequeue one decode operation from ACC100 device in CB mode */
2278 static inline int
2279 dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,
2280                 struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
2281                 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
2282 {
2283         union acc100_dma_desc *desc, atom_desc;
2284         union acc100_dma_rsp_desc rsp;
2285         struct rte_bbdev_dec_op *op;
2286
2287         desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2288                         & q->sw_ring_wrap_mask);
2289         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2290                         __ATOMIC_RELAXED);
2291
2292         /* Check fdone bit */
2293         if (!(atom_desc.rsp.val & ACC100_FDONE))
2294                 return -1;
2295
2296         rsp.val = atom_desc.rsp.val;
2297
2298         /* Dequeue */
2299         op = desc->req.op_addr;
2300
2301         /* Clearing status, it will be set based on response */
2302         op->status = 0;
2303         op->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;
2304         op->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;
2305         op->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;
2306         if (op->status != 0)
2307                 q_data->queue_stats.dequeue_err_count++;
2308
2309         op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
2310         if (op->ldpc_dec.hard_output.length > 0 && !rsp.synd_ok)
2311                 op->status |= 1 << RTE_BBDEV_SYNDROME_ERROR;
2312         op->ldpc_dec.iter_count = (uint8_t) rsp.iter_cnt;
2313
2314         /* Check if this is the last desc in batch (Atomic Queue) */
2315         if (desc->req.last_desc_in_batch) {
2316                 (*aq_dequeued)++;
2317                 desc->req.last_desc_in_batch = 0;
2318         }
2319
2320         desc->rsp.val = ACC100_DMA_DESC_TYPE;
2321         desc->rsp.add_info_0 = 0;
2322         desc->rsp.add_info_1 = 0;
2323
2324         *ref_op = op;
2325
2326         /* One CB (op) was successfully dequeued */
2327         return 1;
2328 }
2329
2330 /* Dequeue one decode operation from ACC100 device in TB mode. */
2331 static inline int
2332 dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,
2333                 uint16_t dequeued_cbs, uint32_t *aq_dequeued)
2334 {
2335         union acc100_dma_desc *desc, *last_desc, atom_desc;
2336         union acc100_dma_rsp_desc rsp;
2337         struct rte_bbdev_dec_op *op;
2338         uint8_t cbs_in_tb = 1, cb_idx = 0;
2339
2340         desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2341                         & q->sw_ring_wrap_mask);
2342         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2343                         __ATOMIC_RELAXED);
2344
2345         /* Check fdone bit */
2346         if (!(atom_desc.rsp.val & ACC100_FDONE))
2347                 return -1;
2348
2349         /* Dequeue */
2350         op = desc->req.op_addr;
2351
2352         /* Get number of CBs in dequeued TB */
2353         cbs_in_tb = desc->req.cbs_in_tb;
2354         /* Get last CB */
2355         last_desc = q->ring_addr + ((q->sw_ring_tail
2356                         + dequeued_cbs + cbs_in_tb - 1)
2357                         & q->sw_ring_wrap_mask);
2358         /* Check if the last CB in the TB is ready to dequeue (and thus
2359          * the whole TB is) by checking its SDone bit. If not, return.
2360          */
2361         atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
2362                         __ATOMIC_RELAXED);
2363         if (!(atom_desc.rsp.val & ACC100_SDONE))
2364                 return -1;
2365
2366         /* Clearing status, it will be set based on response */
2367         op->status = 0;
2368
2369         /* Read remaining CBs if exists */
2370         while (cb_idx < cbs_in_tb) {
2371                 desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2372                                 & q->sw_ring_wrap_mask);
2373                 atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
2374                                 __ATOMIC_RELAXED);
2375                 rsp.val = atom_desc.rsp.val;
2376                 rte_bbdev_log_debug("Resp. desc %p: %x", desc,
2377                                 rsp.val);
2378
2379                 op->status |= ((rsp.input_err)
2380                                 ? (1 << RTE_BBDEV_DATA_ERROR) : 0);
2381                 op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2382                 op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);
2383
2384                 /* Report CRC error status only when no other error is present */
2385                 if (!op->status)
2386                         op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
2387                 op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
2388                                 op->turbo_dec.iter_count);
2389
2390                 /* Check if this is the last desc in batch (Atomic Queue) */
2391                 if (desc->req.last_desc_in_batch) {
2392                         (*aq_dequeued)++;
2393                         desc->req.last_desc_in_batch = 0;
2394                 }
2395                 desc->rsp.val = ACC100_DMA_DESC_TYPE;
2396                 desc->rsp.add_info_0 = 0;
2397                 desc->rsp.add_info_1 = 0;
2398                 dequeued_cbs++;
2399                 cb_idx++;
2400         }
2401
2402         *ref_op = op;
2403
2404         return cb_idx;
2405 }
2406
2407 /* Dequeue LDPC encode operations from ACC100 device. */
2408 static uint16_t
2409 acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
2410                 struct rte_bbdev_enc_op **ops, uint16_t num)
2411 {
2412         struct acc100_queue *q = q_data->queue_private;
2413         uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
2414         uint32_t aq_dequeued = 0;
2415         uint16_t dequeue_num, i, dequeued_cbs = 0, dequeued_descs = 0;
2416         int ret;
2417
2418 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2419         if (unlikely(ops == NULL || q == NULL))
2420                 return 0;
2421 #endif
2422
2423         dequeue_num = RTE_MIN(avail, num);
2424
2425         for (i = 0; i < dequeue_num; i++) {
2426                 ret = dequeue_enc_one_op_cb(q, &ops[dequeued_cbs],
2427                                 dequeued_descs, &aq_dequeued);
2428                 if (ret < 0)
2429                         break;
2430                 dequeued_cbs += ret;
2431                 dequeued_descs++;
2432                 if (dequeued_cbs >= num)
2433                         break;
2434         }
2435
2436         q->aq_dequeued += aq_dequeued;
2437         q->sw_ring_tail += dequeued_descs;
2438
2439         /* Update dequeue stats */
2440         q_data->queue_stats.dequeued_count += dequeued_cbs;
2441
2442         return dequeued_cbs;
2443 }
2444
2445 /* Dequeue decode operations from ACC100 device. */
2446 static uint16_t
2447 acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
2448                 struct rte_bbdev_dec_op **ops, uint16_t num)
2449 {
2450         struct acc100_queue *q = q_data->queue_private;
2451         uint16_t dequeue_num;
2452         uint32_t avail = q->sw_ring_head - q->sw_ring_tail;
2453         uint32_t aq_dequeued = 0;
2454         uint16_t i;
2455         uint16_t dequeued_cbs = 0;
2456         struct rte_bbdev_dec_op *op;
2457         int ret;
2458
2459 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2460         if (unlikely(ops == NULL || q == NULL))
2461                 return 0;
2462 #endif
2463
2464         dequeue_num = RTE_MIN(avail, num);
2465
2466         for (i = 0; i < dequeue_num; ++i) {
2467                 op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)
2468                         & q->sw_ring_wrap_mask))->req.op_addr;
2469                 if (op->ldpc_dec.code_block_mode == 0)
2470                         ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,
2471                                         &aq_dequeued);
2472                 else
2473                         ret = dequeue_ldpc_dec_one_op_cb(
2474                                         q_data, q, &ops[i], dequeued_cbs,
2475                                         &aq_dequeued);
2476
2477                 if (ret < 0)
2478                         break;
2479                 dequeued_cbs += ret;
2480         }
2481
2482         q->aq_dequeued += aq_dequeued;
2483         q->sw_ring_tail += dequeued_cbs;
2484
2485         /* Update dequeue stats */
2486         q_data->queue_stats.dequeued_count += i;
2487
2488         return i;
2489 }
2490
2491 /* Initialization Function */
2492 static void
2493 acc100_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
2494 {
2495         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2496
2497         dev->dev_ops = &acc100_bbdev_ops;
2498         dev->enqueue_ldpc_enc_ops = acc100_enqueue_ldpc_enc;
2499         dev->enqueue_ldpc_dec_ops = acc100_enqueue_ldpc_dec;
2500         dev->dequeue_ldpc_enc_ops = acc100_dequeue_ldpc_enc;
2501         dev->dequeue_ldpc_dec_ops = acc100_dequeue_ldpc_dec;
2502
2503         ((struct acc100_device *) dev->data->dev_private)->pf_device =
2504                         !strcmp(drv->driver.name,
2505                                         RTE_STR(ACC100PF_DRIVER_NAME));
2506         ((struct acc100_device *) dev->data->dev_private)->mmio_base =
2507                         pci_dev->mem_resource[0].addr;
2508
2509         rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"",
2510                         drv->driver.name, dev->data->name,
2511                         (void *)pci_dev->mem_resource[0].addr,
2512                         pci_dev->mem_resource[0].phys_addr);
2513 }
2514
2515 static int acc100_pci_probe(struct rte_pci_driver *pci_drv,
2516         struct rte_pci_device *pci_dev)
2517 {
2518         struct rte_bbdev *bbdev = NULL;
2519         char dev_name[RTE_BBDEV_NAME_MAX_LEN];
2520
2521         if (pci_dev == NULL) {
2522                 rte_bbdev_log(ERR, "NULL PCI device");
2523                 return -EINVAL;
2524         }
2525
2526         rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
2527
2528         /* Allocate memory to be used privately by drivers */
2529         bbdev = rte_bbdev_allocate(pci_dev->device.name);
2530         if (bbdev == NULL)
2531                 return -ENODEV;
2532
2533         /* allocate device private memory */
2534         bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
2535                         sizeof(struct acc100_device), RTE_CACHE_LINE_SIZE,
2536                         pci_dev->device.numa_node);
2537
2538         if (bbdev->data->dev_private == NULL) {
2539                 rte_bbdev_log(CRIT,
2540                                 "Allocation of %zu bytes for device \"%s\" failed",
2541                                 sizeof(struct acc100_device), dev_name);
2542                 rte_bbdev_release(bbdev);
2543                 return -ENOMEM;
2544         }
2545
2546         /* Fill HW specific part of device structure */
2547         bbdev->device = &pci_dev->device;
2548         bbdev->intr_handle = &pci_dev->intr_handle;
2549         bbdev->data->socket_id = pci_dev->device.numa_node;
2550
2551         /* Invoke ACC100 device initialization function */
2552         acc100_bbdev_init(bbdev, pci_drv);
2553
2554         rte_bbdev_log_debug("Initialised bbdev %s (id = %u)",
2555                         dev_name, bbdev->data->dev_id);
2556         return 0;
2557 }
2558
2559 static int acc100_pci_remove(struct rte_pci_device *pci_dev)
2560 {
2561         struct rte_bbdev *bbdev;
2562         int ret;
2563         uint8_t dev_id;
2564
2565         if (pci_dev == NULL)
2566                 return -EINVAL;
2567
2568         /* Find device */
2569         bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
2570         if (bbdev == NULL) {
2571                 rte_bbdev_log(CRIT,
2572                                 "Couldn't find HW dev \"%s\" to uninitialise it",
2573                                 pci_dev->device.name);
2574                 return -ENODEV;
2575         }
2576         dev_id = bbdev->data->dev_id;
2577
2578         /* free device private memory before close */
2579         rte_free(bbdev->data->dev_private);
2580
2581         /* Close device */
2582         ret = rte_bbdev_close(dev_id);
2583         if (ret < 0)
2584                 rte_bbdev_log(ERR,
2585                                 "Device %i failed to close during uninit: %i",
2586                                 dev_id, ret);
2587
2588         /* release bbdev from library */
2589         rte_bbdev_release(bbdev);
2590
2591         rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
2592
2593         return 0;
2594 }
2595
2596 static struct rte_pci_driver acc100_pci_pf_driver = {
2597                 .probe = acc100_pci_probe,
2598                 .remove = acc100_pci_remove,
2599                 .id_table = pci_id_acc100_pf_map,
2600                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING
2601 };
2602
2603 static struct rte_pci_driver acc100_pci_vf_driver = {
2604                 .probe = acc100_pci_probe,
2605                 .remove = acc100_pci_remove,
2606                 .id_table = pci_id_acc100_vf_map,
2607                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING
2608 };
2609
2610 RTE_PMD_REGISTER_PCI(ACC100PF_DRIVER_NAME, acc100_pci_pf_driver);
2611 RTE_PMD_REGISTER_PCI_TABLE(ACC100PF_DRIVER_NAME, pci_id_acc100_pf_map);
2612 RTE_PMD_REGISTER_PCI(ACC100VF_DRIVER_NAME, acc100_pci_vf_driver);
2613 RTE_PMD_REGISTER_PCI_TABLE(ACC100VF_DRIVER_NAME, pci_id_acc100_vf_map);