/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2019 Intel Corporation
 */

#include <rte_malloc.h>

#include "qat_comp.h"
#include "qat_comp_pmd.h"

#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16

struct stream_create_info {
	struct qat_comp_dev_private *comp_dev;
	int socket_id;
	int error;
};

static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
	{/* COMPRESSION - deflate */
	 .algo = RTE_COMP_ALGO_DEFLATE,
	 .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
				RTE_COMP_FF_CRC32_CHECKSUM |
				RTE_COMP_FF_ADLER32_CHECKSUM |
				RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				RTE_COMP_FF_HUFFMAN_FIXED |
				RTE_COMP_FF_HUFFMAN_DYNAMIC |
				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
				RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
				RTE_COMP_FF_STATEFUL_DECOMPRESSION,
	 .window_size = {.min = 15, .max = 15, .increment = 0} },
	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };

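/* Copy the QAT compression service counters for this device into the
 * generic compressdev stats structure.
 */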
static void
qat_comp_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	struct qat_common_stats qat_stats = {0};
	struct qat_comp_dev_private *qat_priv;

	if (stats == NULL || dev == NULL) {
		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
	stats->enqueued_count = qat_stats.enqueued_count;
	stats->dequeued_count = qat_stats.dequeued_count;
	stats->enqueue_err_count = qat_stats.enqueue_err_count;
	stats->dequeue_err_count = qat_stats.dequeue_err_count;
}

static void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
	struct qat_comp_dev_private *qat_priv;

	if (dev == NULL) {
		QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
}

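/* Free the per-descriptor SGL metadata allocated at queue pair setup,
 * then release the queue pair itself.
 */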
static int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	struct qat_qp **qp_addr =
		(struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]);
	struct qat_qp *qp = (struct qat_qp *)*qp_addr;
	uint32_t i;

	QAT_LOG(DEBUG, "Release comp qp %u on device %d",
				queue_pair_id, dev->data->dev_id);

	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
						= NULL;

	for (i = 0; i < qp->nb_descriptors; i++) {
		struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

		rte_free(cookie->qat_sgl_src_d);
		rte_free(cookie->qat_sgl_dst_d);
	}

	return qat_qp_release((struct qat_qp **)
			&(dev->data->queue_pairs[queue_pair_id]));
}

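/* Set up a queue pair for the compression service and allocate, for each
 * ring descriptor, the source and destination SGL tables used when
 * building firmware requests for scatter-gather mbufs.
 */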
static int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		  uint32_t max_inflight_ops, int socket_id)
{
	struct qat_qp *qp;
	int ret = 0;
	uint32_t i;
	struct qat_qp_config qat_qp_conf;

	struct qat_qp **qp_addr =
			(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
			qat_gen_config[qat_private->qat_dev->qat_dev_gen]
				      .qp_hw_data[QAT_SERVICE_COMPRESSION];
	const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;

	/* If qp is already in use free ring memory and qp metadata. */
	if (*qp_addr != NULL) {
		ret = qat_comp_qp_release(dev, qp_id);
		if (ret < 0)
			return ret;
	}
	if (qp_id >= qat_qps_per_service(comp_hw_qps,
					 QAT_SERVICE_COMPRESSION)) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	qat_qp_conf.hw = qp_hw_data;
	qat_qp_conf.build_request = qat_comp_build_request;
	qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
	qat_qp_conf.nb_descriptors = max_inflight_ops;
	qat_qp_conf.socket_id = socket_id;
	qat_qp_conf.service_str = "comp";

	ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
	if (ret != 0)
		return ret;

	/* store a link to the qp in the qat_pci_device */
	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
								= *qp_addr;

	qp = (struct qat_qp *)*qp_addr;

	for (i = 0; i < qp->nb_descriptors; i++) {
		struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

		cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
					64, dev->data->socket_id);

		cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
					64, dev->data->socket_id);

		if (cookie->qat_sgl_src_d == NULL ||
				cookie->qat_sgl_dst_d == NULL) {
			QAT_LOG(ERR, "Can't allocate SGL for device %s",
				     qat_private->qat_dev->name);
			return -ENOMEM;
		}

		cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);

		cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);

		cookie->dst_nb_elems = cookie->src_nb_elems =
				QAT_PMD_COMP_SGL_DEF_SEGMENTS;

		cookie->socket_id = dev->data->socket_id;

		cookie->error = 0;
	}

	return ret;
}

#define QAT_IM_BUFFER_DEBUG 0
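/* Reserve (or look up) the IOVA-contiguous memzone holding the
 * intermediate SGLs and flat buffers that the firmware needs for
 * dynamic deflate compression.
 */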
static const struct rte_memzone *
qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
			      uint32_t buff_size)
{
	char inter_buff_mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;
	uint8_t *mz_start = NULL;
	rte_iova_t mz_start_phys = 0;
	struct array_of_ptrs *array_of_pointers;
	int size_of_ptr_array;
	uint32_t full_size;
	uint32_t offset_of_sgls, offset_of_flat_buffs = 0;
	int i;
	int num_im_sgls = qat_gen_config[
		comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required;

	QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls",
				comp_dev->qat_dev->name, num_im_sgls);
	snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
				"%s_inter_buff", comp_dev->qat_dev->name);
	memzone = rte_memzone_lookup(inter_buff_mz_name);
	if (memzone != NULL) {
		QAT_LOG(DEBUG, "QAT COMP im buffer memzone created already");
		return memzone;
	}

	/* Create a memzone to hold intermediate buffers and associated
	 * meta-data needed by the firmware. The memzone contains 3 parts:
	 *  - a list of num_im_sgls physical pointers to sgls
	 *  - the num_im_sgl sgl structures, each pointing to
	 *    QAT_NUM_BUFS_IN_IM_SGL flat buffers
	 *  - the flat buffers: num_im_sgl * QAT_NUM_BUFS_IN_IM_SGL
	 *    buffers, each of buff_size
	 * num_im_sgls depends on the hardware generation of the device
	 * buff_size comes from the user via the config file
	 */

	size_of_ptr_array = num_im_sgls * sizeof(phys_addr_t);
	offset_of_sgls = (size_of_ptr_array + (~QAT_64_BYTE_ALIGN_MASK))
			& QAT_64_BYTE_ALIGN_MASK;
	offset_of_flat_buffs =
	    offset_of_sgls + num_im_sgls * sizeof(struct qat_inter_sgl);
	full_size = offset_of_flat_buffs +
			num_im_sgls * buff_size * QAT_NUM_BUFS_IN_IM_SGL;

	memzone = rte_memzone_reserve_aligned(inter_buff_mz_name, full_size,
			comp_dev->compressdev->data->socket_id,
			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
	if (memzone == NULL) {
		QAT_LOG(ERR, "Can't allocate intermediate buffers"
				" for device %s", comp_dev->qat_dev->name);
		return NULL;
	}

	mz_start = (uint8_t *)memzone->addr;
	mz_start_phys = memzone->phys_addr;
	QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
			", size required %d, size created %zu",
			inter_buff_mz_name, mz_start, mz_start_phys,
			full_size, memzone->len);

	array_of_pointers = (struct array_of_ptrs *)mz_start;
	for (i = 0; i < num_im_sgls; i++) {
		uint32_t curr_sgl_offset =
		    offset_of_sgls + i * sizeof(struct qat_inter_sgl);
		struct qat_inter_sgl *sgl =
		    (struct qat_inter_sgl *)(mz_start + curr_sgl_offset);
		int lb;
		array_of_pointers->pointer[i] = mz_start_phys + curr_sgl_offset;

		sgl->num_bufs = QAT_NUM_BUFS_IN_IM_SGL;
		sgl->num_mapped_bufs = 0;
		sgl->resrvd = 0;

#if QAT_IM_BUFFER_DEBUG
		QAT_LOG(DEBUG, "  : phys addr of sgl[%i] in array_of_pointers"
			" = 0x%"PRIx64, i, array_of_pointers->pointer[i]);
		QAT_LOG(DEBUG, "  : virt address of sgl[%i] = %p", i, sgl);
#endif
		for (lb = 0; lb < QAT_NUM_BUFS_IN_IM_SGL; lb++) {
			sgl->buffers[lb].addr =
			  mz_start_phys + offset_of_flat_buffs +
			  (((i * QAT_NUM_BUFS_IN_IM_SGL) + lb) * buff_size);
			sgl->buffers[lb].len = buff_size;
			sgl->buffers[lb].resrvd = 0;
#if QAT_IM_BUFFER_DEBUG
			QAT_LOG(DEBUG,
			  "  : sgl->buffers[%d].addr = 0x%"PRIx64", len=%d",
			  lb, sgl->buffers[lb].addr, sgl->buffers[lb].len);
#endif
		}
	}
#if QAT_IM_BUFFER_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG,  "IM buffer memzone start:",
			mz_start, offset_of_flat_buffs + 32);
#endif
	return memzone;
}

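/* Create (or reuse) the mempool holding private xforms. An existing pool
 * of the wrong size is freed and re-created.
 */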
static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
			   struct rte_compressdev_config *config,
			   uint32_t num_elements)
{
	char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
			"%s_xforms", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
	mp = rte_mempool_lookup(xform_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "xformpool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "xformpool wrong size - delete it");
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->xformpool = NULL;
		}
	}

	if (mp == NULL)
		mp = rte_mempool_create(xform_pool_name,
				num_elements,
				qat_comp_xform_size(), 0, 0,
				NULL, NULL, NULL, NULL, config->socket_id,
				0);
	if (mp == NULL) {
		QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
			xform_pool_name, num_elements, qat_comp_xform_size());
		return NULL;
	}

	return mp;
}

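/* Mempool object constructor: back each stream with a memzone holding the
 * decompression state registers, the RAM-bank buffer list descriptor and
 * the inflate context, then point the stream's fields at it.
 */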
static void
qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
		     void *obj, unsigned int obj_idx)
{
	struct stream_create_info *info = opaque;
	struct qat_comp_stream *stream = obj;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;
	struct qat_inter_sgl *ram_banks_desc;

	/* find a memzone for RAM banks */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
		 info->comp_dev->qat_dev->name, obj_idx);
	memzone = rte_memzone_lookup(mz_name);
	if (memzone == NULL) {
		/* allocate a memzone for compression state and RAM banks */
		memzone = rte_memzone_reserve_aligned(mz_name,
			QAT_STATE_REGISTERS_MAX_SIZE
				+ sizeof(struct qat_inter_sgl)
				+ QAT_INFLATE_CONTEXT_SIZE,
			info->socket_id,
			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
		if (memzone == NULL) {
			QAT_LOG(ERR,
			    "Can't allocate RAM banks for device %s, object %u",
				info->comp_dev->qat_dev->name, obj_idx);
			info->error = -ENOMEM;
			return;
		}
	}

	/* prepare the buffer list descriptor for RAM banks */
	ram_banks_desc = (struct qat_inter_sgl *)
		(((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
	ram_banks_desc->num_bufs = 1;
	ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
	ram_banks_desc->buffers[0].addr = memzone->iova
			+ QAT_STATE_REGISTERS_MAX_SIZE
			+ sizeof(struct qat_inter_sgl);

	memset(stream, 0, qat_comp_stream_size());
	stream->memzone = memzone;
	stream->state_registers_decomp = memzone->addr;
	stream->state_registers_decomp_phys = memzone->iova;
	stream->inflate_context = ((uint8_t *) memzone->addr)
			+ QAT_STATE_REGISTERS_MAX_SIZE;
	stream->inflate_context_phys = memzone->iova
			+ QAT_STATE_REGISTERS_MAX_SIZE;
}

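/* Mempool object destructor: free the per-stream memzone. */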
static void
qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
			void *opaque __rte_unused, void *obj,
			unsigned obj_idx __rte_unused)
{
	struct qat_comp_stream *stream = obj;

	rte_memzone_free(stream->memzone);
}

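/* Create (or reuse) the mempool of stateful decompression streams. An
 * existing pool of the wrong size is destroyed and re-created; if any
 * element fails initialisation the new pool is destroyed and NULL
 * returned.
 */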
static struct rte_mempool *
qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
			    int socket_id,
			    uint32_t num_elements)
{
	char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
		 "%s_streams", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
	mp = rte_mempool_lookup(stream_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "streampool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "streampool wrong size - delete it");
			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->streampool = NULL;
		}
	}

	if (mp == NULL) {
		struct stream_create_info info = {
			.comp_dev = comp_dev,
			.socket_id = socket_id,
			.error = 0
		};
		mp = rte_mempool_create(stream_pool_name,
				num_elements,
				qat_comp_stream_size(), 0, 0,
				NULL, NULL, qat_comp_stream_init, &info,
				socket_id, 0);
		if (mp == NULL) {
			QAT_LOG(ERR,
			     "Err creating mempool %s w %d elements of size %d",
			     stream_pool_name, num_elements,
			     qat_comp_stream_size());
		} else if (info.error) {
			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
			QAT_LOG(ERR,
			     "Destroying mempool %s as at least one element failed initialisation",
			     stream_pool_name);
			rte_mempool_free(mp);
			mp = NULL;
		}
	}

	return mp;
}

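/* Release everything allocated by qat_comp_dev_config(): intermediate
 * buffers, the private xform pool and the stream pool.
 */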
static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
	/* Free intermediate buffers */
	if (comp_dev->interm_buff_mz) {
		rte_memzone_free(comp_dev->interm_buff_mz);
		comp_dev->interm_buff_mz = NULL;
	}

	/* Free private_xform pool */
	if (comp_dev->xformpool) {
		/* Free internal mempool for private xforms */
		rte_mempool_free(comp_dev->xformpool);
		comp_dev->xformpool = NULL;
	}

	/* Free stream pool */
	if (comp_dev->streampool) {
		rte_mempool_obj_iter(comp_dev->streampool,
				     qat_comp_stream_destroy, NULL);
		rte_mempool_free(comp_dev->streampool);
		comp_dev->streampool = NULL;
	}
}

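/* Configure the device: set up intermediate buffers (needed for dynamic
 * deflate) and create the private xform and stream pools requested in
 * the configuration.
 */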
static int
qat_comp_dev_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	int ret = 0;

	if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
		QAT_LOG(WARNING,
			"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
			" QAT device can't be used for Dynamic Deflate. "
			"Did you really intend to do this?");
	} else {
		comp_dev->interm_buff_mz =
				qat_comp_setup_inter_buffers(comp_dev,
					RTE_PMD_QAT_COMP_IM_BUFFER_SIZE);
		if (comp_dev->interm_buff_mz == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	}

	if (config->max_nb_priv_xforms) {
		comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
					    config, config->max_nb_priv_xforms);
		if (comp_dev->xformpool == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	} else
		comp_dev->xformpool = NULL;

	if (config->max_nb_streams) {
		comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
				     config->socket_id, config->max_nb_streams);
		if (comp_dev->streampool == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	} else
		comp_dev->streampool = NULL;

	return 0;

error_out:
	_qat_comp_dev_config_clear(comp_dev);
	return ret;
}

static int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
{
	return 0;
}

static void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
{
}

static int
qat_comp_dev_close(struct rte_compressdev *dev)
{
	int i;
	int ret = 0;
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_comp_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	_qat_comp_dev_config_clear(comp_dev);

	return ret;
}

static void
qat_comp_dev_info_get(struct rte_compressdev *dev,
			struct rte_compressdev_info *info)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
		qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
			      .qp_hw_data[QAT_SERVICE_COMPRESSION];

	if (info != NULL) {
		info->max_nb_queue_pairs =
			qat_qps_per_service(comp_hw_qps,
					    QAT_SERVICE_COMPRESSION);
		info->feature_flags = dev->feature_flags;
		info->capabilities = comp_dev->qat_dev_capabilities;
	}
}

static uint16_t
qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}

static uint16_t
qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
			      uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}

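/* Stand-in burst function installed when an incompatible firmware version
 * has been detected; it rejects every burst with an error log.
 */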
static uint16_t
qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
				    struct rte_comp_op **ops __rte_unused,
				    uint16_t nb_ops __rte_unused)
{
	QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version!");
	return 0;
}

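/* Reduced ops table installed alongside the dummy burst functions when the
 * firmware version check fails; only teardown entry points remain set.
 */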
static struct rte_compressdev_ops compress_qat_dummy_ops = {

	/* Device related operations */
	.dev_configure		= NULL,
	.dev_start		= NULL,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= NULL,

	.stats_get		= NULL,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= NULL,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= NULL,
	.private_xform_free	= qat_comp_private_xform_free
};

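/* Dequeue burst function used only until the first response arrives: it
 * checks the firmware version via the response's debug_status, swaps in
 * the dummy burst functions and ops table on a mismatch, and otherwise
 * switches the device to the normal dequeue path.
 */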
static uint16_t
qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
				   uint16_t nb_ops)
{
	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;

	if (ret) {
		if ((*ops)->debug_status ==
				(uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
			tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;

			tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
					&compress_qat_dummy_ops;
			QAT_LOG(ERR, "QAT PMD detected wrong FW version!");

		} else {
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					qat_comp_pmd_dequeue_op_burst;
		}
	}
	return ret;
}

static struct rte_compressdev_ops compress_qat_ops = {

	/* Device related operations */
	.dev_configure		= qat_comp_dev_config,
	.dev_start		= qat_comp_dev_start,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= qat_comp_dev_info_get,

	.stats_get		= qat_comp_stats_get,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= qat_comp_qp_setup,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= qat_comp_private_xform_create,
	.private_xform_free	= qat_comp_private_xform_free,
	.stream_create		= qat_comp_stream_create,
	.stream_free		= qat_comp_stream_free
};

/* An rte_driver is needed in the registration of the device with compressdev.
 * The QAT PCI device's own rte_driver can't be used, as its name represents
 * the whole PCI device with all of its services. Think of this as a holder
 * for a name for the compression part of the PCI device.
 */
static const char qat_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_QAT_PMD);
static const struct rte_driver compdev_qat_driver = {
	.name = qat_comp_drv_name,
	.alias = qat_comp_drv_name
};
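
/* Create a compressdev instance for the compression service of the given
 * QAT PCI device and hook up its ops, burst functions and capabilities.
 */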
int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
{
	if (qat_pci_dev->qat_dev_gen == QAT_GEN3) {
		QAT_LOG(ERR, "Compression PMD not supported on QAT c4xxx");
		return 0;
	}

	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_pci_dev->pci_dev->device.numa_node,
	};
	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct qat_comp_dev_private *comp_dev;

	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "comp");
	QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);

	/* Populate subset device to use in compressdev device creation */
	qat_pci_dev->comp_rte_dev.driver = &compdev_qat_driver;
	qat_pci_dev->comp_rte_dev.numa_node =
					qat_pci_dev->pci_dev->device.numa_node;
	qat_pci_dev->comp_rte_dev.devargs = NULL;

	compressdev = rte_compressdev_pmd_create(name,
			&(qat_pci_dev->comp_rte_dev),
			sizeof(struct qat_comp_dev_private),
			&init_params);

	if (compressdev == NULL)
		return -ENODEV;

	compressdev->dev_ops = &compress_qat_ops;

	compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
	compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;

	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;

	comp_dev = compressdev->data->dev_private;
	comp_dev->qat_dev = qat_pci_dev;
	comp_dev->compressdev = compressdev;
	qat_pci_dev->comp_dev = comp_dev;

	switch (qat_pci_dev->qat_dev_gen) {
	case QAT_GEN1:
	case QAT_GEN2:
	case QAT_GEN3:
		comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
		break;
	default:
		comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
		QAT_LOG(DEBUG,
			"QAT gen %d capabilities unknown, default to GEN1",
					qat_pci_dev->qat_dev_gen);
		break;
	}

	QAT_LOG(DEBUG,
		    "Created QAT COMP device %s as compressdev instance %d",
			name, compressdev->data->dev_id);
	return 0;
}

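/* Tear down the compressdev instance associated with a QAT PCI device. */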
int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct qat_comp_dev_private *comp_dev;

	if (qat_pci_dev == NULL)
		return -ENODEV;

	comp_dev = qat_pci_dev->comp_dev;
	if (comp_dev == NULL)
		return 0;

	/* clean up any resources used by the device */
	qat_comp_dev_close(comp_dev->compressdev);

	rte_compressdev_pmd_destroy(comp_dev->compressdev);
	qat_pci_dev->comp_dev = NULL;

	return 0;
}