compress/octeontx: create private xform
drivers/compress/octeontx/otx_zip_pmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

static const struct rte_compressdev_capabilities
                                octtx_zip_pmd_capabilities[] = {
        {       .algo = RTE_COMP_ALGO_DEFLATE,
                /* Deflate */
                .comp_feature_flags =   RTE_COMP_FF_HUFFMAN_FIXED |
                                        RTE_COMP_FF_HUFFMAN_DYNAMIC,
                /* Non sharable Priv XFORM and Stateless */
                .window_size = {
                                .min = 1,
                                .max = 14,
                                .increment = 1
                                /* size supported 2^1 to 2^14 */
                },
        },
        RTE_COMP_END_OF_CAPABILITIES_LIST()
};
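
/*
 * Illustrative, application-side capability lookup (not part of this
 * driver); "dev_id" is assumed to be a valid compressdev id:
 *
 *      struct rte_compressdev_info info;
 *
 *      rte_compressdev_info_get(dev_id, &info);
 *
 * info.capabilities then points at octtx_zip_pmd_capabilities.
 */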

/** Parse xform parameters and setup a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
                        const struct rte_comp_xform *xform,
                        struct zip_stream *z_stream)
{
        int ret;
        union zip_inst_s *inst;
        struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
        void *res;

        /* Allocate resources required by a stream */
        ret = rte_mempool_get_bulk(vf->zip_mp,
                        z_stream->bufs, MAX_BUFS_PER_STREAM);
        if (ret < 0)
                return -1;

        /* get one command buffer from pool and set up */
        inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
        res = z_stream->bufs[RES_BUF];

        memset(inst->u, 0, sizeof(inst->u));

        /* set bf only for the first op of a stream */
        inst->s.bf = 1;

        if (xform->type == RTE_COMP_COMPRESS) {
                inst->s.op = ZIP_OP_E_COMP;

                switch (xform->compress.deflate.huffman) {
                case RTE_COMP_HUFFMAN_DEFAULT:
                        inst->s.cc = ZIP_CC_DEFAULT;
                        break;
                case RTE_COMP_HUFFMAN_FIXED:
                        inst->s.cc = ZIP_CC_FIXED_HUFF;
                        break;
                case RTE_COMP_HUFFMAN_DYNAMIC:
                        inst->s.cc = ZIP_CC_DYN_HUFF;
                        break;
                default:
                        ret = -1;
                        goto err;
                }

                switch (xform->compress.level) {
                case RTE_COMP_LEVEL_MIN:
                        inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
                        break;
                case RTE_COMP_LEVEL_MAX:
                        inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
                        break;
                case RTE_COMP_LEVEL_NONE:
                        ZIP_PMD_ERR("Compression level not supported");
                        ret = -1;
                        goto err;
                default:
                        /* for any value between min and max, choose
                         * PMD default.
                         */
                        inst->s.ss = ZIP_COMP_E_LEVEL_MED; /** PMD default **/
                        break;
                }
        } else if (xform->type == RTE_COMP_DECOMPRESS) {
                inst->s.op = ZIP_OP_E_DECOMP;
                /* from HRM,
                 * For DEFLATE decompression, [CC] must be 0x0.
                 * For decompression, [SS] must be 0x0.
                 */
                inst->s.cc = 0;
                /* Speed bit should not be set for decompression */
                inst->s.ss = 0;
                /* decompression context is supported only for STATEFUL
                 * operations. Currently we support STATELESS ONLY so
                 * skip setting of ctx pointer
                 */

        } else {
                ZIP_PMD_ERR("xform type not supported");
                ret = -1;
                goto err;
        }

        inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
        inst->s.res_ptr_ctl.s.length = 0;

        z_stream->inst = inst;

        return 0;

err:
        rte_mempool_put_bulk(vf->zip_mp,
                             (void *)&(z_stream->bufs[0]),
                             MAX_BUFS_PER_STREAM);

        return ret;
}

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
                struct rte_compressdev_config *config)
{
        int nb_streams;
        char res_pool[RTE_MEMZONE_NAMESIZE];
        struct zip_vf *vf;
        struct rte_mempool *zip_buf_mp;

        if (!config || !dev)
                return -EIO;

        vf = (struct zip_vf *)(dev->data->dev_private);

        /* create pool with the maximum number of resources
         * required by streams
         */

        /* use common pool for non-shareable priv_xform and stream */
        nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

        snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
                 dev->data->dev_id);

        /** TBD: Should we use the per-core object cache for stream resources? */
        zip_buf_mp = rte_mempool_create(
                        res_pool,
                        nb_streams * MAX_BUFS_PER_STREAM,
                        ZIP_BUF_SIZE,
                        0,
                        0,
                        NULL,
                        NULL,
                        NULL,
                        NULL,
                        SOCKET_ID_ANY,
                        0);

        if (zip_buf_mp == NULL) {
                ZIP_PMD_ERR(
                        "Failed to create buf mempool octtx_zip_res_pool%u",
                        dev->data->dev_id);
                return -1;
        }

        vf->zip_mp = zip_buf_mp;

        return 0;
}
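
/*
 * Illustrative, application-side configuration (not part of this driver);
 * "dev_id" is assumed to be a valid compressdev id. The resource pool
 * above is sized from max_nb_priv_xforms + max_nb_streams:
 *
 *      struct rte_compressdev_config cfg = {
 *              .socket_id = rte_socket_id(),
 *              .nb_queue_pairs = 1,
 *              .max_nb_priv_xforms = 1,
 *              .max_nb_streams = 0,
 *      };
 *
 *      rte_compressdev_configure(dev_id, &cfg);
 */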

/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
        return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
        if (dev == NULL)
                return -1;

        struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
        rte_mempool_free(vf->zip_mp);

        return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
                struct rte_compressdev_stats *stats)
{
        int qp_id;

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

                stats->enqueued_count += qp->qp_stats.enqueued_count;
                stats->dequeued_count += qp->qp_stats.dequeued_count;

                stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
                stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
        }
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
        int qp_id;

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
                memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
        }
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
                struct rte_compressdev_info *dev_info)
{
        struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

        if (dev_info != NULL) {
                dev_info->driver_name = dev->device->driver->name;
                dev_info->feature_flags = dev->feature_flags;
                dev_info->capabilities = octtx_zip_pmd_capabilities;
                dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
        }
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
        struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

        if (qp != NULL) {
                zipvf_q_term(qp);

                if (qp->processed_pkts)
                        rte_ring_free(qp->processed_pkts);

                rte_free(qp);
                dev->data->queue_pairs[qp_id] = NULL;
        }
        return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
                unsigned int ring_size, int socket_id)
{
        struct rte_ring *r;

        r = rte_ring_lookup(qp->name);
        if (r) {
                if (rte_ring_get_size(r) >= ring_size) {
                        ZIP_PMD_INFO("Reusing existing ring %s for processed"
                                        " packets", qp->name);
                        return r;
                }

                ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
                                " packets", qp->name);
                return NULL;
        }

        return rte_ring_create(qp->name, ring_size, socket_id,
                                                RING_F_EXACT_SZ);
}

/** Setup a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                uint32_t max_inflight_ops, int socket_id)
{
        struct zipvf_qp *qp = NULL;
        struct zip_vf *vf;
        char *name;
        int ret;

        if (!dev)
                return -1;

        vf = (struct zip_vf *) (dev->data->dev_private);

        /* Reuse the queue pair if it has already been set up. */
        if (dev->data->queue_pairs[qp_id] != NULL) {
                ZIP_PMD_INFO("Using existing queue pair %d", qp_id);
                return 0;
        }

        name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
        if (name == NULL)
                return (-ENOMEM);
        snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
                 "zip_pmd_%u_qp_%u",
                 dev->data->dev_id, qp_id);

        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc_socket(name, sizeof(*qp),
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (qp == NULL) {
                rte_free(name);
                return (-ENOMEM);
        }

        qp->name = name;

        /* Create completion queue up to max_inflight_ops */
        qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
                                                max_inflight_ops, socket_id);
        if (qp->processed_pkts == NULL)
                goto qp_setup_cleanup;

        qp->id = qp_id;
        qp->vf = vf;

        ret = zipvf_q_init(qp);
        if (ret < 0)
                goto qp_setup_cleanup;

        dev->data->queue_pairs[qp_id] = qp;

        memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
        return 0;

qp_setup_cleanup:
        if (qp->processed_pkts)
                rte_ring_free(qp->processed_pkts);
        if (qp)
                rte_free(qp);
        /* name is allocated separately from qp, release it as well */
        rte_free(name);
        return -1;
}

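/** Create a private xform: allocate a zip_stream and program its
 * command buffer from the xform via zip_set_stream_parameters().
 */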
static int
zip_pmd_stream_create(struct rte_compressdev *dev,
                const struct rte_comp_xform *xform, void **stream)
{
        int ret;
        struct zip_stream *strm = NULL;

        strm = rte_malloc(NULL,
                        sizeof(struct zip_stream), 0);

        if (strm == NULL)
                return (-ENOMEM);

        ret = zip_set_stream_parameters(dev, xform, strm);
        if (ret < 0) {
                ZIP_PMD_ERR("failed to configure xform parameters");
                rte_free(strm);
                return ret;
        }
        *stream = strm;
        return 0;
}

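/** Free a private xform and return its buffers to the common pool */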
static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
        struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
        struct zip_stream *z_stream;

        if (stream == NULL)
                return 0;

        z_stream = (struct zip_stream *)stream;

        /* Free resources back to pool */
        rte_mempool_put_bulk(vf->zip_mp,
                                (void *)&(z_stream->bufs[0]),
                                MAX_BUFS_PER_STREAM);

        /* Zero out the whole structure */
        memset(stream, 0, sizeof(struct zip_stream));
        rte_free(stream);

        return 0;
}

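/** Compressdev operations exposed by this PMD. Only stateless operation
 * is supported, so stream_create/stream_free are left NULL and the
 * private xform hooks reuse the stream helpers above.
 */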
struct rte_compressdev_ops octtx_zip_pmd_ops = {
                .dev_configure          = zip_pmd_config,
                .dev_start              = zip_pmd_start,
                .dev_stop               = zip_pmd_stop,
                .dev_close              = zip_pmd_close,

                .stats_get              = zip_pmd_stats_get,
                .stats_reset            = zip_pmd_stats_reset,

                .dev_infos_get          = zip_pmd_info_get,

                .queue_pair_setup       = zip_pmd_qp_setup,
                .queue_pair_release     = zip_pmd_qp_release,

                .private_xform_create   = zip_pmd_stream_create,
                .private_xform_free     = zip_pmd_stream_free,
                .stream_create          = NULL,
                .stream_free            = NULL
};

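/** PCI probe: create a compressdev instance backed by the ZIP VF */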
static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        int ret = 0;
        char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
        struct rte_compressdev *compressdev;
        struct rte_compressdev_pmd_init_params init_params = {
                "",
                rte_socket_id(),
        };

        ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
                        (unsigned int)pci_dev->id.vendor_id,
                        (unsigned int)pci_dev->id.device_id);

        rte_pci_device_name(&pci_dev->addr, compressdev_name,
                            sizeof(compressdev_name));

        compressdev = rte_compressdev_pmd_create(compressdev_name,
                &pci_dev->device, sizeof(struct zip_vf), &init_params);
        if (compressdev == NULL) {
                ZIP_PMD_ERR("driver %s: create failed", init_params.name);
                return -ENODEV;
        }

        /*
         * create only if proc_type is primary.
         */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                /* create vf dev with given pmd dev id */
                ret = zipvf_create(compressdev);
                if (ret < 0) {
                        ZIP_PMD_ERR("Device creation failed");
                        rte_compressdev_pmd_destroy(compressdev);
                        return ret;
                }
        }

        compressdev->dev_ops = &octtx_zip_pmd_ops;
        /* register rx/tx burst functions for data path */
        compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
        return ret;
}

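/** PCI remove: tear down the ZIP VF state and destroy the compressdev */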
static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_compressdev *compressdev;
        char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

        if (pci_dev == NULL) {
                ZIP_PMD_ERR("Invalid PCI device");
                return -EINVAL;
        }
        rte_pci_device_name(&pci_dev->addr, compressdev_name,
                        sizeof(compressdev_name));

        compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
        if (compressdev == NULL)
                return -ENODEV;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                if (zipvf_destroy(compressdev) < 0)
                        return -ENODEV;
        }
        return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
                        PCI_DEVICE_ID_OCTEONTX_ZIPVF),
        },
        {
                .device_id = 0
        },
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
        .id_table    = pci_id_octtx_zipvf_table,
        .drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
        .probe       = zip_pci_probe,
        .remove      = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);

RTE_INIT(octtx_zip_init_log);

static void
octtx_zip_init_log(void)
{
        octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
        if (octtx_zip_logtype_driver >= 0)
                rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
}