f9b8f7a1ec6ed5d71ef6ebd090039ec4fa264dab
[dpdk.git] / drivers / compress / octeontx / otx_zip_pmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Cavium, Inc
3  */
4
5 #include <string.h>
6
7 #include <rte_byteorder.h>
8 #include <rte_common.h>
9 #include <rte_cpuflags.h>
10 #include <rte_malloc.h>
11
12 #include "otx_zip.h"
13
/* Capability table advertised via zip_pmd_info_get(): the HW engine
 * supports DEFLATE only, with both fixed and dynamic Huffman coding,
 * and window sizes of 2^1 .. 2^14 bytes.
 */
static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non sharable Priv XFORM and Stateless */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* size supported 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};
30
31 /*
32  * Reset session to default state for next set of stateless operation
33  */
34 static inline void
35 reset_stream(struct zip_stream *z_stream)
36 {
37         union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
38
39         inst->s.bf = 1;
40         inst->s.ef = 0;
41 }
42
43 int
44 zip_process_op(struct rte_comp_op *op,
45                 struct zipvf_qp *qp,
46                 struct zip_stream *zstrm)
47 {
48         union zip_inst_s *inst = zstrm->inst;
49         volatile union zip_zres_s *zresult = NULL;
50
51
52         if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
53                         (op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
54                         (op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
55                 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
56                 ZIP_PMD_ERR("Segmented packet is not supported\n");
57                 return 0;
58         }
59
60         zipvf_prepare_cmd_stateless(op, zstrm);
61
62         zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
63         zresult->s.compcode = 0;
64
65 #ifdef ZIP_DBG
66         zip_dump_instruction(inst);
67 #endif
68
69         /* Submit zip command */
70         zipvf_push_command(qp, (void *)inst);
71
72         /* Check and Process results in sync mode */
73         do {
74         } while (!zresult->s.compcode);
75
76         if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
77                 op->status = RTE_COMP_OP_STATUS_SUCCESS;
78         } else {
79                 /* FATAL error cannot do anything */
80                 ZIP_PMD_ERR("operation failed with error code:%d\n",
81                         zresult->s.compcode);
82                 if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
83                         op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
84                 else
85                         op->status = RTE_COMP_OP_STATUS_ERROR;
86         }
87
88         ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);
89
90         /* Update op stats */
91         switch (op->status) {
92         case RTE_COMP_OP_STATUS_SUCCESS:
93                 op->consumed = zresult->s.totalbytesread;
94         /* Fall-through */
95         case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
96                 op->produced = zresult->s.totalbyteswritten;
97                 break;
98         default:
99                 ZIP_PMD_ERR("stats not updated for status:%d\n",
100                                 op->status);
101                 break;
102         }
103         /* zstream is reset irrespective of result */
104         reset_stream(zstrm);
105
106         zresult->s.compcode = ZIP_COMP_E_NOTDONE;
107         return 0;
108 }
109
110 /** Parse xform parameters and setup a stream */
111 static int
112 zip_set_stream_parameters(struct rte_compressdev *dev,
113                         const struct rte_comp_xform *xform,
114                         struct zip_stream *z_stream)
115 {
116         int ret;
117         union zip_inst_s *inst;
118         struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
119         void *res;
120
121         /* Allocate resources required by a stream */
122         ret = rte_mempool_get_bulk(vf->zip_mp,
123                         z_stream->bufs, MAX_BUFS_PER_STREAM);
124         if (ret < 0)
125                 return -1;
126
127         /* get one command buffer from pool and set up */
128         inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
129         res = z_stream->bufs[RES_BUF];
130
131         memset(inst->u, 0, sizeof(inst->u));
132
133         /* set bf for only first ops of stream */
134         inst->s.bf = 1;
135
136         if (xform->type == RTE_COMP_COMPRESS) {
137                 inst->s.op = ZIP_OP_E_COMP;
138
139                 switch (xform->compress.deflate.huffman) {
140                 case RTE_COMP_HUFFMAN_DEFAULT:
141                         inst->s.cc = ZIP_CC_DEFAULT;
142                         break;
143                 case RTE_COMP_HUFFMAN_FIXED:
144                         inst->s.cc = ZIP_CC_FIXED_HUFF;
145                         break;
146                 case RTE_COMP_HUFFMAN_DYNAMIC:
147                         inst->s.cc = ZIP_CC_DYN_HUFF;
148                         break;
149                 default:
150                         ret = -1;
151                         goto err;
152                 }
153
154                 switch (xform->compress.level) {
155                 case RTE_COMP_LEVEL_MIN:
156                         inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
157                         break;
158                 case RTE_COMP_LEVEL_MAX:
159                         inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
160                         break;
161                 case RTE_COMP_LEVEL_NONE:
162                         ZIP_PMD_ERR("Compression level not supported");
163                         ret = -1;
164                         goto err;
165                 default:
166                         /* for any value between min and max , choose
167                          * PMD default.
168                          */
169                         inst->s.ss = ZIP_COMP_E_LEVEL_MED; /** PMD default **/
170                         break;
171                 }
172         } else if (xform->type == RTE_COMP_DECOMPRESS) {
173                 inst->s.op = ZIP_OP_E_DECOMP;
174                 /* from HRM,
175                  * For DEFLATE decompression, [CC] must be 0x0.
176                  * For decompression, [SS] must be 0x0
177                  */
178                 inst->s.cc = 0;
179                 /* Speed bit should not be set for decompression */
180                 inst->s.ss = 0;
181                 /* decompression context is supported only for STATEFUL
182                  * operations. Currently we support STATELESS ONLY so
183                  * skip setting of ctx pointer
184                  */
185
186         } else {
187                 ZIP_PMD_ERR("\nxform type not supported");
188                 ret = -1;
189                 goto err;
190         }
191
192         inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
193         inst->s.res_ptr_ctl.s.length = 0;
194
195         z_stream->inst = inst;
196         z_stream->func = zip_process_op;
197
198         return 0;
199
200 err:
201         rte_mempool_put_bulk(vf->zip_mp,
202                              (void *)&(z_stream->bufs[0]),
203                              MAX_BUFS_PER_STREAM);
204
205         return ret;
206 }
207
208 /** Configure device */
209 static int
210 zip_pmd_config(struct rte_compressdev *dev,
211                 struct rte_compressdev_config *config)
212 {
213         int nb_streams;
214         char res_pool[RTE_MEMZONE_NAMESIZE];
215         struct zip_vf *vf;
216         struct rte_mempool *zip_buf_mp;
217
218         if (!config || !dev)
219                 return -EIO;
220
221         vf = (struct zip_vf *)(dev->data->dev_private);
222
223         /* create pool with maximum numbers of resources
224          * required by streams
225          */
226
227         /* use common pool for non-shareable priv_xform and stream */
228         nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
229
230         snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
231                  dev->data->dev_id);
232
233         /** TBD Should we use the per core object cache for stream resources */
234         zip_buf_mp = rte_mempool_create(
235                         res_pool,
236                         nb_streams * MAX_BUFS_PER_STREAM,
237                         ZIP_BUF_SIZE,
238                         0,
239                         0,
240                         NULL,
241                         NULL,
242                         NULL,
243                         NULL,
244                         SOCKET_ID_ANY,
245                         0);
246
247         if (zip_buf_mp == NULL) {
248                 ZIP_PMD_ERR(
249                         "Failed to create buf mempool octtx_zip_res_pool%u",
250                         dev->data->dev_id);
251                 return -1;
252         }
253
254         vf->zip_mp = zip_buf_mp;
255
256         return 0;
257 }
258
259 /** Start device */
260 static int
261 zip_pmd_start(__rte_unused struct rte_compressdev *dev)
262 {
263         return 0;
264 }
265
266 /** Stop device */
267 static void
268 zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
269 {
270
271 }
272
273 /** Close device */
274 static int
275 zip_pmd_close(struct rte_compressdev *dev)
276 {
277         if (dev == NULL)
278                 return -1;
279
280         struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
281         rte_mempool_free(vf->zip_mp);
282
283         return 0;
284 }
285
286 /** Get device statistics */
287 static void
288 zip_pmd_stats_get(struct rte_compressdev *dev,
289                 struct rte_compressdev_stats *stats)
290 {
291         int qp_id;
292
293         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
294                 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
295
296                 stats->enqueued_count += qp->qp_stats.enqueued_count;
297                 stats->dequeued_count += qp->qp_stats.dequeued_count;
298
299                 stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
300                 stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
301         }
302 }
303
304 /** Reset device statistics */
305 static void
306 zip_pmd_stats_reset(struct rte_compressdev *dev)
307 {
308         int qp_id;
309
310         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
311                 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
312                 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
313         }
314 }
315
316 /** Get device info */
317 static void
318 zip_pmd_info_get(struct rte_compressdev *dev,
319                 struct rte_compressdev_info *dev_info)
320 {
321         struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
322
323         if (dev_info != NULL) {
324                 dev_info->driver_name = dev->device->driver->name;
325                 dev_info->feature_flags = dev->feature_flags;
326                 dev_info->capabilities = octtx_zip_pmd_capabilities;
327                 dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
328         }
329 }
330
331 /** Release queue pair */
332 static int
333 zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
334 {
335         struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
336
337         if (qp != NULL) {
338                 zipvf_q_term(qp);
339
340                 rte_ring_free(qp->processed_pkts);
341
342                 rte_free(qp);
343                 dev->data->queue_pairs[qp_id] = NULL;
344         }
345         return 0;
346 }
347
348 /** Create a ring to place process packets on */
349 static struct rte_ring *
350 zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
351                 unsigned int ring_size, int socket_id)
352 {
353         struct rte_ring *r;
354
355         r = rte_ring_lookup(qp->name);
356         if (r) {
357                 if (rte_ring_get_size(r) >= ring_size) {
358                         ZIP_PMD_INFO("Reusing existing ring %s for processed"
359                                         " packets", qp->name);
360                         return r;
361                 }
362
363                 ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
364                                 " packets", qp->name);
365                 return NULL;
366         }
367
368         return rte_ring_create(qp->name, ring_size, socket_id,
369                                                 RING_F_EXACT_SZ);
370 }
371
372 /** Setup a queue pair */
373 static int
374 zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
375                 uint32_t max_inflight_ops, int socket_id)
376 {
377         struct zipvf_qp *qp = NULL;
378         struct zip_vf *vf;
379         char *name;
380         int ret;
381
382         if (!dev)
383                 return -1;
384
385         vf = (struct zip_vf *) (dev->data->dev_private);
386
387         /* Free memory prior to re-allocation if needed. */
388         if (dev->data->queue_pairs[qp_id] != NULL) {
389                 ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
390                 return 0;
391         }
392
393         name =  rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
394         if (name == NULL)
395                 return (-ENOMEM);
396         snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
397                  "zip_pmd_%u_qp_%u",
398                  dev->data->dev_id, qp_id);
399
400         /* Allocate the queue pair data structure. */
401         qp = rte_zmalloc_socket(name, sizeof(*qp),
402                                 RTE_CACHE_LINE_SIZE, socket_id);
403         if (qp == NULL) {
404                 rte_free(name);
405                 return (-ENOMEM);
406         }
407
408         qp->name = name;
409
410         /* Create completion queue up to max_inflight_ops */
411         qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
412                                                 max_inflight_ops, socket_id);
413         if (qp->processed_pkts == NULL)
414                 goto qp_setup_cleanup;
415
416         qp->id = qp_id;
417         qp->vf = vf;
418
419         ret = zipvf_q_init(qp);
420         if (ret < 0)
421                 goto qp_setup_cleanup;
422
423         dev->data->queue_pairs[qp_id] = qp;
424
425         memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
426         return 0;
427
428 qp_setup_cleanup:
429         rte_ring_free(qp->processed_pkts);
430         rte_free(qp);
431         return -1;
432 }
433
434 static int
435 zip_pmd_stream_create(struct rte_compressdev *dev,
436                 const struct rte_comp_xform *xform, void **stream)
437 {
438         int ret;
439         struct zip_stream *strm = NULL;
440
441         strm = rte_malloc(NULL,
442                         sizeof(struct zip_stream), 0);
443
444         if (strm == NULL)
445                 return (-ENOMEM);
446
447         ret = zip_set_stream_parameters(dev, xform, strm);
448         if (ret < 0) {
449                 ZIP_PMD_ERR("failed configure xform parameters");
450                 rte_free(strm);
451                 return ret;
452         }
453         *stream = strm;
454         return 0;
455 }
456
457 static int
458 zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
459 {
460         struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
461         struct zip_stream *z_stream;
462
463         if (stream == NULL)
464                 return 0;
465
466         z_stream = (struct zip_stream *)stream;
467
468         /* Free resources back to pool */
469         rte_mempool_put_bulk(vf->zip_mp,
470                                 (void *)&(z_stream->bufs[0]),
471                                 MAX_BUFS_PER_STREAM);
472
473         /* Zero out the whole structure */
474         memset(stream, 0, sizeof(struct zip_stream));
475         rte_free(stream);
476
477         return 0;
478 }
479
480
481 static uint16_t
482 zip_pmd_enqueue_burst_sync(void *queue_pair,
483                 struct rte_comp_op **ops, uint16_t nb_ops)
484 {
485         struct zipvf_qp *qp = queue_pair;
486         struct rte_comp_op *op;
487         struct zip_stream *zstrm;
488         int i, ret = 0;
489         uint16_t enqd = 0;
490
491         for (i = 0; i < nb_ops; i++) {
492                 op = ops[i];
493
494                 if (op->op_type == RTE_COMP_OP_STATEFUL) {
495                         op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
496                 } else {
497                         /* process stateless ops */
498                         zstrm = (struct zip_stream *)op->private_xform;
499                         if (unlikely(zstrm == NULL))
500                                 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
501                         else
502                                 ret = zstrm->func(op, qp, zstrm);
503                 }
504
505                 /* Whatever is out of op, put it into completion queue with
506                  * its status
507                  */
508                 if (!ret)
509                         ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
510
511                 if (unlikely(ret < 0)) {
512                         /* increment count if failed to enqueue op*/
513                         qp->qp_stats.enqueue_err_count++;
514                 } else {
515                         qp->qp_stats.enqueued_count++;
516                         enqd++;
517                 }
518         }
519         return enqd;
520 }
521
522 static uint16_t
523 zip_pmd_dequeue_burst_sync(void *queue_pair,
524                 struct rte_comp_op **ops, uint16_t nb_ops)
525 {
526         struct zipvf_qp *qp = queue_pair;
527
528         unsigned int nb_dequeued = 0;
529
530         nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
531                         (void **)ops, nb_ops, NULL);
532         qp->qp_stats.dequeued_count += nb_dequeued;
533
534         return nb_dequeued;
535 }
536
/* compressdev ops table wired into the device in zip_pci_probe().
 * Stateful streams are not supported, so stream_create/free are NULL;
 * the non-shareable priv_xform path reuses the stream helpers.
 */
static struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
		.stream_create		= NULL,
		.stream_free		= NULL
};
556
/* PCI probe: create the compressdev for the matched ZIP VF, initialize
 * the VF hardware (primary process only), and hook up the ops and
 * burst functions.
 *
 * Returns 0 on success, -ENODEV/negative on failure.
 */
static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	/* Device name is derived from its PCI address. */
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/*
	 * create only if proc_type is primary.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/*  create vf dev with given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* register rx/tx burst functions for data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}
603
604 static int
605 zip_pci_remove(struct rte_pci_device *pci_dev)
606 {
607         struct rte_compressdev *compressdev;
608         char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
609
610         if (pci_dev == NULL) {
611                 ZIP_PMD_ERR(" Invalid PCI Device\n");
612                 return -EINVAL;
613         }
614         rte_pci_device_name(&pci_dev->addr, compressdev_name,
615                         sizeof(compressdev_name));
616
617         compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
618         if (compressdev == NULL)
619                 return -ENODEV;
620
621         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
622                 if (zipvf_destroy(compressdev) < 0)
623                         return -ENODEV;
624         }
625         return rte_compressdev_pmd_destroy(compressdev);
626 }
627
/* PCI IDs matched by this driver: the Cavium OcteonTX ZIP virtual
 * function. Terminated by a zeroed entry.
 */
static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		.device_id = 0
	},
};
637
638 /**
639  * Structure that represents a PCI driver
640  */
641 static struct rte_pci_driver octtx_zip_pmd = {
642         .id_table    = pci_id_octtx_zipvf_table,
643         .drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
644         .probe       = zip_pci_probe,
645         .remove      = zip_pci_remove,
646 };
647
/* Register the driver with the EAL PCI bus, export its ID table for
 * tooling (e.g. dpdk-pmdinfo), and create the driver's log type with a
 * default level of INFO.
 */
RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
RTE_LOG_REGISTER_DEFAULT(octtx_zip_logtype_driver, INFO);