/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"
static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non-shareable Priv XFORM and Stateless */
		.window_size = {
			/* size supported 2^1 to 2^14 */
			.min = 1,
			.max = 14,
			.increment = 1
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};
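/*
 * Illustrative sketch (not part of the driver): an application would normally
 * discover these limits through the compressdev API before building xforms:
 *
 *	const struct rte_compressdev_capabilities *cap =
 *		rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);
 *	if (cap != NULL &&
 *	    (cap->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC))
 *		pick_window(cap->window_size.max);	// hypothetical helper
 *
 * "dev_id" and "pick_window" are placeholders for application code.
 */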
/* Reset session to default state for the next set of stateless operations */
static inline void
reset_stream(struct zip_stream *z_stream)
{
	union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
static int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm)
{
	union zip_inst_s *inst = zstrm->inst;
	volatile union zip_zres_s *zresult = NULL;

	if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
			(op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
			(op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZIP_PMD_ERR("Segmented packet is not supported");
		return 0;
	}
	zipvf_prepare_cmd_stateless(op, zstrm);

	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
	zresult->s.compcode = 0;

	zip_dump_instruction(inst);

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	/* Check and process results in sync mode */
	do {
	} while (!zresult->s.compcode);
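	/*
	 * Note: the engine writes a non-zero completion code into the result
	 * buffer once the instruction finishes, so the loop above simply
	 * busy-polls the (volatile) result word rather than using interrupts.
	 */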
	if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
	} else {
		/* FATAL error, cannot do anything */
		ZIP_PMD_ERR("operation failed with error code: %d",
			zresult->s.compcode);
		if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			op->status = RTE_COMP_OP_STATUS_ERROR;
	}

	ZIP_PMD_INFO("written %d bytes", zresult->s.totalbyteswritten);
	/* Update op stats */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed = zresult->s.totalbytesread;
		/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced = zresult->s.totalbyteswritten;
		break;
	default:
		ZIP_PMD_ERR("stats not updated for status: %d",
				op->status);
	}

	/* zstream is reset irrespective of the result */
	reset_stream(zstrm);
	zresult->s.compcode = ZIP_COMP_E_NOTDONE;
	return 0;
}
/** Parse xform parameters and setup a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	int ret;
	union zip_inst_s *inst;
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	void *res;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, MAX_BUFS_PER_STREAM);
	if (ret < 0)
		return -1;
	/* Get one command buffer from the pool and set it up */
	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
	res = z_stream->bufs[RES_BUF];

	memset(inst->u, 0, sizeof(inst->u));

	/* Set bf only for the first op of a stream */
	inst->s.bf = 1;
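	/*
	 * Note: bf marks the beginning of a file/stream to the ZIP engine.
	 * Only stateless ops are supported, so every op starts a fresh
	 * stream and the flag is restored by reset_stream() after each op.
	 */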
	if (xform->type == RTE_COMP_COMPRESS) {
		inst->s.op = ZIP_OP_E_COMP;

		switch (xform->compress.deflate.huffman) {
		case RTE_COMP_HUFFMAN_DEFAULT:
			inst->s.cc = ZIP_CC_DEFAULT;
			break;
		case RTE_COMP_HUFFMAN_FIXED:
			inst->s.cc = ZIP_CC_FIXED_HUFF;
			break;
		case RTE_COMP_HUFFMAN_DYNAMIC:
			inst->s.cc = ZIP_CC_DYN_HUFF;
			break;
		}
		switch (xform->compress.level) {
		case RTE_COMP_LEVEL_MIN:
			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
			break;
		case RTE_COMP_LEVEL_MAX:
			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
			break;
		case RTE_COMP_LEVEL_NONE:
			ZIP_PMD_ERR("Compression level not supported");
			goto err;
		default:
			/* For any value between min and max,
			 * choose the PMD default.
			 */
			inst->s.ss = ZIP_COMP_E_LEVEL_MED;
			break;
		}
	} else if (xform->type == RTE_COMP_DECOMPRESS) {
		inst->s.op = ZIP_OP_E_DECOMP;
		/*
		 * For DEFLATE decompression, [CC] must be 0x0.
		 * For decompression, [SS] must be 0x0.
		 */

		/* Speed bit should not be set for decompression */

		/* Decompression context is supported only for STATEFUL
		 * operations. Currently only STATELESS is supported, so
		 * skip setting the ctx pointer.
		 */
	} else {
		ZIP_PMD_ERR("Xform type not supported");
		goto err;
	}
	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
	inst->s.res_ptr_ctl.s.length = 0;

	z_stream->inst = inst;
	z_stream->func = zip_process_op;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			(void *)&(z_stream->bufs[0]),
			MAX_BUFS_PER_STREAM);
	return -1;
}
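/*
 * Illustrative sketch (not part of the driver): a compress xform an
 * application might pass via rte_compressdev_private_xform_create(), which
 * ends up in zip_set_stream_parameters() above:
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *			.window_size = 14,
 *		},
 *	};
 *
 * Field values are examples only; the capability table above defines what
 * this PMD actually accepts.
 */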
/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int nb_streams;
	char res_pool[RTE_MEMZONE_NAMESIZE];
	struct zip_vf *vf;
	struct rte_mempool *zip_buf_mp;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Create a pool with the maximum number of resources
	 * required by streams.
	 */
	/* Use a common pool for non-shareable priv_xform and stream */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	/* TBD: should we use the per-core object cache for stream resources? */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			nb_streams * MAX_BUFS_PER_STREAM,

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;
	return 0;
}
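/*
 * Illustrative sketch (not part of the driver): the pool sizing above is
 * driven by what the application requests at configure time, e.g.:
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *		.max_nb_priv_xforms = 16,
 *		.max_nb_streams = 0,
 *	};
 *	if (rte_compressdev_configure(dev_id, &cfg) < 0)
 *		handle_error();	// hypothetical error handling
 *
 * With these numbers the PMD would reserve 16 stream resource sets
 * (max_nb_priv_xforms + max_nb_streams), each of MAX_BUFS_PER_STREAM buffers.
 */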
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{
}

static int
zip_pmd_close(struct rte_compressdev *dev)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	rte_mempool_free(vf->zip_mp);
	return 0;
}
/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	uint32_t qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}
/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	uint32_t qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}
/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}
/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		if (qp->processed_pkts)
			rte_ring_free(qp->processed_pkts);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}
/** Create a ring to place processed packets on */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
/** Setup a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
			dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->name = name;

	/* Create completion queue up to max_inflight_ops */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
			max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	if (qp->processed_pkts)
		rte_ring_free(qp->processed_pkts);
	rte_free(qp);
	return -1;
}
static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);
	if (strm == NULL)
		return -ENOMEM;

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("failed to configure xform parameters");
		rte_free(strm);
		return ret;
	}

	*stream = strm;
	return 0;
}
static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *)(dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to pool */
	rte_mempool_put_bulk(vf->zip_mp,
			(void *)&(z_stream->bufs[0]),
			MAX_BUFS_PER_STREAM);

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}
static uint16_t
zip_pmd_enqueue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	struct rte_comp_op *op;
	struct zip_stream *zstrm;
	uint16_t enqd = 0;
	int i, ret;

	for (i = 0; i < nb_ops; i++) {
		op = ops[i];

		if (op->op_type == RTE_COMP_OP_STATEFUL) {
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		} else {
			/* process stateless ops */
			zstrm = (struct zip_stream *)op->private_xform;
			if (unlikely(zstrm == NULL))
				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			else
				ret = zstrm->func(op, qp, zstrm);
		}

		/* Whatever the outcome of the op, put it into the
		 * completion queue with its status.
		 */
		ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
		if (unlikely(ret < 0)) {
			/* increment count if failed to enqueue op */
			qp->qp_stats.enqueue_err_count++;
		} else {
			qp->qp_stats.enqueued_count++;
			enqd++;
		}
	}
	return enqd;
}
static uint16_t
zip_pmd_dequeue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	unsigned int nb_dequeued = 0;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
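/*
 * Illustrative sketch (not part of the driver): because the enqueue path
 * above completes each op synchronously and parks it on the completion ring,
 * a caller simply enqueues and then drains, e.g.:
 *
 *	uint16_t sent = rte_compressdev_enqueue_burst(dev_id, 0, ops, n);
 *	uint16_t done = 0;
 *	while (done < sent)
 *		done += rte_compressdev_dequeue_burst(dev_id, 0,
 *				&ops[done], sent - done);
 *
 * "dev_id", "ops" and "n" are placeholders for application state.
 */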
static struct rte_compressdev_ops octtx_zip_pmd_ops = {
	.dev_configure		= zip_pmd_config,
	.dev_start		= zip_pmd_start,
	.dev_stop		= zip_pmd_stop,
	.dev_close		= zip_pmd_close,

	.stats_get		= zip_pmd_stats_get,
	.stats_reset		= zip_pmd_stats_reset,

	.dev_infos_get		= zip_pmd_info_get,

	.queue_pair_setup	= zip_pmd_qp_setup,
	.queue_pair_release	= zip_pmd_qp_release,

	.private_xform_create	= zip_pmd_stream_create,
	.private_xform_free	= zip_pmd_stream_free,

	.stream_create		= NULL,
	.stream_free		= NULL
};
static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/*
	 * Create only if proc_type is primary.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* create vf dev with given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* register rx/tx burst functions for data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;

	return 0;
}
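/*
 * In a DPDK multi-process setup only the primary process programs the VF
 * hardware (zipvf_create()); secondary processes reuse the shared device
 * data created above and only register the per-process ops and burst
 * function pointers.
 */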
static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device");
		return -EINVAL;
	}

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}
static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{ .device_id = 0 },
};
/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table = pci_id_octtx_zipvf_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = zip_pci_probe,
	.remove = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
RTE_LOG_REGISTER(octtx_zip_logtype_driver, pmd.compress.octeontx, INFO);