1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
7 #include <rte_byteorder.h>
8 #include <rte_common.h>
9 #include <rte_cpuflags.h>
10 #include <rte_malloc.h>
/* Driver log type id; registered and leveled in octtx_zip_init_log(). */
14 int octtx_zip_logtype_driver;
/* Capabilities advertised by this PMD: DEFLATE with fixed and dynamic
 * Huffman coding. Per the inline comments, priv xforms are
 * non-shareable and operation is stateless; supported window sizes
 * are 2^1 .. 2^14. Terminated by the end-of-capabilities marker.
 */
16 static const struct rte_compressdev_capabilities
17 octtx_zip_pmd_capabilities[] = {
18 { .algo = RTE_COMP_ALGO_DEFLATE,
20 .comp_feature_flags = RTE_COMP_FF_HUFFMAN_FIXED |
21 RTE_COMP_FF_HUFFMAN_DYNAMIC,
22 /* Non sharable Priv XFORM and Stateless */
27 /* size supported 2^1 to 2^14 */
30 RTE_COMP_END_OF_CAPABILITIES_LIST()
34 * Reset session to default state for next set of stateless operation
/* Re-initializes the stream's hardware instruction for reuse.
 * NOTE(review): return type and most of the body are not visible in
 * this chunk — confirm the full behavior against the original file.
 */
37 reset_stream(struct zip_stream *z_stream)
39 union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
/* Process one stateless op synchronously: validate the mbufs, build
 * and submit a ZIP instruction, busy-poll the result word until the
 * hardware writes a completion code, then translate that code into an
 * rte_comp op status and the consumed/produced byte counters.
 * NOTE(review): several lines (returns, braces, do-loop head) are not
 * visible in this chunk.
 */
46 zip_process_op(struct rte_comp_op *op,
48 struct zip_stream *zstrm)
50 union zip_inst_s *inst = zstrm->inst;
51 volatile union zip_zres_s *zresult = NULL;
/* Reject multi-segment mbufs and offsets beyond the packet length. */
54 if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
55 (op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
56 (op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
57 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
58 ZIP_PMD_ERR("Segmented packet is not supported\n");
62 zipvf_prepare_cmd_stateless(op, zstrm);
/* Result buffer is volatile: the hardware writes compcode on
 * completion; clear it before submitting. */
64 zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
65 zresult->s.compcode = 0;
68 zip_dump_instruction(inst);
71 /* Submit zip command */
72 zipvf_push_command(qp, (void *)inst);
74 /* Check and Process results in sync mode */
/* Busy-poll until a non-zero completion code appears. */
76 } while (!zresult->s.compcode);
78 if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
79 op->status = RTE_COMP_OP_STATUS_SUCCESS;
81 /* FATAL error cannot do anything */
82 ZIP_PMD_ERR("operation failed with error code:%d\n",
/* DSTOP maps to out-of-space; everything else is a generic error. */
84 if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
85 op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
87 op->status = RTE_COMP_OP_STATUS_ERROR;
90 ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);
/* Update byte counters per final status; SUCCESS presumably falls
 * through to also set op->produced — confirm in full source. */
94 case RTE_COMP_OP_STATUS_SUCCESS:
95 op->consumed = zresult->s.totalbytesread;
97 case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
98 op->produced = zresult->s.totalbyteswritten;
101 ZIP_PMD_ERR("stats not updated for status:%d\n",
105 /* zstream is reset irrespective of result */
/* Mark the result word consumed so the buffer can be reused. */
108 zresult->s.compcode = ZIP_COMP_E_NOTDONE;
112 /** Parse xform parameters and setup a stream */
/* Builds the hardware instruction template for a stream: takes
 * CMD/RES buffers from the VF mempool, programs the operation
 * (compress/decompress), Huffman coding-control and compression
 * level, points the instruction at the result buffer, and records
 * the instruction plus the zip_process_op callback in z_stream.
 * The cleanup path at the bottom returns buffers to the pool.
 * NOTE(review): several lines (breaks, returns, braces) are not
 * visible in this chunk.
 */
114 zip_set_stream_parameters(struct rte_compressdev *dev,
115 const struct rte_comp_xform *xform,
116 struct zip_stream *z_stream)
119 union zip_inst_s *inst;
120 struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
123 /* Allocate resources required by a stream */
124 ret = rte_mempool_get_bulk(vf->zip_mp,
125 z_stream->bufs, MAX_BUFS_PER_STREAM);
129 /* get one command buffer from pool and set up */
130 inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
131 res = z_stream->bufs[RES_BUF];
133 memset(inst->u, 0, sizeof(inst->u));
135 /* set bf for only first ops of stream */
138 if (xform->type == RTE_COMP_COMPRESS) {
139 inst->s.op = ZIP_OP_E_COMP;
/* Map the API Huffman choice onto the HW coding-control field. */
141 switch (xform->compress.deflate.huffman) {
142 case RTE_COMP_HUFFMAN_DEFAULT:
143 inst->s.cc = ZIP_CC_DEFAULT;
145 case RTE_COMP_HUFFMAN_FIXED:
146 inst->s.cc = ZIP_CC_FIXED_HUFF;
148 case RTE_COMP_HUFFMAN_DYNAMIC:
149 inst->s.cc = ZIP_CC_DYN_HUFF;
/* Map the API compression level onto the HW field; LEVEL_NONE is
 * rejected, and any level between min and max gets the PMD-default
 * medium setting. */
156 switch (xform->compress.level) {
157 case RTE_COMP_LEVEL_MIN:
158 inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
160 case RTE_COMP_LEVEL_MAX:
161 inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
163 case RTE_COMP_LEVEL_NONE:
164 ZIP_PMD_ERR("Compression level not supported");
168 /* for any value between min and max , choose
171 inst->s.ss = ZIP_COMP_E_LEVEL_MED; /** PMD default **/
174 } else if (xform->type == RTE_COMP_DECOMPRESS) {
175 inst->s.op = ZIP_OP_E_DECOMP;
177 * For DEFLATE decompression, [CC] must be 0x0.
178 * For decompression, [SS] must be 0x0
181 /* Speed bit should not be set for decompression */
183 /* decompression context is supported only for STATEFUL
184 * operations. Currently we support STATELESS ONLY so
185 * skip setting of ctx pointer
189 ZIP_PMD_ERR("\nxform type not supported");
/* Point the instruction at the stream's result buffer (IOVA). */
194 inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
195 inst->s.res_ptr_ctl.s.length = 0;
197 z_stream->inst = inst;
198 z_stream->func = zip_process_op;
/* Error path: give all stream buffers back to the pool. */
203 rte_mempool_put_bulk(vf->zip_mp,
204 (void *)&(z_stream->bufs[0]),
205 MAX_BUFS_PER_STREAM);
210 /** Configure device */
/* Creates one common mempool sized for the worst-case stream count
 * (max_nb_priv_xforms + max_nb_streams, since priv xforms and streams
 * share the pool), MAX_BUFS_PER_STREAM buffers per stream, and stores
 * the pool handle in the VF private data. NOTE(review): mempool
 * element size/flags and return statements are not visible here.
 */
212 zip_pmd_config(struct rte_compressdev *dev,
213 struct rte_compressdev_config *config)
216 char res_pool[RTE_MEMZONE_NAMESIZE];
218 struct rte_mempool *zip_buf_mp;
223 vf = (struct zip_vf *)(dev->data->dev_private);
225 /* create pool with maximum numbers of resources
226 * required by streams
229 /* use common pool for non-shareable priv_xform and stream */
230 nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
232 snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
235 /** TBD Should we use the per core object cache for stream resources */
236 zip_buf_mp = rte_mempool_create(
238 nb_streams * MAX_BUFS_PER_STREAM,
249 if (zip_buf_mp == NULL) {
251 "Failed to create buf mempool octtx_zip_res_pool%u",
256 vf->zip_mp = zip_buf_mp;
/* Device start hook; dev is marked __rte_unused (body not visible here). */
263 zip_pmd_start(__rte_unused struct rte_compressdev *dev)
/* Device stop hook; dev is marked __rte_unused (body not visible here). */
270 zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
/* Device close hook: releases the per-VF stream-buffer mempool that
 * zip_pmd_config() created. */
277 zip_pmd_close(struct rte_compressdev *dev)
282 struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
283 rte_mempool_free(vf->zip_mp);
288 /** Get device statistics */
/* Sums each queue pair's counters into the caller-supplied stats. */
290 zip_pmd_stats_get(struct rte_compressdev *dev,
291 struct rte_compressdev_stats *stats)
295 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
296 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
298 stats->enqueued_count += qp->qp_stats.enqueued_count;
299 stats->dequeued_count += qp->qp_stats.dequeued_count;
301 stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
302 stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
306 /** Reset device statistics */
/* Zeroes every queue pair's counter block. */
308 zip_pmd_stats_reset(struct rte_compressdev *dev)
312 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
313 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
314 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
318 /** Get device info */
/* Fills dev_info from driver and VF state; a NULL dev_info is a no-op. */
320 zip_pmd_info_get(struct rte_compressdev *dev,
321 struct rte_compressdev_info *dev_info)
323 struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
325 if (dev_info != NULL) {
326 dev_info->driver_name = dev->device->driver->name;
327 dev_info->feature_flags = dev->feature_flags;
328 dev_info->capabilities = octtx_zip_pmd_capabilities;
329 dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
333 /** Release queue pair */
/* Frees the completion ring and clears the device's qp slot.
 * (rte_ring_free(NULL) is a no-op, so the guard is redundant but
 * harmless.) NOTE(review): qp struct itself may be freed in lines
 * not visible here — confirm against the full source. */
335 zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
337 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
342 if (qp->processed_pkts)
343 rte_ring_free(qp->processed_pkts);
346 dev->data->queue_pairs[qp_id] = NULL;
351 /** Create a ring to place process packets on */
/* Looks up an existing ring by the qp's name: reuse it when it is at
 * least ring_size; log an error when it exists but is too small;
 * otherwise create a fresh ring on the given socket. */
352 static struct rte_ring *
353 zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
354 unsigned int ring_size, int socket_id)
358 r = rte_ring_lookup(qp->name);
360 if (rte_ring_get_size(r) >= ring_size) {
361 ZIP_PMD_INFO("Reusing existing ring %s for processed"
362 " packets", qp->name);
366 ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
367 " packets", qp->name);
371 return rte_ring_create(qp->name, ring_size, socket_id,
375 /** Setup a queue pair */
/* Allocates and initializes one queue pair: builds a unique name,
 * zmallocs the qp on the target socket, creates the completion ring
 * sized for max_inflight_ops, initializes the VF queue, publishes the
 * qp in dev->data and zeroes its stats. The cleanup label at the
 * bottom releases the ring on failure. NOTE(review): the rte_malloc
 * result for `name` is used by snprintf with no NULL check visible
 * in this chunk — confirm against the full source.
 */
377 zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
378 uint32_t max_inflight_ops, int socket_id)
380 struct zipvf_qp *qp = NULL;
388 vf = (struct zip_vf *) (dev->data->dev_private);
390 /* Free memory prior to re-allocation if needed. */
391 if (dev->data->queue_pairs[qp_id] != NULL) {
392 ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
396 name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
397 snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
399 dev->data->dev_id, qp_id);
401 /* Allocate the queue pair data structure. */
402 qp = rte_zmalloc_socket(name, sizeof(*qp),
403 RTE_CACHE_LINE_SIZE, socket_id);
409 /* Create completion queue up to max_inflight_ops */
410 qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
411 max_inflight_ops, socket_id);
412 if (qp->processed_pkts == NULL)
413 goto qp_setup_cleanup;
418 ret = zipvf_q_init(qp);
420 goto qp_setup_cleanup;
422 dev->data->queue_pairs[qp_id] = qp;
424 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
/* Error path: undo what succeeded before the failure. */
428 if (qp->processed_pkts)
429 rte_ring_free(qp->processed_pkts);
/* Allocates a zip_stream and configures it from the xform via
 * zip_set_stream_parameters(); also used as the private_xform create
 * hook in the ops table. NOTE(review): the NULL-check on the
 * allocation and the *stream assignment are in lines not visible in
 * this chunk — confirm against the full source.
 */
436 zip_pmd_stream_create(struct rte_compressdev *dev,
437 const struct rte_comp_xform *xform, void **stream)
440 struct zip_stream *strm = NULL;
442 strm = rte_malloc(NULL,
443 sizeof(struct zip_stream), 0);
448 ret = zip_set_stream_parameters(dev, xform, strm);
450 ZIP_PMD_ERR("failed configure xform parameters");
/* Returns a stream's buffers to the VF mempool and zeroes the stream
 * struct; also used as the private_xform free hook. NOTE(review):
 * freeing of the stream allocation itself, if any, happens in lines
 * not visible here.
 */
459 zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
461 struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
462 struct zip_stream *z_stream;
467 z_stream = (struct zip_stream *)stream;
469 /* Free resources back to pool */
470 rte_mempool_put_bulk(vf->zip_mp,
471 (void *)&(z_stream->bufs[0]),
472 MAX_BUFS_PER_STREAM);
474 /* Zero out the whole structure */
475 memset(stream, 0, sizeof(struct zip_stream));
/* Synchronous enqueue: each stateless op is processed to completion
 * inline through the stream's func callback (zip_process_op), then
 * pushed onto the qp's completion ring for a later dequeue. Stateful
 * ops and NULL private_xforms are marked INVALID_ARGS. Stats count
 * ring-enqueue successes/failures.
 */
483 zip_pmd_enqueue_burst_sync(void *queue_pair,
484 struct rte_comp_op **ops, uint16_t nb_ops)
486 struct zipvf_qp *qp = queue_pair;
487 struct rte_comp_op *op;
488 struct zip_stream *zstrm;
492 for (i = 0; i < nb_ops; i++) {
495 if (op->op_type == RTE_COMP_OP_STATEFUL) {
496 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
498 /* process stateless ops */
499 zstrm = (struct zip_stream *)op->private_xform;
500 if (unlikely(zstrm == NULL))
501 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
503 ret = zstrm->func(op, qp, zstrm);
506 /* Whatever is out of op, put it into completion queue with
510 ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
512 if (unlikely(ret < 0)) {
513 /* increment count if failed to enqueue op*/
514 qp->qp_stats.enqueue_err_count++;
516 qp->qp_stats.enqueued_count++;
/* Dequeue up to nb_ops completed ops from the qp's completion ring
 * (work was already done at enqueue time in this sync model). */
524 zip_pmd_dequeue_burst_sync(void *queue_pair,
525 struct rte_comp_op **ops, uint16_t nb_ops)
527 struct zipvf_qp *qp = queue_pair;
529 unsigned int nb_dequeued = 0;
531 nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
532 (void **)ops, nb_ops, NULL);
533 qp->qp_stats.dequeued_count += nb_dequeued;
/* compressdev ops table for this PMD. The stream create/free helpers
 * double as the private_xform hooks; stream_create is NULL because
 * stateful operation is not supported. */
538 static struct rte_compressdev_ops octtx_zip_pmd_ops = {
539 .dev_configure = zip_pmd_config,
540 .dev_start = zip_pmd_start,
541 .dev_stop = zip_pmd_stop,
542 .dev_close = zip_pmd_close,
544 .stats_get = zip_pmd_stats_get,
545 .stats_reset = zip_pmd_stats_reset,
547 .dev_infos_get = zip_pmd_info_get,
549 .queue_pair_setup = zip_pmd_qp_setup,
550 .queue_pair_release = zip_pmd_qp_release,
552 .private_xform_create = zip_pmd_stream_create,
553 .private_xform_free = zip_pmd_stream_free,
554 .stream_create = NULL,
/* PCI probe: create the compressdev for the matched ZIP VF device,
 * initialize the VF (primary process only — secondaries attach to the
 * existing device), then wire up the ops table, burst functions and
 * feature flags.
 */
559 zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
560 struct rte_pci_device *pci_dev)
563 char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
564 struct rte_compressdev *compressdev;
565 struct rte_compressdev_pmd_init_params init_params = {
570 ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
571 (unsigned int)pci_dev->id.vendor_id,
572 (unsigned int)pci_dev->id.device_id);
/* Device name is derived from the PCI address. */
574 rte_pci_device_name(&pci_dev->addr, compressdev_name,
575 sizeof(compressdev_name));
577 compressdev = rte_compressdev_pmd_create(compressdev_name,
578 &pci_dev->device, sizeof(struct zip_vf), &init_params);
579 if (compressdev == NULL) {
580 ZIP_PMD_ERR("driver %s: create failed", init_params.name);
585 * create only if proc_type is primary.
587 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
588 /* create vf dev with given pmd dev id */
589 ret = zipvf_create(compressdev);
/* On VF init failure, tear the compressdev back down. */
591 ZIP_PMD_ERR("Device creation failed");
592 rte_compressdev_pmd_destroy(compressdev);
597 compressdev->dev_ops = &octtx_zip_pmd_ops;
598 /* register rx/tx burst functions for data path */
599 compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
600 compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
601 compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
/* PCI remove: look up the compressdev by its PCI-address-derived
 * name, destroy the VF (primary process only), then destroy the
 * compressdev itself.
 */
606 zip_pci_remove(struct rte_pci_device *pci_dev)
608 struct rte_compressdev *compressdev;
609 char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
611 if (pci_dev == NULL) {
612 ZIP_PMD_ERR(" Invalid PCI Device\n");
615 rte_pci_device_name(&pci_dev->addr, compressdev_name,
616 sizeof(compressdev_name));
618 compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
619 if (compressdev == NULL)
622 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
623 if (zipvf_destroy(compressdev) < 0)
626 return rte_compressdev_pmd_destroy(compressdev);
/* PCI vendor/device IDs this driver binds to (Cavium OCTEON TX ZIP VF). */
629 static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
631 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
632 PCI_DEVICE_ID_OCTEONTX_ZIPVF),
640 * Structure that represents a PCI driver
642 static struct rte_pci_driver octtx_zip_pmd = {
643 .id_table = pci_id_octtx_zipvf_table,
/* BAR mapping is required before probe. */
644 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
645 .probe = zip_pci_probe,
646 .remove = zip_pci_remove,
/* Register the driver and its PCI ID table with the EAL PCI bus. */
649 RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
650 RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
/* Constructor: register this PMD's log type and default it to INFO. */
652 RTE_INIT(octtx_zip_init_log)
654 octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
655 if (octtx_zip_logtype_driver >= 0)
656 rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);