1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
7 #include <rte_byteorder.h>
8 #include <rte_common.h>
9 #include <rte_cpuflags.h>
10 #include <rte_malloc.h>
/* Capabilities advertised to the compressdev framework: a single DEFLATE
 * entry supporting both fixed and dynamic Huffman coding, terminated by the
 * mandatory end-of-capabilities marker.  The window-size field initializers
 * are not visible in this fragment (the "2^1 to 2^14" comment refers to
 * them) — NOTE(review): confirm against the full source.
 */
14 static const struct rte_compressdev_capabilities
15 octtx_zip_pmd_capabilities[] = {
16 { .algo = RTE_COMP_ALGO_DEFLATE,
18 .comp_feature_flags = RTE_COMP_FF_HUFFMAN_FIXED |
19 RTE_COMP_FF_HUFFMAN_DYNAMIC,
20 /* Non sharable Priv XFORM and Stateless */
25 /* size supported 2^1 to 2^14 */
28 RTE_COMP_END_OF_CAPABILITIES_LIST()
31 /** Parse xform parameters and setup a stream */
/* Translates an rte_comp_xform into a hardware instruction (zip_inst_s)
 * held by @z_stream.  Allocates MAX_BUFS_PER_STREAM buffers from the VF's
 * common mempool: bufs[CMD_BUF] becomes the command/instruction buffer,
 * bufs[RES_BUF] the result buffer.  On any error path the buffers are
 * returned to the pool via rte_mempool_put_bulk().
 * NOTE(review): the break/return statements and some error checks fall on
 * lines not shown in this fragment — do not assume fall-through in the
 * switches below without checking the full source.
 */
33 zip_set_stream_parameters(struct rte_compressdev *dev,
34 const struct rte_comp_xform *xform,
35 struct zip_stream *z_stream)
38 union zip_inst_s *inst;
39 struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
42 /* Allocate resources required by a stream */
43 ret = rte_mempool_get_bulk(vf->zip_mp,
44 z_stream->bufs, MAX_BUFS_PER_STREAM);
48 /* get one command buffer from pool and set up */
49 inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
50 res = z_stream->bufs[RES_BUF];
/* Clear the whole instruction before selectively setting fields. */
52 memset(inst->u, 0, sizeof(inst->u));
54 /* set bf for only first ops of stream */
57 if (xform->type == RTE_COMP_COMPRESS) {
58 inst->s.op = ZIP_OP_E_COMP;
/* Map the requested Huffman coding onto the hardware CC field. */
60 switch (xform->compress.deflate.huffman) {
61 case RTE_COMP_HUFFMAN_DEFAULT:
62 inst->s.cc = ZIP_CC_DEFAULT;
64 case RTE_COMP_HUFFMAN_FIXED:
65 inst->s.cc = ZIP_CC_FIXED_HUFF;
67 case RTE_COMP_HUFFMAN_DYNAMIC:
68 inst->s.cc = ZIP_CC_DYN_HUFF;
/* Map the compression level onto the hardware speed/size (SS) field. */
75 switch (xform->compress.level) {
76 case RTE_COMP_LEVEL_MIN:
77 inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
79 case RTE_COMP_LEVEL_MAX:
80 inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
82 case RTE_COMP_LEVEL_NONE:
83 ZIP_PMD_ERR("Compression level not supported");
87 /* for any value between min and max , choose
90 inst->s.ss = ZIP_COMP_E_LEVEL_MED; /** PMD default **/
93 } else if (xform->type == RTE_COMP_DECOMPRESS) {
94 inst->s.op = ZIP_OP_E_DECOMP;
/* Hardware requirements for DEFLATE decompression: */
96 * For DEFLATE decompression, [CC] must be 0x0.
97 * For decompression, [SS] must be 0x0
100 /* Speed bit should not be set for decompression */
102 /* decompression context is supported only for STATEFUL
103 * operations. Currently we support STATELESS ONLY so
104 * skip setting of ctx pointer
/* Neither compress nor decompress: reject the xform. */
108 ZIP_PMD_ERR("\nxform type not supported");
/* Point the instruction at the result buffer (IOVA, zero length). */
113 inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
114 inst->s.res_ptr_ctl.s.length = 0;
116 z_stream->inst = inst;
/* Error path: hand every stream buffer back to the common pool. */
121 rte_mempool_put_bulk(vf->zip_mp,
122 (void *)&(z_stream->bufs[0]),
123 MAX_BUFS_PER_STREAM);
128 /** Configure device */
/* Creates one common buffer mempool sized for the worst case: every
 * private xform plus every stream needs MAX_BUFS_PER_STREAM buffers.
 * The pool handle is stored in vf->zip_mp and later drawn from by
 * zip_set_stream_parameters()/zip_pmd_stream_create().
 */
130 zip_pmd_config(struct rte_compressdev *dev,
131 struct rte_compressdev_config *config)
134 char res_pool[RTE_MEMZONE_NAMESIZE];
136 struct rte_mempool *zip_buf_mp;
141 vf = (struct zip_vf *)(dev->data->dev_private);
143 /* create pool with maximum numbers of resources
144 * required by streams
147 /* use common pool for non-shareable priv_xform and stream */
148 nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
/* Pool name is made unique per device (format takes a %u argument). */
150 snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
153 /** TBD Should we use the per core object cache for stream resources */
154 zip_buf_mp = rte_mempool_create(
156 nb_streams * MAX_BUFS_PER_STREAM,
167 if (zip_buf_mp == NULL) {
169 "Failed to create buf mempool octtx_zip_res_pool%u",
174 vf->zip_mp = zip_buf_mp;
/* Device start hook.  The device argument is marked unused, so no per-start
 * hardware action is taken here — NOTE(review): body not visible in this
 * fragment; presumably returns 0 unconditionally.
 */
181 zip_pmd_start(__rte_unused struct rte_compressdev *dev)
/* Device stop hook.  The device argument is marked unused, so no per-stop
 * hardware action is taken here — NOTE(review): body not visible in this
 * fragment.
 */
188 zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
/* Device close hook: releases the common buffer mempool created by
 * zip_pmd_config().  rte_mempool_free() tolerates a NULL handle.
 */
195 zip_pmd_close(struct rte_compressdev *dev)
200 struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
201 rte_mempool_free(vf->zip_mp);
206 /** Get device statistics */
/* Accumulates per-queue-pair counters into @stats across all configured
 * queue pairs.  NOTE(review): @stats does not appear to be zeroed in the
 * visible lines — caller presumably provides a zeroed struct; confirm.
 */
208 zip_pmd_stats_get(struct rte_compressdev *dev,
209 struct rte_compressdev_stats *stats)
213 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
214 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
216 stats->enqueued_count += qp->qp_stats.enqueued_count;
217 stats->dequeued_count += qp->qp_stats.dequeued_count;
219 stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
220 stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
224 /** Reset device statistics */
/* Zeroes the qp_stats counters of every configured queue pair. */
226 zip_pmd_stats_reset(struct rte_compressdev *dev)
230 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
231 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
232 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
236 /** Get device info */
/* Fills @dev_info with driver name, feature flags, the static capability
 * table and the VF's queue-pair limit.  A NULL @dev_info is silently
 * ignored, per the compressdev ops convention.
 */
238 zip_pmd_info_get(struct rte_compressdev *dev,
239 struct rte_compressdev_info *dev_info)
241 struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
243 if (dev_info != NULL) {
244 dev_info->driver_name = dev->device->driver->name;
245 dev_info->feature_flags = dev->feature_flags;
246 dev_info->capabilities = octtx_zip_pmd_capabilities;
247 dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
251 /** Release queue pair */
/* Frees the completion ring of queue pair @qp_id and clears its slot in
 * dev->data->queue_pairs.  NOTE(review): the NULL guard before
 * rte_ring_free() is redundant — rte_ring_free(NULL) is a no-op.
 */
253 zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
255 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
260 if (qp->processed_pkts)
261 rte_ring_free(qp->processed_pkts);
264 dev->data->queue_pairs[qp_id] = NULL;
269 /** Create a ring to place process packets on */
/* Returns the completion ring for @qp: if a ring with the qp's name
 * already exists (e.g. after reconfiguration) and is large enough it is
 * reused; if it exists but is too small the lookup fails with an error
 * log; otherwise a fresh ring of @ring_size entries is created on
 * @socket_id.  Flags passed to rte_ring_create() are on a line not shown
 * in this fragment.
 */
270 static struct rte_ring *
271 zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
272 unsigned int ring_size, int socket_id)
276 r = rte_ring_lookup(qp->name);
278 if (rte_ring_get_size(r) >= ring_size) {
279 ZIP_PMD_INFO("Reusing existing ring %s for processed"
280 " packets", qp->name);
284 ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
285 " packets", qp->name);
289 return rte_ring_create(qp->name, ring_size, socket_id,
293 /** Setup a queue pair */
/* Allocates and initializes queue pair @qp_id: builds a per-qp name,
 * zero-allocates the qp structure on @socket_id, creates a completion
 * ring sized for @max_inflight_ops, initializes the hardware queue via
 * zipvf_q_init(), then publishes the qp and clears its stats.  The
 * cleanup label at the bottom frees the ring on any failure.
 * NOTE(review): the rte_malloc() return for 'name' is used by snprintf
 * without a visible NULL check — confirm against the full source.
 */
295 zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
296 uint32_t max_inflight_ops, int socket_id)
298 struct zipvf_qp *qp = NULL;
306 vf = (struct zip_vf *) (dev->data->dev_private);
308 /* Free memory prior to re-allocation if needed. */
309 if (dev->data->queue_pairs[qp_id] != NULL) {
310 ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
314 name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
315 snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
317 dev->data->dev_id, qp_id);
319 /* Allocate the queue pair data structure. */
320 qp = rte_zmalloc_socket(name, sizeof(*qp),
321 RTE_CACHE_LINE_SIZE, socket_id);
327 /* Create completion queue upto max_inflight_ops */
328 qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
329 max_inflight_ops, socket_id);
330 if (qp->processed_pkts == NULL)
331 goto qp_setup_cleanup;
336 ret = zipvf_q_init(qp);
338 goto qp_setup_cleanup;
/* Success: publish the qp and start with clean statistics. */
340 dev->data->queue_pairs[qp_id] = qp;
342 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
/* Cleanup path (label not visible here): release the completion ring. */
346 if (qp->processed_pkts)
347 rte_ring_free(qp->processed_pkts);
/* Allocates a zip_stream, configures it from @xform via
 * zip_set_stream_parameters(), and returns it through @stream.  Also used
 * as the private_xform_create op (see octtx_zip_pmd_ops) since this PMD's
 * priv xforms are non-shareable.
 * NOTE(review): error handling after rte_malloc()/configure failure falls
 * on lines not shown in this fragment.
 */
354 zip_pmd_stream_create(struct rte_compressdev *dev,
355 const struct rte_comp_xform *xform, void **stream)
358 struct zip_stream *strm = NULL;
360 strm = rte_malloc(NULL,
361 sizeof(struct zip_stream), 0);
366 ret = zip_set_stream_parameters(dev, xform, strm);
368 ZIP_PMD_ERR("failed configure xform parameters");
/* Releases a stream previously created by zip_pmd_stream_create(): hands
 * its MAX_BUFS_PER_STREAM buffers back to the VF's common mempool and
 * scrubs the structure.  The rte_free() of the stream itself is on a line
 * not shown in this fragment.
 */
377 zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
379 struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
380 struct zip_stream *z_stream;
385 z_stream = (struct zip_stream *)stream;
387 /* Free resources back to pool */
388 rte_mempool_put_bulk(vf->zip_mp,
389 (void *)&(z_stream->bufs[0]),
390 MAX_BUFS_PER_STREAM);
392 /* Zero out the whole structure */
393 memset(stream, 0, sizeof(struct zip_stream));
/* compressdev operations table wired into the device in zip_pci_probe().
 * Stream create/free double as the private-xform hooks because this PMD's
 * priv xforms are non-shareable; stateful stream_create is explicitly NULL
 * (stateless-only support).
 */
400 struct rte_compressdev_ops octtx_zip_pmd_ops = {
401 .dev_configure = zip_pmd_config,
402 .dev_start = zip_pmd_start,
403 .dev_stop = zip_pmd_stop,
404 .dev_close = zip_pmd_close,
406 .stats_get = zip_pmd_stats_get,
407 .stats_reset = zip_pmd_stats_reset,
409 .dev_infos_get = zip_pmd_info_get,
411 .queue_pair_setup = zip_pmd_qp_setup,
412 .queue_pair_release = zip_pmd_qp_release,
414 .private_xform_create = zip_pmd_stream_create,
415 .private_xform_free = zip_pmd_stream_free,
416 .stream_create = NULL,
/* PCI probe: derives the compressdev name from the PCI address, creates
 * the compressdev with a zip_vf private area, performs VF hardware setup
 * only in the primary process (destroying the compressdev on failure),
 * then installs the ops table and advertises HW acceleration.
 */
421 zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
422 struct rte_pci_device *pci_dev)
425 char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
426 struct rte_compressdev *compressdev;
427 struct rte_compressdev_pmd_init_params init_params = {
432 ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
433 (unsigned int)pci_dev->id.vendor_id,
434 (unsigned int)pci_dev->id.device_id);
436 rte_pci_device_name(&pci_dev->addr, compressdev_name,
437 sizeof(compressdev_name));
439 compressdev = rte_compressdev_pmd_create(compressdev_name,
440 &pci_dev->device, sizeof(struct zip_vf), &init_params);
441 if (compressdev == NULL) {
442 ZIP_PMD_ERR("driver %s: create failed", init_params.name);
/* Hardware (VF) initialization is a primary-process responsibility. */
447 * create only if proc_type is primary.
449 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
450 /* create vf dev with given pmd dev id */
451 ret = zipvf_create(compressdev);
453 ZIP_PMD_ERR("Device creation failed");
454 rte_compressdev_pmd_destroy(compressdev);
459 compressdev->dev_ops = &octtx_zip_pmd_ops;
460 /* register rx/tx burst functions for data path */
461 compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
/* PCI remove: looks up the compressdev by its PCI-address-derived name,
 * tears down the VF hardware in the primary process, then destroys the
 * compressdev.  NOTE(review): the "\n" embedded in the error message is
 * inconsistent with the other ZIP_PMD_ERR calls in this file, which rely
 * on the log macro for the newline — confirm and normalize.
 */
466 zip_pci_remove(struct rte_pci_device *pci_dev)
468 struct rte_compressdev *compressdev;
469 char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
471 if (pci_dev == NULL) {
472 ZIP_PMD_ERR(" Invalid PCI Device\n");
475 rte_pci_device_name(&pci_dev->addr, compressdev_name,
476 sizeof(compressdev_name));
478 compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
479 if (compressdev == NULL)
482 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
483 if (zipvf_destroy(compressdev) < 0)
486 return rte_compressdev_pmd_destroy(compressdev);
/* PCI ID table: matches the Cavium OCTEON TX ZIP virtual function.  The
 * zero-filled sentinel entry terminating the table is on a line not shown
 * in this fragment.
 */
489 static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
491 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
492 PCI_DEVICE_ID_OCTEONTX_ZIPVF),
500 * Structure that represents a PCI driver
/* Driver needs BAR mapping (RTE_PCI_DRV_NEED_MAPPING) to touch VF
 * registers; probe/remove wire up the compressdev lifecycle above.
 */
502 static struct rte_pci_driver octtx_zip_pmd = {
503 .id_table = pci_id_octtx_zipvf_table,
504 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
505 .probe = zip_pci_probe,
506 .remove = zip_pci_remove,
/* Register the driver and its PCI ID table with the EAL PCI bus. */
509 RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
510 RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
/* Constructor-time log setup: register the driver logtype and default its
 * level to INFO.  Uses the older two-part RTE_INIT declaration style; the
 * function's storage-class/return-type line is not shown in this fragment.
 */
512 RTE_INIT(octtx_zip_init_log);
515 octtx_zip_init_log(void)
517 octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
518 if (octtx_zip_logtype_driver >= 0)
519 rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);