/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"
static const struct rte_compressdev_capabilities
                                octtx_zip_pmd_capabilities[] = {
        {       .algo = RTE_COMP_ALGO_DEFLATE,
                /* Deflate */
                .comp_feature_flags =   RTE_COMP_FF_HUFFMAN_FIXED |
                                        RTE_COMP_FF_HUFFMAN_DYNAMIC,
                /* Non-shareable priv XFORM and stateless only */
                .window_size = {
                        .min = 1,
                        .max = 14,
                        .increment = 1
                        /* size supported 2^1 to 2^14 */
                },
        },
        RTE_COMP_END_OF_CAPABILITIES_LIST()
};
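
/*
 * Application-side sketch (not part of this driver; dev_id is an
 * illustrative variable, use_dynamic_huffman() a hypothetical helper):
 * the table above is what the compressdev framework hands back when an
 * application queries DEFLATE support:
 *
 *      const struct rte_compressdev_capabilities *cap =
 *              rte_compressdev_capability_get(dev_id,
 *                      RTE_COMP_ALGO_DEFLATE);
 *      if (cap != NULL &&
 *          (cap->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC))
 *              use_dynamic_huffman();
 */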

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
                struct rte_compressdev_config *config)
{
        int nb_streams;
        char res_pool[RTE_MEMZONE_NAMESIZE];
        struct zip_vf *vf;
        struct rte_mempool *zip_buf_mp;

        if (!config || !dev)
                return -EIO;

        vf = (struct zip_vf *)(dev->data->dev_private);

        /* Create a pool with the maximum number of resources the
         * streams can require.
         */

        /* use common pool for non-shareable priv_xform and stream */
        nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

        snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
                 dev->data->dev_id);

        /* TBD: should the per-core object cache be used for stream
         * resources?
         */
        zip_buf_mp = rte_mempool_create(res_pool,
                        nb_streams * MAX_BUFS_PER_STREAM, ZIP_BUF_SIZE,
                        0, 0, NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);

        if (zip_buf_mp == NULL) {
                ZIP_PMD_ERR(
                        "Failed to create buf mempool octtx_zip_res_pool%u",
                        dev->data->dev_id);
                return -1;
        }

        vf->zip_mp = zip_buf_mp;

        return 0;
}
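
/*
 * Application-side sketch (illustrative field values; not part of this
 * driver): the max_nb_priv_xforms/max_nb_streams fields consumed above
 * come from the application's configuration call:
 *
 *      struct rte_compressdev_config cfg = {
 *              .socket_id = rte_socket_id(),
 *              .nb_queue_pairs = 1,
 *              .max_nb_priv_xforms = 16,
 *              .max_nb_streams = 0,
 *      };
 *      int rc = rte_compressdev_configure(dev_id, &cfg);
 */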

/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
        return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
        if (dev == NULL)
                return -1;

        struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
        rte_mempool_free(vf->zip_mp);

        return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
                struct rte_compressdev_stats *stats)
{
        int qp_id;

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

                stats->enqueued_count += qp->qp_stats.enqueued_count;
                stats->dequeued_count += qp->qp_stats.dequeued_count;

                stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
                stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
        }
}
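
/*
 * Sketch of the matching application call (dev_id illustrative); it
 * reaches zip_pmd_stats_get() through octtx_zip_pmd_ops below:
 *
 *      struct rte_compressdev_stats stats;
 *      rte_compressdev_stats_get(dev_id, &stats);
 */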

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
        int qp_id;

        for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
                struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
                memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
        }
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
                struct rte_compressdev_info *dev_info)
{
        struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

        if (dev_info != NULL) {
                dev_info->driver_name = dev->device->driver->name;
                dev_info->feature_flags = dev->feature_flags;
                dev_info->capabilities = octtx_zip_pmd_capabilities;
                dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
        }
}
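
/*
 * Sketch of the matching application call (dev_id illustrative):
 *
 *      struct rte_compressdev_info info;
 *      rte_compressdev_info_get(dev_id, &info);
 *
 * info.capabilities then points at octtx_zip_pmd_capabilities.
 */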

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
        struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

        if (qp != NULL) {
                zipvf_q_term(qp);

                if (qp->processed_pkts)
                        rte_ring_free(qp->processed_pkts);

                rte_free(qp);
                dev->data->queue_pairs[qp_id] = NULL;
        }
        return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
                unsigned int ring_size, int socket_id)
{
        struct rte_ring *r;

        r = rte_ring_lookup(qp->name);
        if (r) {
                if (rte_ring_get_size(r) >= ring_size) {
                        ZIP_PMD_INFO("Reusing existing ring %s for processed"
                                        " packets", qp->name);
                        return r;
                }

                ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
                                " packets", qp->name);
                return NULL;
        }
        return rte_ring_create(qp->name, ring_size, socket_id,
                        RING_F_EXACT_SZ);
}
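
/*
 * Design note: RING_F_EXACT_SZ lets the ring hold exactly ring_size
 * entries (max_inflight_ops). A plain rte_ring requires a power-of-two
 * count and can hold one entry less than that, so the flag avoids
 * over-allocating when the inflight count is not a power of two.
 */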

/** Setup a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                uint32_t max_inflight_ops, int socket_id)
{
        struct zipvf_qp *qp = NULL;
        struct zip_vf *vf;
        char *name;
        int ret;

        if (!dev)
                return -1;

        vf = (struct zip_vf *)(dev->data->dev_private);

        /* Reuse the queue pair if it is already allocated. */
        if (dev->data->queue_pairs[qp_id] != NULL) {
                ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
                return 0;
        }

        name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
        if (name == NULL)
                return (-ENOMEM);
        snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
                 "zip_pmd_%u_qp_%u",
                 dev->data->dev_id, qp_id);

        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc_socket(name, sizeof(*qp),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (qp == NULL) {
                rte_free(name);
                return (-ENOMEM);
        }

        qp->name = name;

        /* Create completion queue up to max_inflight_ops */
        qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
                        max_inflight_ops, socket_id);
        if (qp->processed_pkts == NULL)
                goto qp_setup_cleanup;

        qp->id = qp_id;
        qp->vf = vf;

        ret = zipvf_q_init(qp);
        if (ret < 0)
                goto qp_setup_cleanup;

        dev->data->queue_pairs[qp_id] = qp;

        memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
        return 0;

qp_setup_cleanup:
        if (qp->processed_pkts)
                rte_ring_free(qp->processed_pkts);
        rte_free(qp);
        return -1;
}
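
/*
 * Sketch of the matching application call (illustrative values):
 *
 *      rte_compressdev_queue_pair_setup(dev_id, 0, 512, rte_socket_id());
 *
 * The third argument (max_inflight_ops) sizes the completion ring
 * created above.
 */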

struct rte_compressdev_ops octtx_zip_pmd_ops = {
        .dev_configure          = zip_pmd_config,
        .dev_start              = zip_pmd_start,
        .dev_stop               = zip_pmd_stop,
        .dev_close              = zip_pmd_close,

        .stats_get              = zip_pmd_stats_get,
        .stats_reset            = zip_pmd_stats_reset,

        .dev_infos_get          = zip_pmd_info_get,

        .queue_pair_setup       = zip_pmd_qp_setup,
        .queue_pair_release     = zip_pmd_qp_release,
};
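
/*
 * The compressdev framework dispatches the public API through this
 * table: rte_compressdev_configure() lands in zip_pmd_config(),
 * rte_compressdev_close() in zip_pmd_close(), and so on.
 */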

static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        int ret = 0;
        char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
        struct rte_compressdev *compressdev;
        struct rte_compressdev_pmd_init_params init_params = {
                "",
                rte_socket_id(),
        };

        ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
                        (unsigned int)pci_dev->id.vendor_id,
                        (unsigned int)pci_dev->id.device_id);

        rte_pci_device_name(&pci_dev->addr, compressdev_name,
                        sizeof(compressdev_name));

        compressdev = rte_compressdev_pmd_create(compressdev_name,
                &pci_dev->device, sizeof(struct zip_vf), &init_params);
        if (compressdev == NULL) {
                ZIP_PMD_ERR("driver %s: create failed", init_params.name);
                return -ENODEV;
        }

        /* Create the VF device only in the primary process. */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                /* create vf dev with given pmd dev id */
                ret = zipvf_create(compressdev);
                if (ret < 0) {
                        ZIP_PMD_ERR("Device creation failed");
                        rte_compressdev_pmd_destroy(compressdev);
                        return ret;
                }
        }

        compressdev->dev_ops = &octtx_zip_pmd_ops;
        /* register rx/tx burst functions for data path */
        compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
        return ret;
}

static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_compressdev *compressdev;
        char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

        if (pci_dev == NULL) {
                ZIP_PMD_ERR("Invalid PCI Device");
                return -EINVAL;
        }

        rte_pci_device_name(&pci_dev->addr, compressdev_name,
                        sizeof(compressdev_name));

        compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
        if (compressdev == NULL)
                return -ENODEV;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                if (zipvf_destroy(compressdev) < 0)
                        return -ENODEV;
        }
        return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
                        PCI_DEVICE_ID_OCTEONTX_ZIPVF),
        },
        {
                .device_id = 0
        },
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
        .id_table = pci_id_octtx_zipvf_table,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = zip_pci_probe,
        .remove = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
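
/*
 * RTE_PMD_REGISTER_PCI hooks octtx_zip_pmd into the PCI bus scan at
 * constructor time; RTE_PMD_REGISTER_PCI_TABLE exports the ID table so
 * tools such as dpdk-pmdinfo can list the devices this PMD claims.
 */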

RTE_INIT(octtx_zip_init_log);

static void
octtx_zip_init_log(void)
{
        octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
        if (octtx_zip_logtype_driver >= 0)
                rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
}