compress/octeontx: add device setup ops
[dpdk.git] / drivers / compress / octeontx / otx_zip_pmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Cavium, Inc
3  */
4
5 #include <string.h>
6
7 #include <rte_byteorder.h>
8 #include <rte_common.h>
9 #include <rte_cpuflags.h>
10 #include <rte_malloc.h>
11
12 #include "otx_zip.h"
13
/** Capabilities advertised by this PMD: DEFLATE only, stateless,
 * with both fixed and dynamic Huffman coding and a 2^1..2^14 window.
 */
static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non sharable Priv XFORM and Stateless */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* size supported 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};
30
31 /** Configure device */
32 static int
33 zip_pmd_config(struct rte_compressdev *dev,
34                 struct rte_compressdev_config *config)
35 {
36         int nb_streams;
37         char res_pool[RTE_MEMZONE_NAMESIZE];
38         struct zip_vf *vf;
39         struct rte_mempool *zip_buf_mp;
40
41         if (!config || !dev)
42                 return -EIO;
43
44         vf = (struct zip_vf *)(dev->data->dev_private);
45
46         /* create pool with maximum numbers of resources
47          * required by streams
48          */
49
50         /* use common pool for non-shareable priv_xform and stream */
51         nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
52
53         snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
54                  dev->data->dev_id);
55
56         /** TBD Should we use the per core object cache for stream resources */
57         zip_buf_mp = rte_mempool_create(
58                         res_pool,
59                         nb_streams * MAX_BUFS_PER_STREAM,
60                         ZIP_BUF_SIZE,
61                         0,
62                         0,
63                         NULL,
64                         NULL,
65                         NULL,
66                         NULL,
67                         SOCKET_ID_ANY,
68                         0);
69
70         if (zip_buf_mp == NULL) {
71                 ZIP_PMD_ERR(
72                         "Failed to create buf mempool octtx_zip_res_pool%u",
73                         dev->data->dev_id);
74                 return -1;
75         }
76
77         vf->zip_mp = zip_buf_mp;
78
79         return 0;
80 }
81
/** Start device.
 *
 * No PMD-specific start action is required; always reports success.
 */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}
88
/** Stop device.
 *
 * No PMD-specific stop action is required; intentionally empty.
 */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}
95
96 /** Close device */
97 static int
98 zip_pmd_close(struct rte_compressdev *dev)
99 {
100         if (dev == NULL)
101                 return -1;
102
103         struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
104         rte_mempool_free(vf->zip_mp);
105
106         return 0;
107 }
108
109 /** Get device statistics */
110 static void
111 zip_pmd_stats_get(struct rte_compressdev *dev,
112                 struct rte_compressdev_stats *stats)
113 {
114         int qp_id;
115
116         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
117                 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
118
119                 stats->enqueued_count += qp->qp_stats.enqueued_count;
120                 stats->dequeued_count += qp->qp_stats.dequeued_count;
121
122                 stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
123                 stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
124         }
125 }
126
127 /** Reset device statistics */
128 static void
129 zip_pmd_stats_reset(struct rte_compressdev *dev)
130 {
131         int qp_id;
132
133         for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
134                 struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
135                 memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
136         }
137 }
138
139 /** Get device info */
140 static void
141 zip_pmd_info_get(struct rte_compressdev *dev,
142                 struct rte_compressdev_info *dev_info)
143 {
144         struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
145
146         if (dev_info != NULL) {
147                 dev_info->driver_name = dev->device->driver->name;
148                 dev_info->feature_flags = dev->feature_flags;
149                 dev_info->capabilities = octtx_zip_pmd_capabilities;
150                 dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
151         }
152 }
153
154 /** Release queue pair */
155 static int
156 zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
157 {
158         struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
159
160         if (qp != NULL) {
161                 zipvf_q_term(qp);
162
163                 if (qp->processed_pkts)
164                         rte_ring_free(qp->processed_pkts);
165
166                 rte_free(qp);
167                 dev->data->queue_pairs[qp_id] = NULL;
168         }
169         return 0;
170 }
171
172 /** Create a ring to place process packets on */
173 static struct rte_ring *
174 zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
175                 unsigned int ring_size, int socket_id)
176 {
177         struct rte_ring *r;
178
179         r = rte_ring_lookup(qp->name);
180         if (r) {
181                 if (rte_ring_get_size(r) >= ring_size) {
182                         ZIP_PMD_INFO("Reusing existing ring %s for processed"
183                                         " packets", qp->name);
184                         return r;
185                 }
186
187                 ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
188                                 " packets", qp->name);
189                 return NULL;
190         }
191
192         return rte_ring_create(qp->name, ring_size, socket_id,
193                                                 RING_F_EXACT_SZ);
194 }
195
196 /** Setup a queue pair */
197 static int
198 zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
199                 uint32_t max_inflight_ops, int socket_id)
200 {
201         struct zipvf_qp *qp = NULL;
202         struct zip_vf *vf;
203         char *name;
204         int ret;
205
206         if (!dev)
207                 return -1;
208
209         vf = (struct zip_vf *) (dev->data->dev_private);
210
211         /* Free memory prior to re-allocation if needed. */
212         if (dev->data->queue_pairs[qp_id] != NULL) {
213                 ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
214                 return 0;
215         }
216
217         name =  rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
218         snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
219                  "zip_pmd_%u_qp_%u",
220                  dev->data->dev_id, qp_id);
221
222         /* Allocate the queue pair data structure. */
223         qp = rte_zmalloc_socket(name, sizeof(*qp),
224                                 RTE_CACHE_LINE_SIZE, socket_id);
225         if (qp == NULL)
226                 return (-ENOMEM);
227
228         qp->name = name;
229
230         /* Create completion queue upto max_inflight_ops */
231         qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
232                                                 max_inflight_ops, socket_id);
233         if (qp->processed_pkts == NULL)
234                 goto qp_setup_cleanup;
235
236         qp->id = qp_id;
237         qp->vf = vf;
238
239         ret = zipvf_q_init(qp);
240         if (ret < 0)
241                 goto qp_setup_cleanup;
242
243         dev->data->queue_pairs[qp_id] = qp;
244
245         memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
246         return 0;
247
248 qp_setup_cleanup:
249         if (qp->processed_pkts)
250                 rte_ring_free(qp->processed_pkts);
251         if (qp)
252                 rte_free(qp);
253         return -1;
254 }
255
/** Compressdev operation table exported by this PMD; hooked up to the
 * device in zip_pci_probe().
 */
struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,
};
270
271 static int
272 zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
273         struct rte_pci_device *pci_dev)
274 {
275         int ret = 0;
276         char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
277         struct rte_compressdev *compressdev;
278         struct rte_compressdev_pmd_init_params init_params = {
279                 "",
280                 rte_socket_id(),
281         };
282
283         ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
284                         (unsigned int)pci_dev->id.vendor_id,
285                         (unsigned int)pci_dev->id.device_id);
286
287         rte_pci_device_name(&pci_dev->addr, compressdev_name,
288                             sizeof(compressdev_name));
289
290         compressdev = rte_compressdev_pmd_create(compressdev_name,
291                 &pci_dev->device, sizeof(struct zip_vf), &init_params);
292         if (compressdev == NULL) {
293                 ZIP_PMD_ERR("driver %s: create failed", init_params.name);
294                 return -ENODEV;
295         }
296
297         /*
298          * create only if proc_type is primary.
299          */
300         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
301                 /*  create vf dev with given pmd dev id */
302                 ret = zipvf_create(compressdev);
303                 if (ret < 0) {
304                         ZIP_PMD_ERR("Device creation failed");
305                         rte_compressdev_pmd_destroy(compressdev);
306                         return ret;
307                 }
308         }
309
310         compressdev->dev_ops = &octtx_zip_pmd_ops;
311         /* register rx/tx burst functions for data path */
312         compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
313         return ret;
314 }
315
316 static int
317 zip_pci_remove(struct rte_pci_device *pci_dev)
318 {
319         struct rte_compressdev *compressdev;
320         char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
321
322         if (pci_dev == NULL) {
323                 ZIP_PMD_ERR(" Invalid PCI Device\n");
324                 return -EINVAL;
325         }
326         rte_pci_device_name(&pci_dev->addr, compressdev_name,
327                         sizeof(compressdev_name));
328
329         compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
330         if (compressdev == NULL)
331                 return -ENODEV;
332
333         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
334                 if (zipvf_destroy(compressdev) < 0)
335                         return -ENODEV;
336         }
337         return rte_compressdev_pmd_destroy(compressdev);
338 }
339
/** PCI IDs matched by this driver; terminated by a zeroed entry. */
static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		.device_id = 0
	},
};
349
/**
 * Structure that represents a PCI driver.
 * NEED_MAPPING requests that the EAL map the device BARs before probe.
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table    = pci_id_octtx_zipvf_table,
	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
	.probe       = zip_pci_probe,
	.remove      = zip_pci_remove,
};
359
/* Register the driver and expose its PCI ID table to the EAL/pmdinfo. */
RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
362
/* Run log registration as an EAL constructor at startup. */
RTE_INIT(octtx_zip_init_log);

/** Register the driver's log type and default it to INFO level. */
static void
octtx_zip_init_log(void)
{
	octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
	if (octtx_zip_logtype_driver >= 0)
		rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
}