compress/qat: add enqueue/dequeue functions
[dpdk.git] / drivers/compress/qat/qat_comp_pmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include "qat_comp.h"
#include "qat_comp_pmd.h"

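/* Copy the accumulated compression-service counters from the common QAT
 * layer into the compressdev stats structure.
 */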
void
qat_comp_stats_get(struct rte_compressdev *dev,
                struct rte_compressdev_stats *stats)
{
        struct qat_common_stats qat_stats = {0};
        struct qat_comp_dev_private *qat_priv;

        if (stats == NULL || dev == NULL) {
                QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
                return;
        }
        qat_priv = dev->data->dev_private;

        qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
        stats->enqueued_count = qat_stats.enqueued_count;
        stats->dequeued_count = qat_stats.dequeued_count;
        stats->enqueue_err_count = qat_stats.enqueue_err_count;
        stats->dequeue_err_count = qat_stats.dequeue_err_count;
}

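/* Reset the compression-service counters kept by the common QAT layer. */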
void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
        struct qat_comp_dev_private *qat_priv;

        if (dev == NULL) {
                QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
                return;
        }
        qat_priv = dev->data->dev_private;

        qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
}

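/* Release a queue pair: clear its slot in the qat_pci_device bookkeeping,
 * then free the ring memory and qp metadata.
 */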
int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
        struct qat_comp_dev_private *qat_private = dev->data->dev_private;

        QAT_LOG(DEBUG, "Release comp qp %u on device %d",
                                queue_pair_id, dev->data->dev_id);

        qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
                                                = NULL;

        return qat_qp_release((struct qat_qp **)
                        &(dev->data->queue_pairs[queue_pair_id]));
}

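/* Set up a compression queue pair: validate qp_id against the hardware
 * layout for this device generation, fill in the qp config and create
 * the queue pair via the common QAT layer.
 */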
int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                  uint32_t max_inflight_ops, int socket_id)
{
        int ret = 0;
        struct qat_qp_config qat_qp_conf;

        struct qat_qp **qp_addr =
                        (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
        struct qat_comp_dev_private *qat_private = dev->data->dev_private;
        const struct qat_qp_hw_data *comp_hw_qps =
                        qat_gen_config[qat_private->qat_dev->qat_dev_gen]
                                      .qp_hw_data[QAT_SERVICE_COMPRESSION];
        const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;

        /* If qp is already in use, free ring memory and qp metadata. */
        if (*qp_addr != NULL) {
                ret = qat_comp_qp_release(dev, qp_id);
                if (ret < 0)
                        return ret;
        }
        if (qp_id >= qat_qps_per_service(comp_hw_qps,
                                         QAT_SERVICE_COMPRESSION)) {
                QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
                return -EINVAL;
        }

        qat_qp_conf.hw = qp_hw_data;
        qat_qp_conf.build_request = qat_comp_build_request;
        qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
        qat_qp_conf.nb_descriptors = max_inflight_ops;
        qat_qp_conf.socket_id = socket_id;
        qat_qp_conf.service_str = "comp";

        ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
        if (ret != 0)
                return ret;

        /* Store a link to the qp in the qat_pci_device. */
        qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
                                                        = *qp_addr;

        return ret;
}

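/* Create the mempool used for private xforms, or reuse an existing one;
 * a pool of the wrong size is freed and recreated.
 */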
static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
                              uint32_t num_elements)
{
        char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *mp;

        snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
                        "%s_xforms", comp_dev->qat_dev->name);

        QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
        mp = rte_mempool_lookup(xform_pool_name);

        if (mp != NULL) {
                QAT_LOG(DEBUG, "xformpool already created");
                if (mp->size != num_elements) {
                        QAT_LOG(DEBUG, "xformpool wrong size - delete it");
                        rte_mempool_free(mp);
                        mp = NULL;
                        comp_dev->xformpool = NULL;
                }
        }

        if (mp == NULL)
                mp = rte_mempool_create(xform_pool_name,
                                num_elements,
                                qat_comp_xform_size(), 0, 0,
                                NULL, NULL, NULL, NULL, rte_socket_id(),
                                0);
        if (mp == NULL) {
                QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
                        xform_pool_name, num_elements, qat_comp_xform_size());
                return NULL;
        }

        return mp;
}

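/* Release resources allocated in qat_comp_dev_config(). */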
static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
        /* Free the internal mempool used for private xforms. */
        if (comp_dev->xformpool) {
                rte_mempool_free(comp_dev->xformpool);
                comp_dev->xformpool = NULL;
        }
}

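/* Configure the device: stateful compression is not supported, so
 * max_nb_streams must be 0; the private_xform pool is sized to
 * config->max_nb_priv_xforms.
 */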
int
qat_comp_dev_config(struct rte_compressdev *dev,
                struct rte_compressdev_config *config)
{
        struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
        int ret = 0;

        if (config->max_nb_streams != 0) {
                QAT_LOG(ERR,
        "QAT device does not support STATEFUL so max_nb_streams must be 0");
                return -EINVAL;
        }

        comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
                                        config->max_nb_priv_xforms);
        if (comp_dev->xformpool == NULL) {
                ret = -ENOMEM;
                goto error_out;
        }
        return 0;

error_out:
        _qat_comp_dev_config_clear(comp_dev);
        return ret;
}

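/* Close the device: release every queue pair and free the xform pool. */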
int
qat_comp_dev_close(struct rte_compressdev *dev)
{
        int i;
        int ret = 0;
        struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                ret = qat_comp_qp_release(dev, i);
                if (ret < 0)
                        return ret;
        }

        _qat_comp_dev_config_clear(comp_dev);

        return ret;
}

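/* Report the number of queue pairs available to the compression service
 * on this device generation, plus feature flags and capabilities.
 */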
void
qat_comp_dev_info_get(struct rte_compressdev *dev,
                        struct rte_compressdev_info *info)
{
        struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
        const struct qat_qp_hw_data *comp_hw_qps =
                qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
                              .qp_hw_data[QAT_SERVICE_COMPRESSION];

        if (info != NULL) {
                info->max_nb_queue_pairs =
                        qat_qps_per_service(comp_hw_qps,
                                            QAT_SERVICE_COMPRESSION);
                info->feature_flags = dev->feature_flags;
                info->capabilities = comp_dev->qat_dev_capabilities;
        }
}

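/* Enqueue/dequeue bursts of compression ops: thin wrappers around the
 * common QAT queue functions, which build the firmware requests and
 * process the responses.
 */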
uint16_t
qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
                              uint16_t nb_ops)
{
        return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}

uint16_t
qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
                              uint16_t nb_ops)
{
        return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}