crypto/qat: move generic qp function to qp file
[dpdk.git] / drivers/crypto/qat/qat_qp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_qp.h"
#include "qat_sym.h"

#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC                        4096
#define ADF_MIN_SYM_DESC                        128
#define ADF_SYM_TX_RING_DESC_SIZE               128
#define ADF_SYM_RX_RING_DESC_SIZE               32
#define ADF_SYM_TX_QUEUE_STARTOFF               2
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF               10
#define ADF_ARB_REG_SLOT                        0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
        ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
        (ADF_ARB_REG_SLOT * index), value)

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
        uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
        int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

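/*
 * Look up or reserve an IOVA-contiguous memzone for a ring. An existing
 * memzone of sufficient size on a compatible socket is re-used; otherwise
 * a new zone is reserved, aligned to its own size so the ring base meets
 * the natural-alignment check applied at queue creation.
 */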
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
                        int socket_id)
{
        const struct rte_memzone *mz;

        PMD_INIT_FUNC_TRACE();
        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                                ((socket_id == SOCKET_ID_ANY) ||
                                        (socket_id == mz->socket_id))) {
                        PMD_DRV_LOG(DEBUG, "re-use memzone already "
                                        "allocated for %s", queue_name);
                        return mz;
                }

                PMD_DRV_LOG(ERR, "Incompatible memzone already "
                                "allocated %s, size %u, socket %d. "
                                "Requested size %u, socket %u",
                                queue_name, (uint32_t)mz->len,
                                mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
                                        queue_name, queue_size, socket_id);
        return rte_memzone_reserve_aligned(queue_name, queue_size,
                socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

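/*
 * Set up a symmetric crypto queue pair: allocate the qp metadata, create
 * the Tx (request) and Rx (response) hardware rings, enable arbitration
 * on the Tx ring, and populate a per-descriptor pool of op cookies whose
 * SGL members are used when building requests.
 */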
int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        int socket_id, struct rte_mempool *session_pool __rte_unused)
{
        struct qat_qp *qp;
        struct rte_pci_device *pci_dev;
        int ret;
        char op_cookie_pool_name[RTE_RING_NAMESIZE];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        /* If qp is already in use free ring memory and qp metadata. */
        if (dev->data->queue_pairs[queue_pair_id] != NULL) {
                ret = qat_sym_qp_release(dev, queue_pair_id);
                if (ret < 0)
                        return ret;
        }

        if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
                (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
                PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
                                qp_conf->nb_descriptors);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[0].addr == NULL) {
                PMD_DRV_LOG(ERR, "Could not find VF config space "
                                "(UIO driver attached?).");
                return -EINVAL;
        }

        if (queue_pair_id >=
                        (ADF_NUM_SYM_QPS_PER_BUNDLE *
                                        ADF_NUM_BUNDLES_PER_DEV)) {
                PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
                                queue_pair_id);
                return -EINVAL;
        }
        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc("qat PMD qp metadata",
                        sizeof(*qp), RTE_CACHE_LINE_SIZE);
        if (qp == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
                return -ENOMEM;
        }
        qp->nb_descriptors = qp_conf->nb_descriptors;
        qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
                        qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
                        RTE_CACHE_LINE_SIZE);
        if (qp->op_cookies == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
                rte_free(qp);
                return -ENOMEM;
        }

        qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
        qp->inflights16 = 0;

        if (qat_tx_queue_create(dev, &(qp->tx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_INIT_LOG(ERR, "Tx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                goto create_err;
        }

        if (qat_rx_queue_create(dev, &(qp->rx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Rx queue create failed "
                                "queue_pair_id=%hu", queue_pair_id);
                qat_queue_delete(&(qp->tx_q));
                goto create_err;
        }

        adf_configure_queues(qp);
        adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
        snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
                pci_dev->driver->driver.name, dev->data->dev_id,
                queue_pair_id);

        qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
        if (qp->op_cookie_pool == NULL)
                qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
                                qp->nb_descriptors,
                                sizeof(struct qat_sym_op_cookie), 64, 0,
                                NULL, NULL, NULL, NULL, socket_id,
                                0);
        if (!qp->op_cookie_pool) {
                PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
                                " op mempool");
                goto create_err;
        }

        for (i = 0; i < qp->nb_descriptors; i++) {
                if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
                        PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
                        goto create_err;
                }

                struct qat_sym_op_cookie *sgl_cookie =
                                qp->op_cookies[i];

                sgl_cookie->qat_sgl_src_phys_addr =
                                rte_mempool_virt2iova(sgl_cookie) +
                                offsetof(struct qat_sym_op_cookie,
                                qat_sgl_list_src);

                sgl_cookie->qat_sgl_dst_phys_addr =
                                rte_mempool_virt2iova(sgl_cookie) +
                                offsetof(struct qat_sym_op_cookie,
                                qat_sgl_list_dst);
        }

        struct qat_pmd_private *internals
                = dev->data->dev_private;
        qp->qat_dev_gen = internals->qat_dev_gen;
        qp->build_request = qat_sym_build_request;
        qp->process_response = qat_sym_process_response;

        dev->data->queue_pairs[queue_pair_id] = qp;
        return 0;

create_err:
        if (qp->op_cookie_pool)
                rte_mempool_free(qp->op_cookie_pool);
        rte_free(qp->op_cookies);
        rte_free(qp);
        return -EFAULT;
}

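/*
 * Release a queue pair. Refuses with -EAGAIN while responses are still
 * in flight; otherwise deletes both rings, disables Tx arbitration,
 * returns the op cookies to their pool and frees the qp metadata.
 */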
int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
        struct qat_qp *qp =
                        (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();
        if (qp == NULL) {
                PMD_DRV_LOG(DEBUG, "qp already freed");
                return 0;
        }

        /* Don't free memory if there are still responses to be processed */
        if (qp->inflights16 == 0) {
                qat_queue_delete(&(qp->tx_q));
                qat_queue_delete(&(qp->rx_q));
        } else {
                return -EAGAIN;
        }

        adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);

        for (i = 0; i < qp->nb_descriptors; i++)
                rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

        if (qp->op_cookie_pool)
                rte_mempool_free(qp->op_cookie_pool);

        rte_free(qp->op_cookies);
        rte_free(qp);
        dev->data->queue_pairs[queue_pair_id] = NULL;
        return 0;
}

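/* Map a qp_id onto its bundle and Tx ring number, then create the ring. */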
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t qp_id,
        uint32_t nb_desc, int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_TX_QUEUE_STARTOFF;
        PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);

        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

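/* Map a qp_id onto its bundle and Rx ring number, then create the ring. */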
static int qat_rx_queue_create(struct rte_cryptodev *dev,
                struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
                int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_RX_QUEUE_STARTOFF;

        PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);
        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

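/*
 * Free the memzone backing a ring, first overwriting it with the unused
 * (0x7F) pattern so stale descriptors are not mistaken for valid ones if
 * the zone is re-used.
 */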
static void qat_queue_delete(struct qat_queue *queue)
{
        const struct rte_memzone *mz;
        int status = 0;

        if (queue == NULL) {
                PMD_DRV_LOG(DEBUG, "Invalid queue");
                return;
        }
        mz = rte_memzone_lookup(queue->memz_name);
        if (mz != NULL) {
                /* Write an unused pattern to the queue memory. */
                memset(queue->base_addr, 0x7F, queue->queue_size);
                status = rte_memzone_free(mz);
                if (status != 0)
                        PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
                                        status, queue->memz_name);
        } else {
                PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
                                queue->memz_name);
        }
}

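/*
 * Common ring creation: reserve the DMA memory, verify size and alignment,
 * derive the CSR ring-size encoding, max in-flight count and wrap-around
 * modulo, fill the ring with the empty pattern and program the ring base
 * address CSR.
 */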
static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
                uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
        uint64_t queue_base;
        void *io_addr;
        const struct rte_memzone *qp_mz;
        uint32_t queue_size_bytes = nb_desc*desc_size;
        struct rte_pci_device *pci_dev;

        PMD_INIT_FUNC_TRACE();
        if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        /*
         * Allocate a memzone for the queue - create a unique name.
         */
        snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
                pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
                queue->hw_bundle_number, queue->hw_queue_number);
        qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
                        socket_id);
        if (qp_mz == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
                return -ENOMEM;
        }

        queue->base_addr = (char *)qp_mz->addr;
        queue->base_phys_addr = qp_mz->iova;
        if (qat_qp_check_queue_alignment(queue->base_phys_addr,
                        queue_size_bytes)) {
                PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
                                        " 0x%"PRIx64"\n",
                                        queue->base_phys_addr);
                return -EFAULT;
        }

        if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
                        != 0) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                return -EINVAL;
        }

        queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
                                        ADF_BYTES_TO_MSG_SIZE(desc_size));
        queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
        PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
                                " msg_size %u, max_inflights %u modulo %u",
                                queue->queue_size, queue_size_bytes,
                                nb_desc, desc_size, queue->max_inflights,
                                queue->modulo);

        if (queue->max_inflights < 2) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                return -EINVAL;
        }
        queue->head = 0;
        queue->tail = 0;
        queue->msg_size = desc_size;

        /*
         * Write an unused pattern to the queue memory.
         */
        memset(queue->base_addr, 0x7F, queue_size_bytes);

        queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
                                        queue->queue_size);

        io_addr = pci_dev->mem_resource[0].addr;

        WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_base);
        return 0;
}

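/* The ring base must be naturally aligned to the ring size in bytes. */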
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
                                        uint32_t queue_size_bytes)
{
        PMD_INIT_FUNC_TRACE();
        if (((queue_size_bytes - 1) & phys_addr) != 0)
                return -EINVAL;
        return 0;
}

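/*
 * Translate (msg size * msg count) into the CSR ring-size encoding; only
 * sizes that match one of the device's supported encodings are accepted.
 */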
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *p_queue_size_for_csr)
{
        uint8_t i = ADF_MIN_RING_SIZE;

        PMD_INIT_FUNC_TRACE();
        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) ==
                                (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
                        *p_queue_size_for_csr = i;
                        return 0;
                }
        PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
        return -EINVAL;
}

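/* Set this Tx ring's bit in the bundle's ring service arbiter enable CSR. */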
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value |= (0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

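/* Clear this Tx ring's bit in the bundle's ring service arbiter enable CSR. */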
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value &= ~(0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

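/*
 * Program the ring config CSRs: plain config for the Tx (request) ring,
 * near-watermark config for the Rx (response) ring.
 */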
static void adf_configure_queues(struct qat_qp *qp)
{
        uint32_t queue_config;
        struct qat_queue *queue = &qp->tx_q;

        PMD_INIT_FUNC_TRACE();
        queue_config = BUILD_RING_CONFIG(queue->queue_size);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);

        queue = &qp->rx_q;
        queue_config =
                        BUILD_RESP_RING_CONFIG(queue->queue_size,
                                        ADF_RING_NEAR_WATERMARK_512,
                                        ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);
}

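/* data % (1 << shift), i.e. wrap a byte offset at the ring size. */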
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
        uint32_t div = data >> shift;
        uint32_t mult = div << shift;

        return data - mult;
}

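/* Push the SW tail to the Tx ring tail CSR and reset the pending count. */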
static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
{
        WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
                        q->hw_queue_number, q->tail);
        q->nb_pending_requests = 0;
        q->csr_tail = q->tail;
}

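/*
 * Mark processed response descriptors as empty (handling ring wrap-around)
 * and advance the Rx ring head CSR to the current SW head.
 */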
static inline void
rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
        uint32_t old_head, new_head;
        uint32_t max_head;

        old_head = q->csr_head;
        new_head = q->head;
        max_head = qp->nb_descriptors * q->msg_size;

        /* write out free descriptors */
        void *cur_desc = (uint8_t *)q->base_addr + old_head;

        if (new_head < old_head) {
                memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
                memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
        } else {
                memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
        }
        q->nb_processed_responses = 0;
        q->csr_head = new_head;

        /* write current head to CSR */
        WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
                            q->hw_queue_number, new_head);
}

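/*
 * Enqueue a burst of ops on the Tx ring. The number actually enqueued is
 * capped by the remaining in-flight budget; the tail CSR write is coalesced
 * and only issued when few requests are in flight or enough new requests
 * have accumulated.
 */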
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
        register struct qat_queue *queue;
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
        register uint32_t nb_ops_sent = 0;
        register int ret;
        uint16_t nb_ops_possible = nb_ops;
        register uint8_t *base_addr;
        register uint32_t tail;
        int overflow;

        if (unlikely(nb_ops == 0))
                return 0;

        /* read params used a lot in main loop into registers */
        queue = &(tmp_qp->tx_q);
        base_addr = (uint8_t *)queue->base_addr;
        tail = queue->tail;

        /* Find how many can actually fit on the ring */
        tmp_qp->inflights16 += nb_ops;
        overflow = tmp_qp->inflights16 - queue->max_inflights;
        if (overflow > 0) {
                tmp_qp->inflights16 -= overflow;
                nb_ops_possible = nb_ops - overflow;
                if (nb_ops_possible == 0)
                        return 0;
        }

        while (nb_ops_sent != nb_ops_possible) {
                ret = tmp_qp->build_request(*ops, base_addr + tail,
                                tmp_qp->op_cookies[tail / queue->msg_size],
                                tmp_qp->qat_dev_gen);
                if (ret != 0) {
                        tmp_qp->stats.enqueue_err_count++;
                        /*
                         * This message cannot be enqueued;
                         * subtract the ops that will not be sent
                         * from the in-flight count.
                         */
                        tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
                        if (nb_ops_sent == 0)
                                return 0;
                        goto kick_tail;
                }

                tail = adf_modulo(tail + queue->msg_size, queue->modulo);
                ops++;
                nb_ops_sent++;
        }
kick_tail:
        queue->tail = tail;
        tmp_qp->stats.enqueued_count += nb_ops_sent;
        queue->nb_pending_requests += nb_ops_sent;
        if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
                    queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
                txq_write_tail(tmp_qp, queue);
        }
        return nb_ops_sent;
}

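/*
 * Dequeue up to nb_ops responses from the Rx ring. The head CSR write is
 * deferred until enough responses have been processed, and a pending Tx
 * tail update is flushed here once the in-flight count is low enough.
 */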
uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
        struct qat_queue *rx_queue, *tx_queue;
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
        uint32_t head;
        uint32_t resp_counter = 0;
        uint8_t *resp_msg;

        rx_queue = &(tmp_qp->rx_q);
        tx_queue = &(tmp_qp->tx_q);
        head = rx_queue->head;
        resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;

        while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
                        resp_counter != nb_ops) {

                tmp_qp->process_response(ops, resp_msg,
                        tmp_qp->op_cookies[head / rx_queue->msg_size],
                        tmp_qp->qat_dev_gen);

                head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);

                resp_msg = (uint8_t *)rx_queue->base_addr + head;
                ops++;
                resp_counter++;
        }
        if (resp_counter > 0) {
                rx_queue->head = head;
                tmp_qp->stats.dequeued_count += resp_counter;
                rx_queue->nb_processed_responses += resp_counter;
                tmp_qp->inflights16 -= resp_counter;

                if (rx_queue->nb_processed_responses >
                                                QAT_CSR_HEAD_WRITE_THRESH)
                        rxq_free_desc(tmp_qp, rx_queue);
        }
        /* also check if tail needs to be advanced */
        if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
                tx_queue->tail != tx_queue->csr_tail) {
                txq_write_tail(tmp_qp, tx_queue);
        }
        return resp_counter;
}