net/liquidio: add APIs to enable and disable IO queues
[dpdk.git] / drivers / net / liquidio / lio_rxtx.c
1 /*
2  *   BSD LICENSE
3  *
 4  *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Cavium, Inc. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_ethdev.h>
35 #include <rte_cycles.h>
36 #include <rte_malloc.h>
37
38 #include "lio_logs.h"
39 #include "lio_struct.h"
40 #include "lio_ethdev.h"
41 #include "lio_rxtx.h"
42
43 #define LIO_MAX_SG 12
44
45 static void
46 lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
47 {
48         uint32_t count = 0;
49
50         do {
51                 count += droq->buffer_size;
52         } while (count < LIO_MAX_RX_PKTLEN);
53 }
54
55 static void
56 lio_droq_reset_indices(struct lio_droq *droq)
57 {
58         droq->read_idx  = 0;
59         droq->write_idx = 0;
60         droq->refill_idx = 0;
61         droq->refill_count = 0;
62         rte_atomic64_set(&droq->pkts_pending, 0);
63 }
64
65 static void
66 lio_droq_destroy_ring_buffers(struct lio_droq *droq)
67 {
68         uint32_t i;
69
70         for (i = 0; i < droq->max_count; i++) {
71                 if (droq->recv_buf_list[i].buffer) {
72                         rte_pktmbuf_free((struct rte_mbuf *)
73                                          droq->recv_buf_list[i].buffer);
74                         droq->recv_buf_list[i].buffer = NULL;
75                 }
76         }
77
78         lio_droq_reset_indices(droq);
79 }
80
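/* Allocate a single receive buffer (mbuf) for the given output queue from
 * the mempool attached to that droq, and reset its metadata.
 */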
81 static void *
82 lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
83 {
84         struct lio_droq *droq = lio_dev->droq[q_no];
85         struct rte_mempool *mpool = droq->mpool;
86         struct rte_mbuf *m;
87
88         m = rte_pktmbuf_alloc(mpool);
89         if (m == NULL) {
90                 lio_dev_err(lio_dev, "Cannot allocate\n");
91                 return NULL;
92         }
93
94         rte_mbuf_refcnt_set(m, 1);
95         m->next = NULL;
96         m->data_off = RTE_PKTMBUF_HEADROOM;
97         m->nb_segs = 1;
98         m->pool = mpool;
99
100         return m;
101 }
102
103 static int
104 lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
105                             struct lio_droq *droq)
106 {
107         struct lio_droq_desc *desc_ring = droq->desc_ring;
108         uint32_t i;
109         void *buf;
110
111         for (i = 0; i < droq->max_count; i++) {
112                 buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
113                 if (buf == NULL) {
114                         lio_dev_err(lio_dev, "buffer alloc failed\n");
115                         lio_droq_destroy_ring_buffers(droq);
116                         return -ENOMEM;
117                 }
118
119                 droq->recv_buf_list[i].buffer = buf;
120                 droq->info_list[i].length = 0;
121
 122                 /* map buffer and info DMA addresses into the descriptor ring */
123                 desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
124                 desc_ring[i].buffer_ptr =
125                         lio_map_ring(droq->recv_buf_list[i].buffer);
126         }
127
128         lio_droq_reset_indices(droq);
129
130         lio_droq_compute_max_packet_bufs(droq);
131
132         return 0;
133 }
134
135 static void
136 lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
137 {
138         const struct rte_memzone *mz_tmp;
139         int ret = 0;
140
141         if (mz == NULL) {
142                 lio_dev_err(lio_dev, "Memzone NULL\n");
143                 return;
144         }
145
146         mz_tmp = rte_memzone_lookup(mz->name);
147         if (mz_tmp == NULL) {
148                 lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
149                 return;
150         }
151
152         ret = rte_memzone_free(mz);
153         if (ret)
154                 lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
155 }
156
157 /**
158  *  Frees the space for descriptor ring for the droq.
159  *
160  *  @param lio_dev      - pointer to the lio device structure
161  *  @param q_no         - droq no.
162  */
163 static void
164 lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
165 {
166         struct lio_droq *droq = lio_dev->droq[q_no];
167
168         lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
169
170         lio_droq_destroy_ring_buffers(droq);
171         rte_free(droq->recv_buf_list);
172         droq->recv_buf_list = NULL;
173         lio_dma_zone_free(lio_dev, droq->info_mz);
174         lio_dma_zone_free(lio_dev, droq->desc_ring_mz);
175
176         memset(droq, 0, LIO_DROQ_SIZE);
177 }
178
179 static void *
180 lio_alloc_info_buffer(struct lio_device *lio_dev,
181                       struct lio_droq *droq, unsigned int socket_id)
182 {
183         droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
184                                                  "info_list", droq->q_no,
185                                                  (droq->max_count *
186                                                         LIO_DROQ_INFO_SIZE),
187                                                  RTE_CACHE_LINE_SIZE,
188                                                  socket_id);
189
190         if (droq->info_mz == NULL)
191                 return NULL;
192
193         droq->info_list_dma = droq->info_mz->phys_addr;
194         droq->info_alloc_size = droq->info_mz->len;
195         droq->info_base_addr = (size_t)droq->info_mz->addr;
196
197         return droq->info_mz->addr;
198 }
199
200 /**
201  *  Allocates space for the descriptor ring for the droq and
 202  *  sets the base address, descriptor count, etc. in the Octeon registers.
 203  *
 204  * @param lio_dev       - pointer to the lio device structure
 205  * @param q_no          - droq no.
 206  * @param mpool         - mempool to allocate receive buffers from
207  * @return Success: 0   Failure: -1
208  */
209 static int
210 lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
211               uint32_t num_descs, uint32_t desc_size,
212               struct rte_mempool *mpool, unsigned int socket_id)
213 {
214         uint32_t c_refill_threshold;
215         uint32_t desc_ring_size;
216         struct lio_droq *droq;
217
218         lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
219
220         droq = lio_dev->droq[q_no];
221         droq->lio_dev = lio_dev;
222         droq->q_no = q_no;
223         droq->mpool = mpool;
224
225         c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
226
227         droq->max_count = num_descs;
228         droq->buffer_size = desc_size;
229
230         desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
231         droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
232                                                       "droq", q_no,
233                                                       desc_ring_size,
234                                                       RTE_CACHE_LINE_SIZE,
235                                                       socket_id);
236
237         if (droq->desc_ring_mz == NULL) {
238                 lio_dev_err(lio_dev,
239                             "Output queue %d ring alloc failed\n", q_no);
240                 return -1;
241         }
242
243         droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
244         droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
245
246         lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
247                     q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
248         lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
249                     droq->max_count);
250
251         droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
252         if (droq->info_list == NULL) {
253                 lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
254                 goto init_droq_fail;
255         }
256
257         droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
258                                                  (droq->max_count *
259                                                         LIO_DROQ_RECVBUF_SIZE),
260                                                  RTE_CACHE_LINE_SIZE,
261                                                  socket_id);
262         if (droq->recv_buf_list == NULL) {
263                 lio_dev_err(lio_dev,
264                             "Output queue recv buf list alloc failed\n");
265                 goto init_droq_fail;
266         }
267
268         if (lio_droq_setup_ring_buffers(lio_dev, droq))
269                 goto init_droq_fail;
270
271         droq->refill_threshold = c_refill_threshold;
272
273         rte_spinlock_init(&droq->lock);
274
275         lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);
276
277         lio_dev->io_qmask.oq |= (1ULL << q_no);
278
279         return 0;
280
281 init_droq_fail:
282         lio_delete_droq(lio_dev, q_no);
283
284         return -1;
285 }
286
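/* Allocate and initialize a driver output queue (droq) for oq_no, then post
 * the initial buffer credits to the hardware. Returns 0 if the queue is
 * already set up or on success, -ENOMEM on allocation failure.
 */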
287 int
288 lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
289                int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
290 {
291         struct lio_droq *droq;
292
293         PMD_INIT_FUNC_TRACE();
294
295         if (lio_dev->droq[oq_no]) {
296                 lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
297                 return 0;
298         }
299
 300         /* Allocate the data structure for the new droq. */
301         droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
302                                   RTE_CACHE_LINE_SIZE, socket_id);
303         if (droq == NULL)
304                 return -ENOMEM;
305
306         lio_dev->droq[oq_no] = droq;
307
308         /* Initialize the Droq */
309         if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
310                           socket_id)) {
311                 lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
312                 rte_free(lio_dev->droq[oq_no]);
313                 lio_dev->droq[oq_no] = NULL;
314                 return -ENOMEM;
315         }
316
317         lio_dev->num_oqs++;
318
319         lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);
320
 321         /* Send credits for the Octeon output queue. Credits are always
 322          * sent after the output queue is enabled.
 323          */
324         rte_write32(lio_dev->droq[oq_no]->max_count,
325                     lio_dev->droq[oq_no]->pkts_credit_reg);
326         rte_wmb();
327
328         return 0;
329 }
330
331 static inline uint32_t
332 lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
333 {
334         uint32_t buf_cnt = 0;
335
336         while (total_len > (buf_size * buf_cnt))
337                 buf_cnt++;
338
339         return buf_cnt;
340 }
341
342 /* If we were not able to refill all buffers, try to move around
343  * the buffers that were not dispatched.
344  */
345 static inline uint32_t
346 lio_droq_refill_pullup_descs(struct lio_droq *droq,
347                              struct lio_droq_desc *desc_ring)
348 {
349         uint32_t refill_index = droq->refill_idx;
350         uint32_t desc_refilled = 0;
351
352         while (refill_index != droq->read_idx) {
353                 if (droq->recv_buf_list[refill_index].buffer) {
354                         droq->recv_buf_list[droq->refill_idx].buffer =
355                                 droq->recv_buf_list[refill_index].buffer;
356                         desc_ring[droq->refill_idx].buffer_ptr =
357                                 desc_ring[refill_index].buffer_ptr;
358                         droq->recv_buf_list[refill_index].buffer = NULL;
359                         desc_ring[refill_index].buffer_ptr = 0;
360                         do {
361                                 droq->refill_idx = lio_incr_index(
362                                                         droq->refill_idx, 1,
363                                                         droq->max_count);
364                                 desc_refilled++;
365                                 droq->refill_count--;
366                         } while (droq->recv_buf_list[droq->refill_idx].buffer);
367                 }
368                 refill_index = lio_incr_index(refill_index, 1,
369                                               droq->max_count);
370         }       /* while */
371
372         return desc_refilled;
373 }
374
375 /* lio_droq_refill
376  *
377  * @param lio_dev       - pointer to the lio device structure
378  * @param droq          - droq in which descriptors require new buffers.
379  *
380  * Description:
 381  *  Called during Rx packet processing to refill the descriptors from which
 382  *  buffers were dispatched to upper layers. Attempts to allocate new
 383  *  buffers. If that fails, moves up buffers (that were not dispatched) to
 384  *  form a contiguous ring.
 385  *
 386  * Returns:
 387  *  Number of descriptors refilled.
388  *
389  * Locks:
390  * This routine is called with droq->lock held.
391  */
392 static uint32_t
393 lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
394 {
395         struct lio_droq_desc *desc_ring;
396         uint32_t desc_refilled = 0;
397         void *buf = NULL;
398
399         desc_ring = droq->desc_ring;
400
401         while (droq->refill_count && (desc_refilled < droq->max_count)) {
402                 /* If a valid buffer exists (happens if there is no dispatch),
403                  * reuse the buffer, else allocate.
404                  */
405                 if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
406                         buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
407                         /* If a buffer could not be allocated, no point in
408                          * continuing
409                          */
410                         if (buf == NULL)
411                                 break;
412
413                         droq->recv_buf_list[droq->refill_idx].buffer = buf;
414                 }
415
416                 desc_ring[droq->refill_idx].buffer_ptr =
417                     lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);
418                 /* Reset any previous values in the length field. */
419                 droq->info_list[droq->refill_idx].length = 0;
420
421                 droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
422                                                   droq->max_count);
423                 desc_refilled++;
424                 droq->refill_count--;
425         }
426
427         if (droq->refill_count)
428                 desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);
429
 430         /* If droq->refill_count is still non-zero here, it would not
 431          * change in pass two: we only moved buffers to close the gap in
 432          * the ring, but we would still have the same number of buffers
 433          * to refill.
 434          */
435         return desc_refilled;
436 }
437
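/* Process one received packet at droq->read_idx. A packet that fits in a
 * single buffer is returned as one mbuf; larger packets are chained across
 * multiple ring buffers. Returns the number of mbufs placed in rx_pkts, or
 * -1 if the info length is zero (no packet available).
 */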
438 static int
439 lio_droq_fast_process_packet(struct lio_device *lio_dev,
440                              struct lio_droq *droq,
441                              struct rte_mbuf **rx_pkts)
442 {
443         struct rte_mbuf *nicbuf = NULL;
444         struct lio_droq_info *info;
445         uint32_t total_len = 0;
446         int data_total_len = 0;
447         uint32_t pkt_len = 0;
448         union octeon_rh *rh;
449         int data_pkts = 0;
450
451         info = &droq->info_list[droq->read_idx];
452         lio_swap_8B_data((uint64_t *)info, 2);
453
454         if (!info->length)
455                 return -1;
456
 457         /* Len of resp hdr is included in the received data len. */
458         info->length -= OCTEON_RH_SIZE;
459         rh = &info->rh;
460
461         total_len += (uint32_t)info->length;
462
463         if (lio_opcode_slow_path(rh)) {
464                 uint32_t buf_cnt;
465
466                 buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
467                                                 (uint32_t)info->length);
468                 droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
469                                                 droq->max_count);
470                 droq->refill_count += buf_cnt;
471         } else {
472                 if (info->length <= droq->buffer_size) {
473                         if (rh->r_dh.has_hash)
474                                 pkt_len = (uint32_t)(info->length - 8);
475                         else
476                                 pkt_len = (uint32_t)info->length;
477
478                         nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
479                         droq->recv_buf_list[droq->read_idx].buffer = NULL;
480                         droq->read_idx = lio_incr_index(
481                                                 droq->read_idx, 1,
482                                                 droq->max_count);
483                         droq->refill_count++;
484
485                         if (likely(nicbuf != NULL)) {
486                                 nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
487                                 nicbuf->nb_segs = 1;
488                                 nicbuf->next = NULL;
489                                 /* We don't have a way to pass flags yet */
490                                 nicbuf->ol_flags = 0;
491                                 if (rh->r_dh.has_hash) {
492                                         uint64_t *hash_ptr;
493
494                                         nicbuf->ol_flags |= PKT_RX_RSS_HASH;
495                                         hash_ptr = rte_pktmbuf_mtod(nicbuf,
496                                                                     uint64_t *);
497                                         lio_swap_8B_data(hash_ptr, 1);
498                                         nicbuf->hash.rss = (uint32_t)*hash_ptr;
499                                         nicbuf->data_off += 8;
500                                 }
501
502                                 nicbuf->pkt_len = pkt_len;
503                                 nicbuf->data_len = pkt_len;
504                                 nicbuf->port = lio_dev->port_id;
505                                 /* Store the mbuf */
506                                 rx_pkts[data_pkts++] = nicbuf;
507                                 data_total_len += pkt_len;
508                         }
509
510                         /* Prefetch buffer pointers when on a cache line
511                          * boundary
512                          */
513                         if ((droq->read_idx & 3) == 0) {
514                                 rte_prefetch0(
515                                     &droq->recv_buf_list[droq->read_idx]);
516                                 rte_prefetch0(
517                                     &droq->info_list[droq->read_idx]);
518                         }
519                 } else {
520                         struct rte_mbuf *first_buf = NULL;
521                         struct rte_mbuf *last_buf = NULL;
522
523                         while (pkt_len < info->length) {
524                                 int cpy_len = 0;
525
526                                 cpy_len = ((pkt_len + droq->buffer_size) >
527                                                 info->length)
528                                                 ? ((uint32_t)info->length -
529                                                         pkt_len)
530                                                 : droq->buffer_size;
531
532                                 nicbuf =
533                                     droq->recv_buf_list[droq->read_idx].buffer;
534                                 droq->recv_buf_list[droq->read_idx].buffer =
535                                     NULL;
536
537                                 if (likely(nicbuf != NULL)) {
538                                         /* Note the first seg */
539                                         if (!pkt_len)
540                                                 first_buf = nicbuf;
541
542                                         nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
543                                         nicbuf->nb_segs = 1;
544                                         nicbuf->next = NULL;
545                                         nicbuf->port = lio_dev->port_id;
546                                         /* We don't have a way to pass
547                                          * flags yet
548                                          */
549                                         nicbuf->ol_flags = 0;
550                                         if ((!pkt_len) && (rh->r_dh.has_hash)) {
551                                                 uint64_t *hash_ptr;
552
553                                                 nicbuf->ol_flags |=
554                                                     PKT_RX_RSS_HASH;
555                                                 hash_ptr = rte_pktmbuf_mtod(
556                                                     nicbuf, uint64_t *);
557                                                 lio_swap_8B_data(hash_ptr, 1);
558                                                 nicbuf->hash.rss =
559                                                     (uint32_t)*hash_ptr;
560                                                 nicbuf->data_off += 8;
561                                                 nicbuf->pkt_len = cpy_len - 8;
562                                                 nicbuf->data_len = cpy_len - 8;
563                                         } else {
564                                                 nicbuf->pkt_len = cpy_len;
565                                                 nicbuf->data_len = cpy_len;
566                                         }
567
568                                         if (pkt_len)
569                                                 first_buf->nb_segs++;
570
571                                         if (last_buf)
572                                                 last_buf->next = nicbuf;
573
574                                         last_buf = nicbuf;
575                                 } else {
576                                         PMD_RX_LOG(lio_dev, ERR, "no buf\n");
577                                 }
578
579                                 pkt_len += cpy_len;
580                                 droq->read_idx = lio_incr_index(
581                                                         droq->read_idx,
582                                                         1, droq->max_count);
583                                 droq->refill_count++;
584
585                                 /* Prefetch buffer pointers when on a
586                                  * cache line boundary
587                                  */
588                                 if ((droq->read_idx & 3) == 0) {
589                                         rte_prefetch0(&droq->recv_buf_list
590                                                               [droq->read_idx]);
591
592                                         rte_prefetch0(
593                                             &droq->info_list[droq->read_idx]);
594                                 }
595                         }
596                         rx_pkts[data_pkts++] = first_buf;
597                         if (rh->r_dh.has_hash)
598                                 data_total_len += (pkt_len - 8);
599                         else
600                                 data_total_len += pkt_len;
601                 }
602
603                 /* Inform upper layer about packet checksum verification */
604                 struct rte_mbuf *m = rx_pkts[data_pkts - 1];
605
606                 if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
607                         m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
608
609                 if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
610                         m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
611         }
612
613         if (droq->refill_count >= droq->refill_threshold) {
614                 int desc_refilled = lio_droq_refill(lio_dev, droq);
615
616                 /* Flush the droq descriptor data to memory to be sure
617                  * that when we update the credits the data in memory is
618                  * accurate.
619                  */
620                 rte_wmb();
621                 rte_write32(desc_refilled, droq->pkts_credit_reg);
622                 /* make sure mmio write completes */
623                 rte_wmb();
624         }
625
626         info->length = 0;
627         info->rh.rh64 = 0;
628
629         return data_pkts;
630 }
631
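/* Process up to pkts_to_process packets from the droq and account for them
 * in droq->pkts_pending. Returns the number of mbufs placed in rx_pkts.
 */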
632 static uint32_t
633 lio_droq_fast_process_packets(struct lio_device *lio_dev,
634                               struct lio_droq *droq,
635                               struct rte_mbuf **rx_pkts,
636                               uint32_t pkts_to_process)
637 {
638         int ret, data_pkts = 0;
639         uint32_t pkt;
640
641         for (pkt = 0; pkt < pkts_to_process; pkt++) {
642                 ret = lio_droq_fast_process_packet(lio_dev, droq,
643                                                    &rx_pkts[data_pkts]);
644                 if (ret < 0) {
645                         lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
646                                     lio_dev->port_id, droq->q_no,
647                                     droq->read_idx, pkts_to_process);
648                         break;
649                 }
650                 data_pkts += ret;
651         }
652
653         rte_atomic64_sub(&droq->pkts_pending, pkt);
654
655         return data_pkts;
656 }
657
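/* Read the pkts_sent register to learn how many packets arrived since the
 * last check and add that count to droq->pkts_pending.
 */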
658 static inline uint32_t
659 lio_droq_check_hw_for_pkts(struct lio_droq *droq)
660 {
661         uint32_t last_count;
662         uint32_t pkt_count;
663
664         pkt_count = rte_read32(droq->pkts_sent_reg);
665
666         last_count = pkt_count - droq->pkt_count;
667         droq->pkt_count = pkt_count;
668
669         if (last_count)
670                 rte_atomic64_add(&droq->pkts_pending, last_count);
671
672         return last_count;
673 }
674
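/* Rx burst handler. Checks the hardware for newly arrived packets, caps the
 * amount of work at 'budget', and processes the packets while holding
 * droq->lock. The packet count read from hardware is acknowledged by
 * writing it back to the pkts_sent register.
 */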
675 uint16_t
676 lio_dev_recv_pkts(void *rx_queue,
677                   struct rte_mbuf **rx_pkts,
678                   uint16_t budget)
679 {
680         struct lio_droq *droq = rx_queue;
681         struct lio_device *lio_dev = droq->lio_dev;
682         uint32_t pkts_processed = 0;
683         uint32_t pkt_count = 0;
684
685         lio_droq_check_hw_for_pkts(droq);
686
687         pkt_count = rte_atomic64_read(&droq->pkts_pending);
688         if (!pkt_count)
689                 return 0;
690
691         if (pkt_count > budget)
692                 pkt_count = budget;
693
694         /* Grab the lock */
695         rte_spinlock_lock(&droq->lock);
696         pkts_processed = lio_droq_fast_process_packets(lio_dev,
697                                                        droq, rx_pkts,
698                                                        pkt_count);
699
700         if (droq->pkt_count) {
701                 rte_write32(droq->pkt_count, droq->pkts_sent_reg);
702                 droq->pkt_count = 0;
703         }
704
705         /* Release the spin lock */
706         rte_spinlock_unlock(&droq->lock);
707
708         return pkts_processed;
709 }
710
711 void
712 lio_delete_droq_queue(struct lio_device *lio_dev,
713                       int oq_no)
714 {
715         lio_delete_droq(lio_dev, oq_no);
716         lio_dev->num_oqs--;
717         rte_free(lio_dev->droq[oq_no]);
718         lio_dev->droq[oq_no] = NULL;
719 }
720
721 /**
722  *  lio_init_instr_queue()
723  *  @param lio_dev      - pointer to the lio device structure.
724  *  @param txpciq       - queue to be initialized.
725  *
 726  *  Called at driver init time for each input queue. The queue
 727  *  configuration parameters are taken from the device configuration.
728  *
729  *  @return  Success: 0 Failure: -1
730  */
731 static int
732 lio_init_instr_queue(struct lio_device *lio_dev,
733                      union octeon_txpciq txpciq,
734                      uint32_t num_descs, unsigned int socket_id)
735 {
736         uint32_t iq_no = (uint32_t)txpciq.s.q_no;
737         struct lio_instr_queue *iq;
738         uint32_t instr_type;
739         uint32_t q_size;
740
741         instr_type = LIO_IQ_INSTR_TYPE(lio_dev);
742
743         q_size = instr_type * num_descs;
744         iq = lio_dev->instr_queue[iq_no];
745         iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
746                                              "instr_queue", iq_no, q_size,
747                                              RTE_CACHE_LINE_SIZE,
748                                              socket_id);
749         if (iq->iq_mz == NULL) {
750                 lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
751                             iq_no);
752                 return -1;
753         }
754
755         iq->base_addr_dma = iq->iq_mz->phys_addr;
756         iq->base_addr = (uint8_t *)iq->iq_mz->addr;
757
758         iq->max_count = num_descs;
759
 760         /* Initialize a list to hold requests that have been posted to
 761          * Octeon but have not yet been fetched by Octeon.
 762          */
763         iq->request_list = rte_zmalloc_socket("request_list",
764                                               sizeof(*iq->request_list) *
765                                                         num_descs,
766                                               RTE_CACHE_LINE_SIZE,
767                                               socket_id);
768         if (iq->request_list == NULL) {
769                 lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
770                             iq_no);
771                 lio_dma_zone_free(lio_dev, iq->iq_mz);
772                 return -1;
773         }
774
775         lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
776                     iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
777                     iq->max_count);
778
779         iq->lio_dev = lio_dev;
780         iq->txpciq.txpciq64 = txpciq.txpciq64;
781         iq->fill_cnt = 0;
782         iq->host_write_index = 0;
783         iq->lio_read_index = 0;
784         iq->flush_index = 0;
785
786         rte_atomic64_set(&iq->instr_pending, 0);
787
788         /* Initialize the spinlock for this instruction queue */
789         rte_spinlock_init(&iq->lock);
790         rte_spinlock_init(&iq->post_lock);
791
792         rte_atomic64_clear(&iq->iq_flush_running);
793
794         lio_dev->io_qmask.iq |= (1ULL << iq_no);
795
796         /* Set the 32B/64B mode for each input queue */
797         lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
798         iq->iqcmd_64B = (instr_type == 64);
799
800         lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);
801
802         return 0;
803 }
804
805 int
806 lio_setup_instr_queue0(struct lio_device *lio_dev)
807 {
808         union octeon_txpciq txpciq;
809         uint32_t num_descs = 0;
810         uint32_t iq_no = 0;
811
812         num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);
813
814         lio_dev->num_iqs = 0;
815
816         lio_dev->instr_queue[0] = rte_zmalloc(NULL,
817                                         sizeof(struct lio_instr_queue), 0);
818         if (lio_dev->instr_queue[0] == NULL)
819                 return -ENOMEM;
820
821         lio_dev->instr_queue[0]->q_index = 0;
822         lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
823         txpciq.txpciq64 = 0;
824         txpciq.s.q_no = iq_no;
825         txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
826         txpciq.s.use_qpg = 0;
827         txpciq.s.qpg = 0;
828         if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
829                 rte_free(lio_dev->instr_queue[0]);
830                 lio_dev->instr_queue[0] = NULL;
831                 return -1;
832         }
833
834         lio_dev->num_iqs++;
835
836         return 0;
837 }
838
839 /**
840  *  lio_delete_instr_queue()
841  *  @param lio_dev      - pointer to the lio device structure.
842  *  @param iq_no        - queue to be deleted.
843  *
844  *  Called at driver unload time for each input queue. Deletes all
845  *  allocated resources for the input queue.
846  */
847 static void
848 lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
849 {
850         struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
851
852         rte_free(iq->request_list);
853         iq->request_list = NULL;
854         lio_dma_zone_free(lio_dev, iq->iq_mz);
855 }
856
857 void
858 lio_free_instr_queue0(struct lio_device *lio_dev)
859 {
860         lio_delete_instr_queue(lio_dev, 0);
861         rte_free(lio_dev->instr_queue[0]);
862         lio_dev->instr_queue[0] = NULL;
863         lio_dev->num_iqs--;
864 }
865
866 /* Return 0 on success, -1 on failure */
867 int
868 lio_setup_iq(struct lio_device *lio_dev, int q_index,
869              union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,
870              unsigned int socket_id)
871 {
872         uint32_t iq_no = (uint32_t)txpciq.s.q_no;
873
874         if (lio_dev->instr_queue[iq_no]) {
875                 lio_dev_dbg(lio_dev, "IQ is in use. Cannot create the IQ: %d again\n",
876                             iq_no);
877                 lio_dev->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
878                 lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
879                 return 0;
880         }
881
882         lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
883                                                 sizeof(struct lio_instr_queue),
884                                                 RTE_CACHE_LINE_SIZE, socket_id);
885         if (lio_dev->instr_queue[iq_no] == NULL)
886                 return -1;
887
888         lio_dev->instr_queue[iq_no]->q_index = q_index;
889         lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
890
891         if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id))
892                 goto release_lio_iq;
893
894         lio_dev->num_iqs++;
895         if (lio_dev->fn_list.enable_io_queues(lio_dev))
896                 goto delete_lio_iq;
897
898         return 0;
899
900 delete_lio_iq:
901         lio_delete_instr_queue(lio_dev, iq_no);
902         lio_dev->num_iqs--;
903 release_lio_iq:
904         rte_free(lio_dev->instr_queue[iq_no]);
905         lio_dev->instr_queue[iq_no] = NULL;
906
907         return -1;
908 }
909
910 static inline void
911 lio_ring_doorbell(struct lio_device *lio_dev,
912                   struct lio_instr_queue *iq)
913 {
914         if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
915                 rte_write32(iq->fill_cnt, iq->doorbell_reg);
916                 /* make sure doorbell write goes through */
917                 rte_wmb();
918                 iq->fill_cnt = 0;
919         }
920 }
921
922 static inline void
923 copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
924 {
925         uint8_t *iqptr, cmdsize;
926
927         cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
928         iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
929
930         rte_memcpy(iqptr, cmd, cmdsize);
931 }
932
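/* Copy a command into the next free slot of the instruction queue and
 * advance host_write_index. Reports LIO_IQ_SEND_STOP when the queue is
 * almost full and LIO_IQ_SEND_FAILED when no slot is available.
 */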
933 static inline struct lio_iq_post_status
934 post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
935 {
936         struct lio_iq_post_status st;
937
938         st.status = LIO_IQ_SEND_OK;
939
 940         /* This ensures that the read index does not wrap around to the same
 941          * position if the queue gets full before Octeon could fetch any instr.
 942          */
943         if (rte_atomic64_read(&iq->instr_pending) >=
944                         (int32_t)(iq->max_count - 1)) {
945                 st.status = LIO_IQ_SEND_FAILED;
946                 st.index = -1;
947                 return st;
948         }
949
950         if (rte_atomic64_read(&iq->instr_pending) >=
951                         (int32_t)(iq->max_count - 2))
952                 st.status = LIO_IQ_SEND_STOP;
953
954         copy_cmd_into_iq(iq, cmd);
955
956         /* "index" is returned, host_write_index is modified. */
957         st.index = iq->host_write_index;
958         iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
959                                               iq->max_count);
960         iq->fill_cnt++;
961
962         /* Flush the command into memory. We need to be sure the data is in
963          * memory before indicating that the instruction is pending.
964          */
965         rte_wmb();
966
967         rte_atomic64_inc(&iq->instr_pending);
968
969         return st;
970 }
971
972 static inline void
973 lio_add_to_request_list(struct lio_instr_queue *iq,
974                         int idx, void *buf, int reqtype)
975 {
976         iq->request_list[idx].buf = buf;
977         iq->request_list[idx].reqtype = reqtype;
978 }
979
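/* Post a command on the given instruction queue under iq->post_lock, record
 * the associated buffer in the request list and ring the doorbell unless the
 * post failed.
 */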
980 static int
981 lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
982                  void *buf, uint32_t datasize __rte_unused, uint32_t reqtype)
983 {
984         struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
985         struct lio_iq_post_status st;
986
987         rte_spinlock_lock(&iq->post_lock);
988
989         st = post_command2(iq, cmd);
990
991         if (st.status != LIO_IQ_SEND_FAILED) {
992                 lio_add_to_request_list(iq, st.index, buf, reqtype);
993                 lio_ring_doorbell(lio_dev, iq);
994         }
995
996         rte_spinlock_unlock(&iq->post_lock);
997
998         return st.status;
999 }
1000
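/* Fill in the IH3, PKI-IH3 and IRH words of a soft command. When a response
 * buffer is attached (sc->rdatasize), the RDP word is set up and irh->rflag
 * is set so the firmware returns status into that buffer.
 */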
1001 void
1002 lio_prepare_soft_command(struct lio_device *lio_dev,
1003                          struct lio_soft_command *sc, uint8_t opcode,
1004                          uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
1005                          uint64_t ossp1)
1006 {
1007         struct octeon_instr_pki_ih3 *pki_ih3;
1008         struct octeon_instr_ih3 *ih3;
1009         struct octeon_instr_irh *irh;
1010         struct octeon_instr_rdp *rdp;
1011
1012         RTE_ASSERT(opcode <= 15);
1013         RTE_ASSERT(subcode <= 127);
1014
1015         ih3       = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
1016
1017         ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;
1018
1019         pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
1020
1021         pki_ih3->w      = 1;
1022         pki_ih3->raw    = 1;
1023         pki_ih3->utag   = 1;
1024         pki_ih3->uqpg   = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
1025         pki_ih3->utt    = 1;
1026
1027         pki_ih3->tag    = LIO_CONTROL;
1028         pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
1029         pki_ih3->qpg    = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
1030         pki_ih3->pm     = 0x7;
1031         pki_ih3->sl     = 8;
1032
1033         if (sc->datasize)
1034                 ih3->dlengsz = sc->datasize;
1035
1036         irh             = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
1037         irh->opcode     = opcode;
1038         irh->subcode    = subcode;
1039
1040         /* opcode/subcode specific parameters (ossp) */
1041         irh->ossp = irh_ossp;
1042         sc->cmd.cmd3.ossp[0] = ossp0;
1043         sc->cmd.cmd3.ossp[1] = ossp1;
1044
1045         if (sc->rdatasize) {
1046                 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
1047                 rdp->pcie_port = lio_dev->pcie_port;
1048                 rdp->rlen      = sc->rdatasize;
1049                 irh->rflag = 1;
1050                 /* PKI IH3 */
1051                 ih3->fsz    = OCTEON_SOFT_CMD_RESP_IH3;
1052         } else {
1053                 irh->rflag = 0;
1054                 /* PKI IH3 */
1055                 ih3->fsz    = OCTEON_PCI_CMD_O3;
1056         }
1057 }
1058
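/* Queue a prepared soft command on its instruction queue. A typical usage
 * sketch with the helpers in this file (error handling omitted; commands
 * tracked on the response list are completed via lio_process_ordered_list()):
 *
 *     sc = lio_alloc_soft_command(lio_dev, datasize, rdatasize, ctxsize);
 *     lio_prepare_soft_command(lio_dev, sc, opcode, subcode, irh_ossp,
 *                              ossp0, ossp1);
 *     lio_send_soft_command(lio_dev, sc);
 *     ...wait for completion...
 *     lio_free_soft_command(sc);
 */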
1059 int
1060 lio_send_soft_command(struct lio_device *lio_dev,
1061                       struct lio_soft_command *sc)
1062 {
1063         struct octeon_instr_ih3 *ih3;
1064         struct octeon_instr_irh *irh;
1065         uint32_t len = 0;
1066
1067         ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
1068         if (ih3->dlengsz) {
1069                 RTE_ASSERT(sc->dmadptr);
1070                 sc->cmd.cmd3.dptr = sc->dmadptr;
1071         }
1072
1073         irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
1074         if (irh->rflag) {
1075                 RTE_ASSERT(sc->dmarptr);
1076                 RTE_ASSERT(sc->status_word != NULL);
1077                 *sc->status_word = LIO_COMPLETION_WORD_INIT;
1078                 sc->cmd.cmd3.rptr = sc->dmarptr;
1079         }
1080
1081         len = (uint32_t)ih3->dlengsz;
1082
1083         if (sc->wait_time)
1084                 sc->timeout = lio_uptime + sc->wait_time;
1085
1086         return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
1087                                 LIO_REQTYPE_SOFT_COMMAND);
1088 }
1089
1090 int
1091 lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
1092 {
1093         char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
1094         uint16_t buf_size;
1095
1096         buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
1097         snprintf(sc_pool_name, sizeof(sc_pool_name),
1098                  "lio_sc_pool_%u", lio_dev->port_id);
1099         lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
1100                                                 LIO_MAX_SOFT_COMMAND_BUFFERS,
1101                                                 0, 0, buf_size, SOCKET_ID_ANY);
1102         return 0;
1103 }
1104
1105 void
1106 lio_free_sc_buffer_pool(struct lio_device *lio_dev)
1107 {
1108         rte_mempool_free(lio_dev->sc_buf_pool);
1109 }
1110
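/* Carve a soft command out of a single mbuf taken from sc_buf_pool. The
 * optional context, data and response (rdata) regions follow the command
 * structure, with data and rdata aligned on 128-byte boundaries; the last
 * 8 bytes of rdata hold the completion status word.
 */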
1111 struct lio_soft_command *
1112 lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
1113                        uint32_t rdatasize, uint32_t ctxsize)
1114 {
1115         uint32_t offset = sizeof(struct lio_soft_command);
1116         struct lio_soft_command *sc;
1117         struct rte_mbuf *m;
1118         uint64_t dma_addr;
1119
1120         RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
1121                    LIO_SOFT_COMMAND_BUFFER_SIZE);
1122
1123         m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
1124         if (m == NULL) {
1125                 lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
1126                 return NULL;
1127         }
1128
 1129         /* set rte_mbuf data size; there is only 1 segment */
1130         m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
1131         m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
1132
1133         /* use rte_mbuf buffer for soft command */
1134         sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
1135         memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
1136         sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
1137         sc->dma_addr = rte_mbuf_data_dma_addr(m);
1138         sc->mbuf = m;
1139
1140         dma_addr = sc->dma_addr;
1141
1142         if (ctxsize) {
1143                 sc->ctxptr = (uint8_t *)sc + offset;
1144                 sc->ctxsize = ctxsize;
1145         }
1146
1147         /* Start data at 128 byte boundary */
1148         offset = (offset + ctxsize + 127) & 0xffffff80;
1149
1150         if (datasize) {
1151                 sc->virtdptr = (uint8_t *)sc + offset;
1152                 sc->dmadptr = dma_addr + offset;
1153                 sc->datasize = datasize;
1154         }
1155
1156         /* Start rdata at 128 byte boundary */
1157         offset = (offset + datasize + 127) & 0xffffff80;
1158
1159         if (rdatasize) {
1160                 RTE_ASSERT(rdatasize >= 16);
1161                 sc->virtrptr = (uint8_t *)sc + offset;
1162                 sc->dmarptr = dma_addr + offset;
1163                 sc->rdatasize = rdatasize;
1164                 sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
1165                                                rdatasize - 8);
1166         }
1167
1168         return sc;
1169 }
1170
1171 void
1172 lio_free_soft_command(struct lio_soft_command *sc)
1173 {
1174         rte_pktmbuf_free(sc->mbuf);
1175 }
1176
1177 void
1178 lio_setup_response_list(struct lio_device *lio_dev)
1179 {
1180         STAILQ_INIT(&lio_dev->response_list.head);
1181         rte_spinlock_init(&lio_dev->response_list.lock);
1182         rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
1183 }
1184
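/* Walk the list of pending soft commands. A command is completed either when
 * the firmware has overwritten its status word or when it has timed out; its
 * callback is then invoked with the resulting status. At most
 * LIO_MAX_ORD_REQS_TO_PROCESS completions are handled per call.
 */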
1185 int
1186 lio_process_ordered_list(struct lio_device *lio_dev)
1187 {
1188         int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
1189         struct lio_response_list *ordered_sc_list;
1190         struct lio_soft_command *sc;
1191         int request_complete = 0;
1192         uint64_t status64;
1193         uint32_t status;
1194
1195         ordered_sc_list = &lio_dev->response_list;
1196
1197         do {
1198                 rte_spinlock_lock(&ordered_sc_list->lock);
1199
1200                 if (STAILQ_EMPTY(&ordered_sc_list->head)) {
1201                         /* ordered_sc_list is empty; there is
1202                          * nothing to process
1203                          */
1204                         rte_spinlock_unlock(&ordered_sc_list->lock);
1205                         return -1;
1206                 }
1207
1208                 sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
1209                                              struct lio_soft_command, node);
1210
1211                 status = LIO_REQUEST_PENDING;
1212
 1213                 /* check if Octeon has finished DMA'ing a response
 1214                  * to the location rptr points to
 1215                  */
1216                 status64 = *sc->status_word;
1217
1218                 if (status64 != LIO_COMPLETION_WORD_INIT) {
1219                         /* This logic ensures that all 64b have been written.
1220                          * 1. check byte 0 for non-FF
1221                          * 2. if non-FF, then swap result from BE to host order
1222                          * 3. check byte 7 (swapped to 0) for non-FF
1223                          * 4. if non-FF, use the low 32-bit status code
1224                          * 5. if either byte 0 or byte 7 is FF, don't use status
1225                          */
1226                         if ((status64 & 0xff) != 0xff) {
1227                                 lio_swap_8B_data(&status64, 1);
1228                                 if (((status64 & 0xff) != 0xff)) {
1229                                         /* retrieve 16-bit firmware status */
1230                                         status = (uint32_t)(status64 &
1231                                                             0xffffULL);
1232                                         if (status) {
1233                                                 status =
1234                                                 LIO_FIRMWARE_STATUS_CODE(
1235                                                                         status);
1236                                         } else {
1237                                                 /* i.e. no error */
1238                                                 status = LIO_REQUEST_DONE;
1239                                         }
1240                                 }
1241                         }
1242                 } else if ((sc->timeout && lio_check_timeout(lio_uptime,
1243                                                              sc->timeout))) {
1244                         lio_dev_err(lio_dev,
1245                                     "cmd failed, timeout (%ld, %ld)\n",
1246                                     (long)lio_uptime, (long)sc->timeout);
1247                         status = LIO_REQUEST_TIMEOUT;
1248                 }
1249
1250                 if (status != LIO_REQUEST_PENDING) {
1251                         /* we have received a response or we have timed out.
1252                          * remove node from linked list
1253                          */
1254                         STAILQ_REMOVE(&ordered_sc_list->head,
1255                                       &sc->node, lio_stailq_node, entries);
1256                         rte_atomic64_dec(
1257                             &lio_dev->response_list.pending_req_count);
1258                         rte_spinlock_unlock(&ordered_sc_list->lock);
1259
1260                         if (sc->callback)
1261                                 sc->callback(status, sc->callback_arg);
1262
1263                         request_complete++;
1264                 } else {
1265                         /* no response yet */
1266                         request_complete = 0;
1267                         rte_spinlock_unlock(&ordered_sc_list->lock);
1268                 }
1269
 1270                 /* If we hit the maximum number of ordered requests to
 1271                  * process per call, we quit and let the remaining requests
 1272                  * be handled the next time this function is invoked.
 1273                  * Without such an upper limit, this function could take up
 1274                  * the entire CPU.
 1275                  */
1276                 if (request_complete >= resp_to_process)
1277                         break;
1278         } while (request_complete);
1279
1280         return 0;
1281 }
1282
1283 static inline struct lio_stailq_node *
1284 list_delete_first_node(struct lio_stailq_head *head)
1285 {
1286         struct lio_stailq_node *node;
1287
1288         if (STAILQ_EMPTY(head))
1289                 node = NULL;
1290         else
1291                 node = STAILQ_FIRST(head);
1292
1293         if (node)
1294                 STAILQ_REMOVE(head, node, lio_stailq_node, entries);
1295
1296         return node;
1297 }
1298
1299 static void
1300 lio_delete_sglist(struct lio_instr_queue *txq)
1301 {
1302         struct lio_device *lio_dev = txq->lio_dev;
1303         int iq_no = txq->q_index;
1304         struct lio_gather *g;
1305
1306         if (lio_dev->glist_head == NULL)
1307                 return;
1308
1309         do {
1310                 g = (struct lio_gather *)list_delete_first_node(
1311                                                 &lio_dev->glist_head[iq_no]);
1312                 if (g) {
1313                         if (g->sg)
1314                                 rte_free(
1315                                     (void *)((unsigned long)g->sg - g->adjust));
1316                         rte_free(g);
1317                 }
1318         } while (g);
1319 }
1320
1321 /**
1322  * \brief Setup gather lists
1323  * @param lio per-network private data
1324  */
1325 int
1326 lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
1327                   int fw_mapped_iq, int num_descs, unsigned int socket_id)
1328 {
1329         struct lio_gather *g;
1330         int i;
1331
1332         rte_spinlock_init(&lio_dev->glist_lock[iq_no]);
1333
1334         STAILQ_INIT(&lio_dev->glist_head[iq_no]);
1335
1336         for (i = 0; i < num_descs; i++) {
1337                 g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,
1338                                        socket_id);
1339                 if (g == NULL) {
1340                         lio_dev_err(lio_dev,
1341                                     "lio_gather memory allocation failed for qno %d\n",
1342                                     iq_no);
1343                         break;
1344                 }
1345
1346                 g->sg_size =
1347                     ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);
1348
1349                 g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,
1350                                            RTE_CACHE_LINE_SIZE, socket_id);
1351                 if (g->sg == NULL) {
1352                         lio_dev_err(lio_dev,
1353                                     "sg list memory allocation failed for qno %d\n",
1354                                     iq_no);
1355                         rte_free(g);
1356                         break;
1357                 }
1358
 1359                 /* The gather component should be aligned on a 64-bit boundary */
1360                 if (((unsigned long)g->sg) & 7) {
1361                         g->adjust = 8 - (((unsigned long)g->sg) & 7);
1362                         g->sg =
1363                             (struct lio_sg_entry *)((unsigned long)g->sg +
1364                                                        g->adjust);
1365                 }
1366
1367                 STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,
1368                                    entries);
1369         }
1370
1371         if (i != num_descs) {
1372                 lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);
1373                 return -ENOMEM;
1374         }
1375
1376         return 0;
1377 }
1378
1379 void
1380 lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
1381 {
1382         lio_delete_instr_queue(lio_dev, iq_no);
1383         rte_free(lio_dev->instr_queue[iq_no]);
1384         lio_dev->instr_queue[iq_no] = NULL;
1385         lio_dev->num_iqs--;
1386 }