net/liquidio: initialize Rx queue
[dpdk.git] drivers/net/liquidio/lio_rxtx.c
/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "lio_logs.h"
#include "lio_struct.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"

static void
lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
{
        uint32_t count = 0;

        do {
                count += droq->buffer_size;
        } while (count < LIO_MAX_RX_PKTLEN);
}
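
/* The do/while above walks LIO_MAX_RX_PKTLEN in steps of droq->buffer_size;
 * the number of iterations is the number of receive buffers needed to hold a
 * maximum-sized packet, although the computed value is currently discarded.
 * A minimal closed-form sketch of the same computation (illustrative only;
 * "nr_bufs" is not a field of struct lio_droq):
 *
 *	nr_bufs = (LIO_MAX_RX_PKTLEN + droq->buffer_size - 1) /
 *		  droq->buffer_size;
 */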

static void
lio_droq_reset_indices(struct lio_droq *droq)
{
        droq->read_idx  = 0;
        droq->write_idx = 0;
        droq->refill_idx = 0;
        droq->refill_count = 0;
        rte_atomic64_set(&droq->pkts_pending, 0);
}

static void
lio_droq_destroy_ring_buffers(struct lio_droq *droq)
{
        uint32_t i;

        for (i = 0; i < droq->max_count; i++) {
                if (droq->recv_buf_list[i].buffer) {
                        rte_pktmbuf_free((struct rte_mbuf *)
                                         droq->recv_buf_list[i].buffer);
                        droq->recv_buf_list[i].buffer = NULL;
                }
        }

        lio_droq_reset_indices(droq);
}

static void *
lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
{
        struct lio_droq *droq = lio_dev->droq[q_no];
        struct rte_mempool *mpool = droq->mpool;
        struct rte_mbuf *m;

        m = rte_pktmbuf_alloc(mpool);
        if (m == NULL) {
                lio_dev_err(lio_dev, "Cannot allocate a receive buffer\n");
                return NULL;
        }

        rte_mbuf_refcnt_set(m, 1);
        m->next = NULL;
        m->data_off = RTE_PKTMBUF_HEADROOM;
        m->nb_segs = 1;
        m->pool = mpool;

        return m;
}
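
/* Note: rte_pktmbuf_alloc() already leaves a fresh mbuf in this state
 * (rte_pktmbuf_reset() sets next, nb_segs and data_off, the pool pointer is
 * fixed when the mempool is populated, and a freshly allocated mbuf has a
 * reference count of 1), so the assignments above are defensive. A minimal
 * sketch relying only on the allocator (illustrative, not the driver's code):
 *
 *	struct rte_mbuf *m = rte_pktmbuf_alloc(droq->mpool);
 *
 *	if (m == NULL)
 *		return NULL;
 *	return m;
 */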

static int
lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
                            struct lio_droq *droq)
{
        struct lio_droq_desc *desc_ring = droq->desc_ring;
        uint32_t i;
        void *buf;

        for (i = 0; i < droq->max_count; i++) {
                buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
                if (buf == NULL) {
                        lio_dev_err(lio_dev, "buffer alloc failed\n");
                        lio_droq_destroy_ring_buffers(droq);
                        return -ENOMEM;
                }

                droq->recv_buf_list[i].buffer = buf;
                droq->info_list[i].length = 0;

                /* map ring buffers into memory */
                desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
                desc_ring[i].buffer_ptr =
                        lio_map_ring(droq->recv_buf_list[i].buffer);
        }

        lio_droq_reset_indices(droq);

        lio_droq_compute_max_packet_bufs(droq);

        return 0;
}

static void
lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
{
        const struct rte_memzone *mz_tmp;
        int ret = 0;

        if (mz == NULL) {
                lio_dev_err(lio_dev, "Memzone NULL\n");
                return;
        }

        mz_tmp = rte_memzone_lookup(mz->name);
        if (mz_tmp == NULL) {
                lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
                return;
        }

        ret = rte_memzone_free(mz);
        if (ret)
                lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
}

/**
 *  Frees the space for the descriptor ring of the droq.
 *
 *  @param lio_dev      - pointer to the lio device structure
 *  @param q_no         - droq no.
 */
static void
lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
{
        struct lio_droq *droq = lio_dev->droq[q_no];

        lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);

        lio_droq_destroy_ring_buffers(droq);
        rte_free(droq->recv_buf_list);
        droq->recv_buf_list = NULL;
        lio_dma_zone_free(lio_dev, droq->info_mz);
        lio_dma_zone_free(lio_dev, droq->desc_ring_mz);

        memset(droq, 0, LIO_DROQ_SIZE);
}

static void *
lio_alloc_info_buffer(struct lio_device *lio_dev,
                      struct lio_droq *droq, unsigned int socket_id)
{
        droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
                                                 "info_list", droq->q_no,
                                                 (droq->max_count *
                                                        LIO_DROQ_INFO_SIZE),
                                                 RTE_CACHE_LINE_SIZE,
                                                 socket_id);

        if (droq->info_mz == NULL)
                return NULL;

        droq->info_list_dma = droq->info_mz->phys_addr;
        droq->info_alloc_size = droq->info_mz->len;
        droq->info_base_addr = (size_t)droq->info_mz->addr;

        return droq->info_mz->addr;
}

/**
 *  Allocates space for the descriptor ring of the droq and
 *  sets the base address, number of descriptors, etc. in Octeon registers.
 *
 * @param lio_dev       - pointer to the lio device structure
 * @param q_no          - droq no.
 * @param num_descs     - number of descriptors in the ring
 * @param desc_size     - size of each receive buffer
 * @param mpool         - mempool used to allocate receive buffers
 * @param socket_id     - NUMA socket for the ring allocations
 * @return Success: 0   Failure: -1
 */
static int
lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
              uint32_t num_descs, uint32_t desc_size,
              struct rte_mempool *mpool, unsigned int socket_id)
{
        uint32_t c_refill_threshold;
        uint32_t desc_ring_size;
        struct lio_droq *droq;

        lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);

        droq = lio_dev->droq[q_no];
        droq->lio_dev = lio_dev;
        droq->q_no = q_no;
        droq->mpool = mpool;

        c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);

        droq->max_count = num_descs;
        droq->buffer_size = desc_size;

        desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
        droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
                                                      "droq", q_no,
                                                      desc_ring_size,
                                                      RTE_CACHE_LINE_SIZE,
                                                      socket_id);

        if (droq->desc_ring_mz == NULL) {
                lio_dev_err(lio_dev,
                            "Output queue %d ring alloc failed\n", q_no);
                return -1;
        }

        droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
        droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;

        lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
                    q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
        lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
                    droq->max_count);

        droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
        if (droq->info_list == NULL) {
                lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
                goto init_droq_fail;
        }

        droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
                                                 (droq->max_count *
                                                        LIO_DROQ_RECVBUF_SIZE),
                                                 RTE_CACHE_LINE_SIZE,
                                                 socket_id);
        if (droq->recv_buf_list == NULL) {
                lio_dev_err(lio_dev,
                            "Output queue recv buf list alloc failed\n");
                goto init_droq_fail;
        }

        if (lio_droq_setup_ring_buffers(lio_dev, droq))
                goto init_droq_fail;

        droq->refill_threshold = c_refill_threshold;

        rte_spinlock_init(&droq->lock);

        lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);

        lio_dev->io_qmask.oq |= (1ULL << q_no);

        return 0;

init_droq_fail:
        lio_delete_droq(lio_dev, q_no);

        return -1;
}

int
lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
               int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
{
        struct lio_droq *droq;

        PMD_INIT_FUNC_TRACE();

        if (lio_dev->droq[oq_no]) {
                lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
                return 0;
        }

        /* Allocate the data structure for the new droq. */
        droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (droq == NULL)
                return -ENOMEM;

        lio_dev->droq[oq_no] = droq;

        /* Initialize the Droq */
        if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
                          socket_id)) {
                lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
                rte_free(lio_dev->droq[oq_no]);
                lio_dev->droq[oq_no] = NULL;
                return -ENOMEM;
        }

        lio_dev->num_oqs++;

        lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);

        /* Send credits for the Octeon output queues. Credits are always
         * sent after the output queue is enabled.
         */
        rte_write32(lio_dev->droq[oq_no]->max_count,
                    lio_dev->droq[oq_no]->pkts_credit_reg);
        rte_wmb();

        return 0;
}
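
/* The rte_write32() above posts one credit per descriptor, handing the whole
 * freshly filled ring to the hardware. At run time the receive path typically
 * returns credits only for the descriptors it has refilled; a minimal sketch,
 * assuming a hypothetical helper that is not part of this patch:
 *
 *	static inline void
 *	lio_droq_credit_refilled(struct lio_droq *droq, uint32_t nb_refilled)
 *	{
 *		rte_write32(nb_refilled, droq->pkts_credit_reg);
 *		rte_wmb();
 *	}
 */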

/**
 *  lio_init_instr_queue()
 *  @param lio_dev      - pointer to the lio device structure.
 *  @param txpciq       - queue to be initialized.
 *
 *  Called at driver init time for each input queue. The queue's
 *  configuration parameters come from the device configuration.
 *
 *  @return  Success: 0 Failure: -1
 */
static int
lio_init_instr_queue(struct lio_device *lio_dev,
                     union octeon_txpciq txpciq,
                     uint32_t num_descs, unsigned int socket_id)
{
        uint32_t iq_no = (uint32_t)txpciq.s.q_no;
        struct lio_instr_queue *iq;
        uint32_t instr_type;
        uint32_t q_size;

        instr_type = LIO_IQ_INSTR_TYPE(lio_dev);

        q_size = instr_type * num_descs;
        iq = lio_dev->instr_queue[iq_no];
        iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
                                             "instr_queue", iq_no, q_size,
                                             RTE_CACHE_LINE_SIZE,
                                             socket_id);
        if (iq->iq_mz == NULL) {
                lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
                            iq_no);
                return -1;
        }

        iq->base_addr_dma = iq->iq_mz->phys_addr;
        iq->base_addr = (uint8_t *)iq->iq_mz->addr;

        iq->max_count = num_descs;

        /* Initialize a list to hold requests that have been posted to Octeon
         * but have yet to be fetched by Octeon.
         */
        iq->request_list = rte_zmalloc_socket("request_list",
                                              sizeof(*iq->request_list) *
                                                        num_descs,
                                              RTE_CACHE_LINE_SIZE,
                                              socket_id);
        if (iq->request_list == NULL) {
                lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
                            iq_no);
                lio_dma_zone_free(lio_dev, iq->iq_mz);
                return -1;
        }

        lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
                    iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
                    iq->max_count);

        iq->lio_dev = lio_dev;
        iq->txpciq.txpciq64 = txpciq.txpciq64;
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->lio_read_index = 0;
        iq->flush_index = 0;

        rte_atomic64_set(&iq->instr_pending, 0);

        /* Initialize the spinlock for this instruction queue */
        rte_spinlock_init(&iq->lock);
        rte_spinlock_init(&iq->post_lock);

        rte_atomic64_clear(&iq->iq_flush_running);

        lio_dev->io_qmask.iq |= (1ULL << iq_no);

        /* Set the 32B/64B mode for each input queue */
        lio_dev->io_qmask.iq64B |= ((uint64_t)(instr_type == 64) << iq_no);
        iq->iqcmd_64B = (instr_type == 64);

        lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);

        return 0;
}

int
lio_setup_instr_queue0(struct lio_device *lio_dev)
{
        union octeon_txpciq txpciq;
        uint32_t num_descs = 0;
        uint32_t iq_no = 0;

        num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);

        lio_dev->num_iqs = 0;

        lio_dev->instr_queue[0] = rte_zmalloc(NULL,
                                        sizeof(struct lio_instr_queue), 0);
        if (lio_dev->instr_queue[0] == NULL)
                return -ENOMEM;

        lio_dev->instr_queue[0]->q_index = 0;
        lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
        txpciq.txpciq64 = 0;
        txpciq.s.q_no = iq_no;
        txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
        txpciq.s.use_qpg = 0;
        txpciq.s.qpg = 0;
        if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
                rte_free(lio_dev->instr_queue[0]);
                lio_dev->instr_queue[0] = NULL;
                return -1;
        }

        lio_dev->num_iqs++;

        return 0;
}

/**
 *  lio_delete_instr_queue()
 *  @param lio_dev      - pointer to the lio device structure.
 *  @param iq_no        - queue to be deleted.
 *
 *  Called at driver unload time for each input queue. Deletes all
 *  allocated resources for the input queue.
 */
static void
lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
{
        struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];

        rte_free(iq->request_list);
        iq->request_list = NULL;
        lio_dma_zone_free(lio_dev, iq->iq_mz);
}

void
lio_free_instr_queue0(struct lio_device *lio_dev)
{
        lio_delete_instr_queue(lio_dev, 0);
        rte_free(lio_dev->instr_queue[0]);
        lio_dev->instr_queue[0] = NULL;
        lio_dev->num_iqs--;
}

static inline void
lio_ring_doorbell(struct lio_device *lio_dev,
                  struct lio_instr_queue *iq)
{
        if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
                rte_write32(iq->fill_cnt, iq->doorbell_reg);
                /* make sure doorbell write goes through */
                rte_wmb();
                iq->fill_cnt = 0;
        }
}

static inline void
copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
        uint8_t *iqptr, cmdsize;

        cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
        iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

        rte_memcpy(iqptr, cmd, cmdsize);
}

static inline struct lio_iq_post_status
post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
        struct lio_iq_post_status st;

        st.status = LIO_IQ_SEND_OK;

        /* This ensures that the read index does not wrap around to the same
         * position if the queue gets full before Octeon has fetched any
         * instructions.
         */
        if (rte_atomic64_read(&iq->instr_pending) >=
                        (int32_t)(iq->max_count - 1)) {
                st.status = LIO_IQ_SEND_FAILED;
                st.index = -1;
                return st;
        }

        if (rte_atomic64_read(&iq->instr_pending) >=
                        (int32_t)(iq->max_count - 2))
                st.status = LIO_IQ_SEND_STOP;

        copy_cmd_into_iq(iq, cmd);

        /* "index" is returned, host_write_index is modified. */
        st.index = iq->host_write_index;
        iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
                                              iq->max_count);
        iq->fill_cnt++;

        /* Flush the command into memory. We need to be sure the data is in
         * memory before indicating that the instruction is pending.
         */
        rte_wmb();

        rte_atomic64_inc(&iq->instr_pending);

        return st;
}
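
/* host_write_index advances with wrap-around at max_count via lio_incr_index()
 * (not defined in this file). A minimal sketch of such a wrapping increment,
 * written here only for illustration:
 *
 *	static inline uint32_t
 *	incr_index_sketch(uint32_t index, uint32_t count, uint32_t max)
 *	{
 *		return (index + count) >= max ? (index + count) - max :
 *						(index + count);
 *	}
 *
 * e.g. with max_count = 8, a post at index 7 leaves host_write_index at 0;
 * the instr_pending checks above keep the wrapped index from overtaking
 * commands that Octeon has not yet fetched.
 */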

static inline void
lio_add_to_request_list(struct lio_instr_queue *iq,
                        int idx, void *buf, int reqtype)
{
        iq->request_list[idx].buf = buf;
        iq->request_list[idx].reqtype = reqtype;
}

static int
lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
                 void *buf, uint32_t datasize __rte_unused, uint32_t reqtype)
{
        struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
        struct lio_iq_post_status st;

        rte_spinlock_lock(&iq->post_lock);

        st = post_command2(iq, cmd);

        if (st.status != LIO_IQ_SEND_FAILED) {
                lio_add_to_request_list(iq, st.index, buf, reqtype);
                lio_ring_doorbell(lio_dev, iq);
        }

        rte_spinlock_unlock(&iq->post_lock);

        return st.status;
}

void
lio_prepare_soft_command(struct lio_device *lio_dev,
                         struct lio_soft_command *sc, uint8_t opcode,
                         uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
                         uint64_t ossp1)
{
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        struct octeon_instr_rdp *rdp;

        RTE_ASSERT(opcode <= 15);
        RTE_ASSERT(subcode <= 127);

        ih3       = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

        ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;

        pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

        pki_ih3->w      = 1;
        pki_ih3->raw    = 1;
        pki_ih3->utag   = 1;
        pki_ih3->uqpg   = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
        pki_ih3->utt    = 1;

        pki_ih3->tag    = LIO_CONTROL;
        pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
        pki_ih3->qpg    = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
        pki_ih3->pm     = 0x7;
        pki_ih3->sl     = 8;

        if (sc->datasize)
                ih3->dlengsz = sc->datasize;

        irh             = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
        irh->opcode     = opcode;
        irh->subcode    = subcode;

        /* opcode/subcode specific parameters (ossp) */
        irh->ossp = irh_ossp;
        sc->cmd.cmd3.ossp[0] = ossp0;
        sc->cmd.cmd3.ossp[1] = ossp1;

        if (sc->rdatasize) {
                rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
                rdp->pcie_port = lio_dev->pcie_port;
                rdp->rlen      = sc->rdatasize;
                irh->rflag = 1;
                /* PKI IH3 */
                ih3->fsz    = OCTEON_SOFT_CMD_RESP_IH3;
        } else {
                irh->rflag = 0;
                /* PKI IH3 */
                ih3->fsz    = OCTEON_PCI_CMD_O3;
        }
}

int
lio_send_soft_command(struct lio_device *lio_dev,
                      struct lio_soft_command *sc)
{
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        uint32_t len = 0;

        ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
        if (ih3->dlengsz) {
                RTE_ASSERT(sc->dmadptr);
                sc->cmd.cmd3.dptr = sc->dmadptr;
        }

        irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
        if (irh->rflag) {
                RTE_ASSERT(sc->dmarptr);
                RTE_ASSERT(sc->status_word != NULL);
                *sc->status_word = LIO_COMPLETION_WORD_INIT;
                sc->cmd.cmd3.rptr = sc->dmarptr;
        }

        len = (uint32_t)ih3->dlengsz;

        if (sc->wait_time)
                sc->timeout = lio_uptime + sc->wait_time;

        return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
                                LIO_REQTYPE_SOFT_COMMAND);
}
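
/* Typical soft-command flow from a caller's point of view; a minimal sketch
 * only. LIO_OPCODE_EXAMPLE/LIO_SUBCODE_EXAMPLE, the queue number and the
 * buffer sizes are hypothetical placeholders, and real callers also arrange
 * for the response (a callback or polling of the status word) before sending:
 *
 *	struct lio_soft_command *sc;
 *
 *	sc = lio_alloc_soft_command(lio_dev, 0, 16, 0);
 *	if (sc == NULL)
 *		return -ENOMEM;
 *
 *	sc->iq_no = 0;
 *	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE_EXAMPLE,
 *				 LIO_SUBCODE_EXAMPLE, 0, 0, 0);
 *	sc->wait_time = 1000;
 *
 *	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED) {
 *		lio_free_soft_command(sc);
 *		return -1;
 *	}
 */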

int
lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
{
        char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
        uint16_t buf_size;

        buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
        snprintf(sc_pool_name, sizeof(sc_pool_name),
                 "lio_sc_pool_%u", lio_dev->port_id);
        lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
                                                LIO_MAX_SOFT_COMMAND_BUFFERS,
                                                0, 0, buf_size, SOCKET_ID_ANY);
        if (lio_dev->sc_buf_pool == NULL) {
                lio_dev_err(lio_dev, "Soft command buffer pool alloc failed\n");
                return -ENOMEM;
        }

        return 0;
}

void
lio_free_sc_buffer_pool(struct lio_device *lio_dev)
{
        rte_mempool_free(lio_dev->sc_buf_pool);
}

struct lio_soft_command *
lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
                       uint32_t rdatasize, uint32_t ctxsize)
{
        uint32_t offset = sizeof(struct lio_soft_command);
        struct lio_soft_command *sc;
        struct rte_mbuf *m;
        uint64_t dma_addr;

        RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
                   LIO_SOFT_COMMAND_BUFFER_SIZE);

        m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
        if (m == NULL) {
                lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
                return NULL;
        }

        /* set rte_mbuf data size; there is only one segment */
        m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
        m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;

        /* use rte_mbuf buffer for soft command */
        sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
        memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
        sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
        sc->dma_addr = rte_mbuf_data_dma_addr(m);
        sc->mbuf = m;

        dma_addr = sc->dma_addr;

        if (ctxsize) {
                sc->ctxptr = (uint8_t *)sc + offset;
                sc->ctxsize = ctxsize;
        }

        /* Start data at 128 byte boundary */
        offset = (offset + ctxsize + 127) & 0xffffff80;

        if (datasize) {
                sc->virtdptr = (uint8_t *)sc + offset;
                sc->dmadptr = dma_addr + offset;
                sc->datasize = datasize;
        }

        /* Start rdata at 128 byte boundary */
        offset = (offset + datasize + 127) & 0xffffff80;

        if (rdatasize) {
                RTE_ASSERT(rdatasize >= 16);
                sc->virtrptr = (uint8_t *)sc + offset;
                sc->dmarptr = dma_addr + offset;
                sc->rdatasize = rdatasize;
                sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
                                               rdatasize - 8);
        }

        return sc;
}
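
/* The "& 0xffffff80" above rounds the running offset up to the next 128-byte
 * boundary, so dptr and rptr are always 128-byte aligned within the mbuf data
 * area. Worked example (the value 200 is hypothetical): if offset + ctxsize
 * is 200, then (200 + 127) & 0xffffff80 = 327 & ~0x7f = 256, i.e. the next
 * multiple of 128.
 */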

void
lio_free_soft_command(struct lio_soft_command *sc)
{
        rte_pktmbuf_free(sc->mbuf);
}

void
lio_setup_response_list(struct lio_device *lio_dev)
{
        STAILQ_INIT(&lio_dev->response_list.head);
        rte_spinlock_init(&lio_dev->response_list.lock);
        rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
}

int
lio_process_ordered_list(struct lio_device *lio_dev)
{
        int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
        struct lio_response_list *ordered_sc_list;
        struct lio_soft_command *sc;
        int request_complete = 0;
        uint64_t status64;
        uint32_t status;

        ordered_sc_list = &lio_dev->response_list;

        do {
                rte_spinlock_lock(&ordered_sc_list->lock);

                if (STAILQ_EMPTY(&ordered_sc_list->head)) {
                        /* ordered_sc_list is empty; there is
                         * nothing to process
                         */
                        rte_spinlock_unlock(&ordered_sc_list->lock);
                        return -1;
                }

                sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
                                             struct lio_soft_command, node);

                status = LIO_REQUEST_PENDING;

                /* check whether Octeon has finished DMA'ing a response
                 * to where rptr is pointing
                 */
                status64 = *sc->status_word;

                if (status64 != LIO_COMPLETION_WORD_INIT) {
                        /* This logic ensures that all 64b have been written.
                         * 1. check byte 0 for non-FF
                         * 2. if non-FF, then swap result from BE to host order
                         * 3. check byte 7 (swapped to 0) for non-FF
                         * 4. if non-FF, use the low 16-bit status code
                         * 5. if either byte 0 or byte 7 is FF, don't use status
                         */
                        if ((status64 & 0xff) != 0xff) {
                                lio_swap_8B_data(&status64, 1);
                                if (((status64 & 0xff) != 0xff)) {
                                        /* retrieve 16-bit firmware status */
                                        status = (uint32_t)(status64 &
                                                            0xffffULL);
                                        if (status) {
                                                status =
                                                LIO_FIRMWARE_STATUS_CODE(
                                                                        status);
                                        } else {
                                                /* i.e. no error */
                                                status = LIO_REQUEST_DONE;
                                        }
                                }
                        }
                } else if ((sc->timeout && lio_check_timeout(lio_uptime,
                                                             sc->timeout))) {
                        lio_dev_err(lio_dev,
                                    "cmd failed, timeout (%ld, %ld)\n",
                                    (long)lio_uptime, (long)sc->timeout);
                        status = LIO_REQUEST_TIMEOUT;
                }

                if (status != LIO_REQUEST_PENDING) {
                        /* we have received a response or we have timed out.
                         * remove node from linked list
                         */
                        STAILQ_REMOVE(&ordered_sc_list->head,
                                      &sc->node, lio_stailq_node, entries);
                        rte_atomic64_dec(
                            &lio_dev->response_list.pending_req_count);
                        rte_spinlock_unlock(&ordered_sc_list->lock);

                        if (sc->callback)
                                sc->callback(status, sc->callback_arg);

                        request_complete++;
                } else {
                        /* no response yet */
                        request_complete = 0;
                        rte_spinlock_unlock(&ordered_sc_list->lock);
                }

                /* If we hit the Max Ordered requests to process every loop,
                 * we quit and let this function be invoked the next time
                 * the poll thread runs to process the remaining requests.
                 * This function can take up the entire CPU if there is
                 * no upper limit to the requests processed.
                 */
                if (request_complete >= resp_to_process)
                        break;
        } while (request_complete);

        return 0;
}
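
/* Worked example for the completion-word checks in lio_process_ordered_list()
 * (illustrative). The status word is initialised to LIO_COMPLETION_WORD_INIT
 * before the command is sent, which is what the 0xff byte tests rely on.
 * Suppose the firmware has written a big-endian status whose low 16 bits are
 * 0x0005:
 *   1. byte 0 of status64 is no longer 0xff, so part of the word has landed;
 *   2. lio_swap_8B_data() converts the word to host order;
 *   3. the byte that sat at the far end (byte 7 before the swap) is re-checked
 *      against 0xff to confirm the whole 64-bit word was DMA'd;
 *   4. status64 & 0xffff yields 0x0005, a non-zero firmware code, so it is
 *      mapped through LIO_FIRMWARE_STATUS_CODE(); a value of 0 would have
 *      produced LIO_REQUEST_DONE instead.
 */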