/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "lio_logs.h"
#include "lio_struct.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"
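
/* Worked example for lio_droq_compute_max_packet_bufs() below
 * (hypothetical sizes): with 2 KB receive buffers and a 64 KB
 * LIO_MAX_RX_PKTLEN, count advances in buffer_size steps, so a
 * maximum-sized packet spans 65536 / 2048 = 32 ring buffers.
 */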
static void
lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
{
        uint32_t count = 0;

        do {
                count += droq->buffer_size;
        } while (count < LIO_MAX_RX_PKTLEN);
}
static void
lio_droq_reset_indices(struct lio_droq *droq)
{
        droq->read_idx = 0;
        droq->write_idx = 0;
        droq->refill_idx = 0;
        droq->refill_count = 0;
        rte_atomic64_set(&droq->pkts_pending, 0);
}
static void
lio_droq_destroy_ring_buffers(struct lio_droq *droq)
{
        uint32_t i;

        for (i = 0; i < droq->max_count; i++) {
                if (droq->recv_buf_list[i].buffer) {
                        rte_pktmbuf_free((struct rte_mbuf *)
                                         droq->recv_buf_list[i].buffer);
                        droq->recv_buf_list[i].buffer = NULL;
                }
        }

        lio_droq_reset_indices(droq);
}
static void *
lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
{
        struct lio_droq *droq = lio_dev->droq[q_no];
        struct rte_mempool *mpool = droq->mpool;
        struct rte_mbuf *m;

        m = rte_pktmbuf_alloc(mpool);
        if (m == NULL) {
                lio_dev_err(lio_dev, "Cannot allocate\n");
                return NULL;
        }

        rte_mbuf_refcnt_set(m, 1);
        m->next = NULL;
        m->data_off = RTE_PKTMBUF_HEADROOM;
        m->nb_segs = 1;
        m->pool = mpool;

        return m;
}
static int
lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
                            struct lio_droq *droq)
{
        struct lio_droq_desc *desc_ring = droq->desc_ring;
        uint32_t i;
        void *buf;

        for (i = 0; i < droq->max_count; i++) {
                buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
                if (buf == NULL) {
                        lio_dev_err(lio_dev, "buffer alloc failed\n");
                        lio_droq_destroy_ring_buffers(droq);
                        return -ENOMEM;
                }

                droq->recv_buf_list[i].buffer = buf;
                droq->info_list[i].length = 0;

                /* map ring buffers into memory */
                desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
                desc_ring[i].buffer_ptr =
                        lio_map_ring(droq->recv_buf_list[i].buffer);
        }

        lio_droq_reset_indices(droq);

        lio_droq_compute_max_packet_bufs(droq);

        return 0;
}
static void
lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
{
        const struct rte_memzone *mz_tmp;
        int ret = 0;

        if (mz == NULL) {
                lio_dev_err(lio_dev, "Memzone NULL\n");
                return;
        }

        mz_tmp = rte_memzone_lookup(mz->name);
        if (mz_tmp == NULL) {
                lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
                return;
        }

        ret = rte_memzone_free(mz);
        if (ret)
                lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
}
/**
 *  Frees the space for the descriptor ring of the droq.
 *
 *  @param lio_dev	- pointer to the lio device structure
 *  @param q_no		- droq no.
 */
static void
lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
{
        struct lio_droq *droq = lio_dev->droq[q_no];

        lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);

        lio_droq_destroy_ring_buffers(droq);
        rte_free(droq->recv_buf_list);
        droq->recv_buf_list = NULL;
        lio_dma_zone_free(lio_dev, droq->info_mz);
        lio_dma_zone_free(lio_dev, droq->desc_ring_mz);

        memset(droq, 0, LIO_DROQ_SIZE);
}
static void *
lio_alloc_info_buffer(struct lio_device *lio_dev,
                      struct lio_droq *droq, unsigned int socket_id)
{
        droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
                                                 "info_list", droq->q_no,
                                                 (droq->max_count *
                                                        LIO_DROQ_INFO_SIZE),
                                                 RTE_CACHE_LINE_SIZE,
                                                 socket_id);
        if (droq->info_mz == NULL)
                return NULL;

        droq->info_list_dma = droq->info_mz->phys_addr;
        droq->info_alloc_size = droq->info_mz->len;
        droq->info_base_addr = (size_t)droq->info_mz->addr;

        return droq->info_mz->addr;
}
/**
 *  Allocates space for the descriptor ring of the droq and
 *  sets the base addr, num desc etc in Octeon registers.
 *
 *  @param lio_dev	- pointer to the lio device structure
 *  @param q_no		- droq no.
 *  @param num_descs	- number of ring descriptors
 *  @param desc_size	- size of each receive buffer
 *  @param mpool	- mempool to allocate receive buffers from
 *  @param socket_id	- NUMA socket to allocate memory on
 *  @return Success: 0	Failure: -1
 */
static int
lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
              uint32_t num_descs, uint32_t desc_size,
              struct rte_mempool *mpool, unsigned int socket_id)
{
        uint32_t c_refill_threshold;
        uint32_t desc_ring_size;
        struct lio_droq *droq;

        lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);

        droq = lio_dev->droq[q_no];
        droq->lio_dev = lio_dev;
        droq->q_no = q_no;
        droq->mpool = mpool;

        c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);

        droq->max_count = num_descs;
        droq->buffer_size = desc_size;

        desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
        droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
                                                      "droq", q_no,
                                                      desc_ring_size,
                                                      RTE_CACHE_LINE_SIZE,
                                                      socket_id);
        if (droq->desc_ring_mz == NULL) {
                lio_dev_err(lio_dev,
                            "Output queue %d ring alloc failed\n", q_no);
                return -1;
        }

        droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
        droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;

        lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
                    q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
        lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
                    droq->max_count);

        droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
        if (droq->info_list == NULL) {
                lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
                goto init_droq_fail;
        }

        droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
                                                 (droq->max_count *
                                                        LIO_DROQ_RECVBUF_SIZE),
                                                 RTE_CACHE_LINE_SIZE,
                                                 socket_id);
        if (droq->recv_buf_list == NULL) {
                lio_dev_err(lio_dev,
                            "Output queue recv buf list alloc failed\n");
                goto init_droq_fail;
        }

        if (lio_droq_setup_ring_buffers(lio_dev, droq))
                goto init_droq_fail;

        droq->refill_threshold = c_refill_threshold;

        rte_spinlock_init(&droq->lock);

        lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);

        lio_dev->io_qmask.oq |= (1ULL << q_no);

        return 0;

init_droq_fail:
        lio_delete_droq(lio_dev, q_no);

        return -1;
}
int
lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
               int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
{
        struct lio_droq *droq;

        PMD_INIT_FUNC_TRACE();

        if (lio_dev->droq[oq_no]) {
                lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
                return 0;
        }

        /* Allocate the DS for the new droq. */
        droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (droq == NULL)
                return -ENOMEM;

        lio_dev->droq[oq_no] = droq;

        /* Initialize the Droq */
        if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
                          socket_id)) {
                lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
                rte_free(lio_dev->droq[oq_no]);
                lio_dev->droq[oq_no] = NULL;
                return -ENOMEM;
        }

        lio_dev->num_oqs++;

        lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);

        /* Send credit for octeon output queues. credits are always
         * sent after the output queue is enabled.
         */
        rte_write32(lio_dev->droq[oq_no]->max_count,
                    lio_dev->droq[oq_no]->pkts_credit_reg);

        return 0;
}
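
/* Hypothetical usage sketch for lio_setup_droq(): the PMD Rx queue
 * setup path would call it once per configured queue, e.g.
 *
 *	lio_setup_droq(lio_dev, q_no, nb_desc, buf_size, mbuf_pool,
 *		       socket_id);
 *
 * with buf_size derived from the mempool's data room size.
 */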
/**
 *  lio_init_instr_queue()
 *  @param lio_dev	- pointer to the lio device structure.
 *  @param txpciq	- queue to be initialized.
 *
 *  Called at driver init time for each input queue. num_descs and
 *  socket_id carry the configuration parameters for the queue.
 *
 *  @return  Success: 0	Failure: -1
 */
static int
lio_init_instr_queue(struct lio_device *lio_dev,
                     union octeon_txpciq txpciq,
                     uint32_t num_descs, unsigned int socket_id)
{
        uint32_t iq_no = (uint32_t)txpciq.s.q_no;
        struct lio_instr_queue *iq;
        uint32_t instr_type;
        uint32_t q_size;

        instr_type = LIO_IQ_INSTR_TYPE(lio_dev);

        q_size = instr_type * num_descs;
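        /* Hypothetical worked example: with 64-byte commands
         * (instr_type == 64) and num_descs == 512, the memzone reserved
         * below is 64 * 512 = 32 KB.
         */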
        iq = lio_dev->instr_queue[iq_no];
        iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
                                             "instr_queue", iq_no, q_size,
                                             RTE_CACHE_LINE_SIZE,
                                             socket_id);
        if (iq->iq_mz == NULL) {
                lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
                            iq_no);
                return -1;
        }

        iq->base_addr_dma = iq->iq_mz->phys_addr;
        iq->base_addr = (uint8_t *)iq->iq_mz->addr;

        iq->max_count = num_descs;

        /* Initialize a list to hold requests that have been posted to
         * Octeon but have yet to be fetched by Octeon.
         */
        iq->request_list = rte_zmalloc_socket("request_list",
                                              sizeof(*iq->request_list) *
                                                        num_descs,
                                              RTE_CACHE_LINE_SIZE,
                                              socket_id);
        if (iq->request_list == NULL) {
                lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
                            iq_no);
                lio_dma_zone_free(lio_dev, iq->iq_mz);
                return -1;
        }

        lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
                    iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
                    iq->max_count);

        iq->lio_dev = lio_dev;
        iq->txpciq.txpciq64 = txpciq.txpciq64;
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->lio_read_index = 0;
        iq->flush_index = 0;

        rte_atomic64_set(&iq->instr_pending, 0);

        /* Initialize the spinlock for this instruction queue */
        rte_spinlock_init(&iq->lock);
        rte_spinlock_init(&iq->post_lock);

        rte_atomic64_clear(&iq->iq_flush_running);

        lio_dev->io_qmask.iq |= (1ULL << iq_no);

        /* Set the 32B/64B mode for each input queue */
        lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
        iq->iqcmd_64B = (instr_type == 64);

        lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);

        return 0;
}
int
lio_setup_instr_queue0(struct lio_device *lio_dev)
{
        union octeon_txpciq txpciq;
        uint32_t num_descs = 0;
        uint32_t iq_no = 0;

        num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);

        lio_dev->num_iqs = 0;

        lio_dev->instr_queue[0] = rte_zmalloc(NULL,
                                        sizeof(struct lio_instr_queue), 0);
        if (lio_dev->instr_queue[0] == NULL)
                return -ENOMEM;

        lio_dev->instr_queue[0]->q_index = 0;
        lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;

        txpciq.txpciq64 = 0;
        txpciq.s.q_no = iq_no;
        txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
        txpciq.s.use_qpg = 0;
        txpciq.s.qpg = 0;

        if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
                rte_free(lio_dev->instr_queue[0]);
                lio_dev->instr_queue[0] = NULL;
                return -1;
        }

        lio_dev->num_iqs++;

        return 0;
}
/**
 *  lio_delete_instr_queue()
 *  @param lio_dev	- pointer to the lio device structure.
 *  @param iq_no	- queue to be deleted.
 *
 *  Called at driver unload time for each input queue. Deletes all
 *  allocated resources for the input queue.
 */
static void
lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
{
        struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];

        rte_free(iq->request_list);
        iq->request_list = NULL;
        lio_dma_zone_free(lio_dev, iq->iq_mz);
}
void
lio_free_instr_queue0(struct lio_device *lio_dev)
{
        lio_delete_instr_queue(lio_dev, 0);
        rte_free(lio_dev->instr_queue[0]);
        lio_dev->instr_queue[0] = NULL;
        lio_dev->num_iqs--;
}
static inline void
lio_ring_doorbell(struct lio_device *lio_dev,
                  struct lio_instr_queue *iq)
{
        if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
                rte_write32(iq->fill_cnt, iq->doorbell_reg);
                /* make sure doorbell write goes through */
                rte_wmb();
                iq->fill_cnt = 0;
        }
}
static void
copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
        uint8_t *iqptr, cmdsize;

        cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
        iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

        rte_memcpy(iqptr, cmd, cmdsize);
}
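
/* Queue-full accounting in post_command2() below, with hypothetical
 * numbers: for an IQ with max_count == 512, a post is refused with
 * LIO_IQ_SEND_FAILED once 511 instructions are pending; at 510 pending
 * the command is still accepted but LIO_IQ_SEND_STOP tells the caller
 * to stop posting until Octeon drains the queue.
 */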
static inline struct lio_iq_post_status
post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
        struct lio_iq_post_status st;

        st.status = LIO_IQ_SEND_OK;

        /* This ensures that the read index does not wrap around to the same
         * position if queue gets full before Octeon could fetch any instr.
         */
        if (rte_atomic64_read(&iq->instr_pending) >=
                        (int32_t)(iq->max_count - 1)) {
                st.status = LIO_IQ_SEND_FAILED;
                st.index = -1;
                return st;
        }

        if (rte_atomic64_read(&iq->instr_pending) >=
                        (int32_t)(iq->max_count - 2))
                st.status = LIO_IQ_SEND_STOP;

        copy_cmd_into_iq(iq, cmd);

        /* "index" is returned, host_write_index is modified. */
        st.index = iq->host_write_index;
        iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
                                              iq->max_count);
        iq->fill_cnt++;

        /* Flush the command into memory. We need to be sure the data is in
         * memory before indicating that the instruction is pending.
         */
        rte_wmb();

        rte_atomic64_inc(&iq->instr_pending);

        return st;
}
static inline void
lio_add_to_request_list(struct lio_instr_queue *iq,
                        int idx, void *buf, int reqtype)
{
        iq->request_list[idx].buf = buf;
        iq->request_list[idx].reqtype = reqtype;
}
static int
lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
                 void *buf, uint32_t datasize __rte_unused, uint32_t reqtype)
{
        struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
        struct lio_iq_post_status st;

        rte_spinlock_lock(&iq->post_lock);

        st = post_command2(iq, cmd);

        if (st.status != LIO_IQ_SEND_FAILED) {
                lio_add_to_request_list(iq, st.index, buf, reqtype);
                lio_ring_doorbell(lio_dev, iq);
        }

        rte_spinlock_unlock(&iq->post_lock);

        return st.status;
}
void
lio_prepare_soft_command(struct lio_device *lio_dev,
                         struct lio_soft_command *sc, uint8_t opcode,
                         uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
                         uint64_t ossp1)
{
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        struct octeon_instr_rdp *rdp;

        RTE_ASSERT(opcode <= 15);
        RTE_ASSERT(subcode <= 127);

        ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

        ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;

        pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

        pki_ih3->w = 1;
        pki_ih3->raw = 1;
        pki_ih3->utag = 1;
        pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
        pki_ih3->utt = 1;

        pki_ih3->tag = LIO_CONTROL;
        pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
        pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
        pki_ih3->pm = 0x7;
        pki_ih3->sl = 8;

        if (sc->datasize)
                ih3->dlengsz = sc->datasize;

        irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
        irh->opcode = opcode;
        irh->subcode = subcode;

        /* opcode/subcode specific parameters (ossp) */
        irh->ossp = irh_ossp;
        sc->cmd.cmd3.ossp[0] = ossp0;
        sc->cmd.cmd3.ossp[1] = ossp1;

        if (sc->rdatasize) {
                rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
                rdp->pcie_port = lio_dev->pcie_port;
                rdp->rlen = sc->rdatasize;
                irh->rflag = 1;
                ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3;
        } else {
                irh->rflag = 0;
                ih3->fsz = OCTEON_PCI_CMD_O3;
        }
}
int
lio_send_soft_command(struct lio_device *lio_dev,
                      struct lio_soft_command *sc)
{
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        uint32_t len = 0;

        ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
        if (ih3->dlengsz) {
                RTE_ASSERT(sc->dmadptr);
                sc->cmd.cmd3.dptr = sc->dmadptr;
        }

        irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
        if (irh->rflag) {
                RTE_ASSERT(sc->dmarptr);
                RTE_ASSERT(sc->status_word != NULL);
                *sc->status_word = LIO_COMPLETION_WORD_INIT;
                sc->cmd.cmd3.rptr = sc->dmarptr;
        }

        len = (uint32_t)ih3->dlengsz;

        if (sc->wait_time)
                sc->timeout = lio_uptime + sc->wait_time;

        return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
                                LIO_REQTYPE_SOFT_COMMAND);
}
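
/* A hypothetical round trip through the soft-command helpers in this
 * file (names and sizes are illustrative only):
 *
 *	sc = lio_alloc_soft_command(lio_dev, 0, 16, 0);
 *	lio_prepare_soft_command(lio_dev, sc, opcode, subcode,
 *				 irh_ossp, ossp0, ossp1);
 *	sc->wait_time = 1000;
 *	lio_send_soft_command(lio_dev, sc);
 *
 * The caller is then expected to link sc onto lio_dev->response_list
 * and poll lio_process_ordered_list() until the completion callback
 * fires, finally releasing the buffer with lio_free_soft_command(sc).
 */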
int
lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
{
        char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
        uint16_t buf_size;

        buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
        snprintf(sc_pool_name, sizeof(sc_pool_name),
                 "lio_sc_pool_%u", lio_dev->port_id);
        lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
                                                LIO_MAX_SOFT_COMMAND_BUFFERS,
                                                0, 0, buf_size, SOCKET_ID_ANY);
        if (lio_dev->sc_buf_pool == NULL)
                return -ENOMEM;

        return 0;
}
void
lio_free_sc_buffer_pool(struct lio_device *lio_dev)
{
        rte_mempool_free(lio_dev->sc_buf_pool);
}
struct lio_soft_command *
lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
                       uint32_t rdatasize, uint32_t ctxsize)
{
        uint32_t offset = sizeof(struct lio_soft_command);
        struct lio_soft_command *sc;
        struct rte_mbuf *m;
        uint64_t dma_addr;

        RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
                   LIO_SOFT_COMMAND_BUFFER_SIZE);

        m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
        if (m == NULL) {
                lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
                return NULL;
        }

        /* set rte_mbuf data size and there is only 1 segment */
        m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
        m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;

        /* use rte_mbuf buffer for soft command */
        sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
        memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
        sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
        sc->dma_addr = rte_mbuf_data_dma_addr(m);
        sc->mbuf = m;

        dma_addr = sc->dma_addr;

        if (ctxsize) {
                sc->ctxptr = (uint8_t *)sc + offset;
                sc->ctxsize = ctxsize;
        }

        /* Start data at 128 byte boundary */
        offset = (offset + ctxsize + 127) & 0xffffff80;
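        /* e.g. an offset of 200 becomes (200 + 127) & 0xffffff80 = 256,
         * the next multiple of 128 (0xffffff80 is the 32-bit form of ~127)
         */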
        if (datasize) {
                sc->virtdptr = (uint8_t *)sc + offset;
                sc->dmadptr = dma_addr + offset;
                sc->datasize = datasize;
        }

        /* Start rdata at 128 byte boundary */
        offset = (offset + datasize + 127) & 0xffffff80;

        if (rdatasize) {
                RTE_ASSERT(rdatasize >= 16);
                sc->virtrptr = (uint8_t *)sc + offset;
                sc->dmarptr = dma_addr + offset;
                sc->rdatasize = rdatasize;
                sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
                                               rdatasize - 8);
        }

        return sc;
}
void
lio_free_soft_command(struct lio_soft_command *sc)
{
        rte_pktmbuf_free(sc->mbuf);
}
void
lio_setup_response_list(struct lio_device *lio_dev)
{
        STAILQ_INIT(&lio_dev->response_list.head);
        rte_spinlock_init(&lio_dev->response_list.lock);
        rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
}
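
/* lio_process_ordered_list() below completes pending soft commands in
 * FIFO order. The status word is seeded with LIO_COMPLETION_WORD_INIT
 * (see lio_send_soft_command() above), so a 0xFF in byte 0 or byte 7
 * of the word means the firmware's completion DMA has not fully landed
 * and the request is still pending.
 */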
int
lio_process_ordered_list(struct lio_device *lio_dev)
{
        int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
        struct lio_response_list *ordered_sc_list;
        struct lio_soft_command *sc;
        int request_complete = 0;
        uint64_t status64;
        uint32_t status;

        ordered_sc_list = &lio_dev->response_list;

        do {
                rte_spinlock_lock(&ordered_sc_list->lock);

                if (STAILQ_EMPTY(&ordered_sc_list->head)) {
                        /* ordered_sc_list is empty; there is
                         * nothing to process
                         */
                        rte_spinlock_unlock(&ordered_sc_list->lock);
                        return -1;
                }

                sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
                                             struct lio_soft_command, node);

                status = LIO_REQUEST_PENDING;

                /* check if octeon has finished DMA'ing a response
                 * to where rptr is pointing to
                 */
                status64 = *sc->status_word;

                if (status64 != LIO_COMPLETION_WORD_INIT) {
                        /* This logic ensures that all 64b have been written.
                         * 1. check byte 0 for non-FF
                         * 2. if non-FF, then swap result from BE to host order
                         * 3. check byte 7 (swapped to 0) for non-FF
                         * 4. if non-FF, use the low 32-bit status code
                         * 5. if either byte 0 or byte 7 is FF, don't use status
                         */
                        if ((status64 & 0xff) != 0xff) {
                                lio_swap_8B_data(&status64, 1);
                                if (((status64 & 0xff) != 0xff)) {
                                        /* retrieve 16-bit firmware status */
                                        status = (uint32_t)(status64 &
                                                            0xffffULL);
                                        if (status) {
                                                status =
                                                LIO_FIRMWARE_STATUS_CODE(
                                                                        status);
                                        } else {
                                                /* i.e. no error */
                                                status = LIO_REQUEST_DONE;
                                        }
                                }
                        }
                } else if (sc->timeout && lio_check_timeout(lio_uptime,
                                                            sc->timeout)) {
                        lio_dev_err(lio_dev,
                                    "cmd failed, timeout (%ld, %ld)\n",
                                    (long)lio_uptime, (long)sc->timeout);
                        status = LIO_REQUEST_TIMEOUT;
                }

                if (status != LIO_REQUEST_PENDING) {
                        /* we have received a response or we have timed out.
                         * remove node from linked list
                         */
                        STAILQ_REMOVE(&ordered_sc_list->head,
                                      &sc->node, lio_stailq_node, entries);
                        rte_atomic64_dec(
                            &lio_dev->response_list.pending_req_count);
                        rte_spinlock_unlock(&ordered_sc_list->lock);

                        if (sc->callback)
                                sc->callback(status, sc->callback_arg);

                        request_complete++;
                } else {
                        /* no response yet */
                        request_complete = 0;
                        rte_spinlock_unlock(&ordered_sc_list->lock);
                }

                /* If we hit the Max Ordered requests to process every loop,
                 * we quit and let this function be invoked the next time
                 * the poll thread runs to process the remaining requests.
                 * This function can take up the entire CPU if there is
                 * no upper limit to the requests processed.
                 */
                if (request_complete >= resp_to_process)
                        break;
        } while (request_complete);

        return 0;
}