/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "lio_logs.h"
#include "lio_struct.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"
/* Flush the IQ if the number of available tx descriptors falls below
 * LIO_FLUSH_WM.
 */
#define LIO_FLUSH_WM(_iq) ((_iq)->max_count / 2)

#define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
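/* The instruction/packet "done" counters are read back from hardware as
 * 32-bit values (rte_read32()); the mask above keeps only the low 32 bits
 * of the running delta when the read index is advanced in
 * lio_update_read_index(), so counter wrap-around is handled naturally.
 */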
static void
lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
{
	uint32_t count = 0;

	do {
		count += droq->buffer_size;
	} while (count < LIO_MAX_RX_PKTLEN);
}
static void
lio_droq_reset_indices(struct lio_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	rte_atomic64_set(&droq->pkts_pending, 0);
}
static void
lio_droq_destroy_ring_buffers(struct lio_droq *droq)
{
	uint32_t i;

	for (i = 0; i < droq->max_count; i++) {
		if (droq->recv_buf_list[i].buffer) {
			rte_pktmbuf_free((struct rte_mbuf *)
					 droq->recv_buf_list[i].buffer);
			droq->recv_buf_list[i].buffer = NULL;
		}
	}

	lio_droq_reset_indices(droq);
}
static void *
lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
{
	struct lio_droq *droq = lio_dev->droq[q_no];
	struct rte_mempool *mpool = droq->mpool;
	struct rte_mbuf *m;

	m = rte_pktmbuf_alloc(mpool);
	if (m == NULL) {
		lio_dev_err(lio_dev, "Cannot allocate\n");
		return NULL;
	}

	rte_mbuf_refcnt_set(m, 1);
	m->data_off = RTE_PKTMBUF_HEADROOM;

	return m;
}
static int
lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
			    struct lio_droq *droq)
{
	struct lio_droq_desc *desc_ring = droq->desc_ring;
	uint32_t i;
	void *buf;

	for (i = 0; i < droq->max_count; i++) {
		buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
		if (buf == NULL) {
			lio_dev_err(lio_dev, "buffer alloc failed\n");
			lio_droq_destroy_ring_buffers(droq);
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->info_list[i].length = 0;

		/* map ring buffers into memory */
		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	lio_droq_reset_indices(droq);

	lio_droq_compute_max_packet_bufs(droq);

	return 0;
}
static void
lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
{
	const struct rte_memzone *mz_tmp;
	int ret = 0;

	if (mz == NULL) {
		lio_dev_err(lio_dev, "Memzone NULL\n");
		return;
	}

	mz_tmp = rte_memzone_lookup(mz->name);
	if (mz_tmp == NULL) {
		lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
		return;
	}

	ret = rte_memzone_free(mz);
	if (ret)
		lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
}
/**
 *  Frees the space for the descriptor ring of the droq.
 *
 *  @param lio_dev	- pointer to the lio device structure
 *  @param q_no		- droq no.
 */
static void
lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
{
	struct lio_droq *droq = lio_dev->droq[q_no];

	lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);

	lio_droq_destroy_ring_buffers(droq);
	rte_free(droq->recv_buf_list);
	droq->recv_buf_list = NULL;
	lio_dma_zone_free(lio_dev, droq->info_mz);
	lio_dma_zone_free(lio_dev, droq->desc_ring_mz);

	memset(droq, 0, LIO_DROQ_SIZE);
}
static void *
lio_alloc_info_buffer(struct lio_device *lio_dev,
		      struct lio_droq *droq, unsigned int socket_id)
{
	droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
						 "info_list", droq->q_no,
						 (droq->max_count *
							LIO_DROQ_INFO_SIZE),
						 socket_id);
	if (droq->info_mz == NULL)
		return NULL;

	droq->info_list_dma = droq->info_mz->phys_addr;
	droq->info_alloc_size = droq->info_mz->len;
	droq->info_base_addr = (size_t)droq->info_mz->addr;

	return droq->info_mz->addr;
}
/**
 *  Allocates space for the descriptor ring of the droq and
 *  sets the base addr, num desc etc in Octeon registers.
 *
 *  @param lio_dev	- pointer to the lio device structure
 *  @param q_no		- droq no.
 *  @param app_ctx	- pointer to application context
 *  @return Success: 0 Failure: -1
 */
static int
lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
	      uint32_t num_descs, uint32_t desc_size,
	      struct rte_mempool *mpool, unsigned int socket_id)
{
	uint32_t c_refill_threshold;
	uint32_t desc_ring_size;
	struct lio_droq *droq;

	lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);

	droq = lio_dev->droq[q_no];
	droq->lio_dev = lio_dev;
	droq->q_no = q_no;
	droq->mpool = mpool;

	c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);

	droq->max_count = num_descs;
	droq->buffer_size = desc_size;

	desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
	droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
						      "droq", q_no,
						      desc_ring_size,
						      socket_id);
	if (droq->desc_ring_mz == NULL) {
		lio_dev_err(lio_dev,
			    "Output queue %d ring alloc failed\n", q_no);
		return -1;
	}

	droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
	droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;

	lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		    q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
	lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
		    droq->max_count);

	droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
	if (droq->info_list == NULL) {
		lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
		goto init_droq_fail;
	}

	droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
						 (droq->max_count *
							LIO_DROQ_RECVBUF_SIZE),
						 RTE_CACHE_LINE_SIZE,
						 socket_id);
	if (droq->recv_buf_list == NULL) {
		lio_dev_err(lio_dev,
			    "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (lio_droq_setup_ring_buffers(lio_dev, droq))
		goto init_droq_fail;

	droq->refill_threshold = c_refill_threshold;

	rte_spinlock_init(&droq->lock);

	lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);

	lio_dev->io_qmask.oq |= (1ULL << q_no);

	return 0;

init_droq_fail:
	lio_delete_droq(lio_dev, q_no);

	return -1;
}
int
lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
	       int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
{
	struct lio_droq *droq;

	PMD_INIT_FUNC_TRACE();

	if (lio_dev->droq[oq_no]) {
		lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
		return 0;
	}

	/* Allocate the DS for the new droq. */
	droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (droq == NULL)
		return -ENOMEM;

	lio_dev->droq[oq_no] = droq;

	/* Initialize the Droq */
	if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
			  socket_id)) {
		lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
		rte_free(lio_dev->droq[oq_no]);
		lio_dev->droq[oq_no] = NULL;
		return -ENOMEM;
	}

	lio_dev->num_oqs++;

	lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);

	/* Send credit for octeon output queues. credits are always
	 * sent after the output queue is enabled.
	 */
	rte_write32(lio_dev->droq[oq_no]->max_count,
		    lio_dev->droq[oq_no]->pkts_credit_reg);

	return 0;
}
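/* Note: writing max_count to pkts_credit_reg above hands Octeon a credit for
 * every descriptor in the freshly filled ring; from then on credits are only
 * returned piecemeal by lio_droq_refill() as buffers are replenished.
 */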
static inline uint32_t
lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
{
	uint32_t buf_cnt = 0;

	while (total_len > (buf_size * buf_cnt))
		buf_cnt++;

	return buf_cnt;
}
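/* The loop above is an integer ceiling division: the returned count is the
 * smallest number of ring buffers whose combined size covers total_len,
 * i.e. (total_len + buf_size - 1) / buf_size.
 */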
/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline uint32_t
lio_droq_refill_pullup_descs(struct lio_droq *droq,
			     struct lio_droq_desc *desc_ring)
{
	uint32_t refill_index = droq->refill_idx;
	uint32_t desc_refilled = 0;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				droq->refill_idx = lio_incr_index(
							droq->refill_idx, 1,
							droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}

		refill_index = lio_incr_index(refill_index, 1,
					      droq->max_count);
	}

	return desc_refilled;
}
/** lio_droq_refill
 *
 * @param lio_dev	- pointer to the lio device structure
 * @param droq		- droq in which descriptors require new buffers.
 *
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 *
 * Returns:
 *  No of descriptors refilled.
 *
 * Locks:
 *  This routine is called with droq->lock held.
 */
static uint32_t
lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
{
	struct lio_droq_desc *desc_ring;
	uint32_t desc_refilled = 0;
	void *buf = NULL;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
			buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
			/* If a buffer could not be allocated, no point in
			 * continuing.
			 */
			if (buf == NULL)
				break;

			droq->recv_buf_list[droq->refill_idx].buffer = buf;
		}

		desc_ring[droq->refill_idx].buffer_ptr =
		    lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);
		/* Reset any previous values in the length field. */
		droq->info_list[droq->refill_idx].length = 0;

		droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
						  droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still non-zero here it will not change in
	 * pass two: the pullup above only moved buffers to close the gap in
	 * the ring, the number of descriptors still waiting for a buffer is
	 * unchanged.
	 */
	return desc_refilled;
}
static int
lio_droq_fast_process_packet(struct lio_device *lio_dev,
			     struct lio_droq *droq,
			     struct rte_mbuf **rx_pkts)
{
	struct rte_mbuf *nicbuf = NULL;
	struct lio_droq_info *info;
	uint32_t total_len = 0;
	int data_total_len = 0;
	uint32_t pkt_len = 0;
	union octeon_rh *rh;
	int data_pkts = 0;

	info = &droq->info_list[droq->read_idx];
	lio_swap_8B_data((uint64_t *)info, 2);

	if (!info->length)
		return -1;

	/* Len of resp hdr is included in the received data len. */
	info->length -= OCTEON_RH_SIZE;
	rh = &info->rh;

	total_len += (uint32_t)info->length;

	if (lio_opcode_slow_path(rh)) {
		uint32_t buf_cnt;

		buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
						(uint32_t)info->length);
		droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
						droq->max_count);
		droq->refill_count += buf_cnt;
	} else {
		if (info->length <= droq->buffer_size) {
			if (rh->r_dh.has_hash)
				pkt_len = (uint32_t)(info->length - 8);
			else
				pkt_len = (uint32_t)info->length;

			nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
			droq->recv_buf_list[droq->read_idx].buffer = NULL;
			droq->read_idx = lio_incr_index(
						droq->read_idx, 1,
						droq->max_count);
			droq->refill_count++;

			if (likely(nicbuf != NULL)) {
				nicbuf->data_off = RTE_PKTMBUF_HEADROOM;

				/* We don't have a way to pass flags yet */
				nicbuf->ol_flags = 0;
				if (rh->r_dh.has_hash) {
					uint64_t *hash_ptr;

					nicbuf->ol_flags |= PKT_RX_RSS_HASH;
					hash_ptr = rte_pktmbuf_mtod(nicbuf,
								    uint64_t *);
					lio_swap_8B_data(hash_ptr, 1);
					nicbuf->hash.rss = (uint32_t)*hash_ptr;
					nicbuf->data_off += 8;
				}

				nicbuf->pkt_len = pkt_len;
				nicbuf->data_len = pkt_len;
				nicbuf->port = lio_dev->port_id;

				rx_pkts[data_pkts++] = nicbuf;
				data_total_len += pkt_len;
			}

			/* Prefetch buffer pointers when on a cache line
			 * boundary
			 */
			if ((droq->read_idx & 3) == 0) {
				rte_prefetch0(
				    &droq->recv_buf_list[droq->read_idx]);
				rte_prefetch0(
				    &droq->info_list[droq->read_idx]);
			}
		} else {
			struct rte_mbuf *first_buf = NULL;
			struct rte_mbuf *last_buf = NULL;

			while (pkt_len < info->length) {
				int cpy_len = 0;

				cpy_len = ((pkt_len + droq->buffer_size) >
						info->length)
						? ((uint32_t)info->length -
							pkt_len)
						: droq->buffer_size;

				nicbuf =
				    droq->recv_buf_list[droq->read_idx].buffer;
				droq->recv_buf_list[droq->read_idx].buffer =
				    NULL;

				if (likely(nicbuf != NULL)) {
					/* Note the first seg */
					if (!pkt_len)
						first_buf = nicbuf;

					nicbuf->data_off = RTE_PKTMBUF_HEADROOM;

					nicbuf->port = lio_dev->port_id;
					/* We don't have a way to pass
					 * flags yet
					 */
					nicbuf->ol_flags = 0;
					if ((!pkt_len) && (rh->r_dh.has_hash)) {
						uint64_t *hash_ptr;

						nicbuf->ol_flags |=
						    PKT_RX_RSS_HASH;
						hash_ptr = rte_pktmbuf_mtod(
						    nicbuf, uint64_t *);
						lio_swap_8B_data(hash_ptr, 1);
						nicbuf->hash.rss =
						    (uint32_t)*hash_ptr;
						nicbuf->data_off += 8;
						nicbuf->pkt_len = cpy_len - 8;
						nicbuf->data_len = cpy_len - 8;
					} else {
						nicbuf->pkt_len = cpy_len;
						nicbuf->data_len = cpy_len;
					}

					if (pkt_len)
						first_buf->nb_segs++;

					if (last_buf)
						last_buf->next = nicbuf;

					last_buf = nicbuf;
				} else {
					PMD_RX_LOG(lio_dev, ERR, "no buf\n");
				}

				pkt_len += cpy_len;
				droq->read_idx = lio_incr_index(
							droq->read_idx, 1,
							droq->max_count);
				droq->refill_count++;

				/* Prefetch buffer pointers when on a
				 * cache line boundary
				 */
				if ((droq->read_idx & 3) == 0) {
					rte_prefetch0(&droq->recv_buf_list
							[droq->read_idx]);
					rte_prefetch0(
					    &droq->info_list[droq->read_idx]);
				}
			}

			rx_pkts[data_pkts++] = first_buf;
			if (rh->r_dh.has_hash)
				data_total_len += (pkt_len - 8);
			else
				data_total_len += pkt_len;
		}

		/* Inform upper layer about packet checksum verification */
		struct rte_mbuf *m = rx_pkts[data_pkts - 1];

		if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
			m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

		if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
			m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	if (droq->refill_count >= droq->refill_threshold) {
		int desc_refilled = lio_droq_refill(lio_dev, droq);

		/* Flush the droq descriptor data to memory to be sure
		 * that when we update the credits the data in memory is
		 * accurate.
		 */
		rte_wmb();
		rte_write32(desc_refilled, droq->pkts_credit_reg);
		/* make sure mmio write completes */
		rte_wmb();
	}

	return data_pkts;
}
static uint32_t
lio_droq_fast_process_packets(struct lio_device *lio_dev,
			      struct lio_droq *droq,
			      struct rte_mbuf **rx_pkts,
			      uint32_t pkts_to_process)
{
	int ret, data_pkts = 0;
	uint32_t pkt;

	for (pkt = 0; pkt < pkts_to_process; pkt++) {
		ret = lio_droq_fast_process_packet(lio_dev, droq,
						   &rx_pkts[data_pkts]);
		if (ret < 0) {
			lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				    lio_dev->port_id, droq->q_no,
				    droq->read_idx, pkts_to_process);
			break;
		}
		data_pkts += ret;
	}

	rte_atomic64_sub(&droq->pkts_pending, pkt);

	return data_pkts;
}
static inline uint32_t
lio_droq_check_hw_for_pkts(struct lio_droq *droq)
{
	uint32_t last_count;
	uint32_t pkt_count;

	pkt_count = rte_read32(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	if (last_count)
		rte_atomic64_add(&droq->pkts_pending, last_count);

	return last_count;
}
uint16_t
lio_dev_recv_pkts(void *rx_queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t budget)
{
	struct lio_droq *droq = rx_queue;
	struct lio_device *lio_dev = droq->lio_dev;
	uint32_t pkts_processed = 0;
	uint32_t pkt_count = 0;

	lio_droq_check_hw_for_pkts(droq);

	pkt_count = rte_atomic64_read(&droq->pkts_pending);
	if (!pkt_count)
		return 0;

	if (pkt_count > budget)
		pkt_count = budget;

	/* Grab the lock */
	rte_spinlock_lock(&droq->lock);
	pkts_processed = lio_droq_fast_process_packets(lio_dev,
							droq, rx_pkts,
							pkt_count);

	if (droq->pkt_count) {
		rte_write32(droq->pkt_count, droq->pkts_sent_reg);
		droq->pkt_count = 0;
	}

	/* Release the spin lock */
	rte_spinlock_unlock(&droq->lock);

	return pkts_processed;
}
void
lio_delete_droq_queue(struct lio_device *lio_dev,
		      int oq_no)
{
	lio_delete_droq(lio_dev, oq_no);

	rte_free(lio_dev->droq[oq_no]);
	lio_dev->droq[oq_no] = NULL;
}
/**
 *  lio_init_instr_queue()
 *  @param lio_dev	- pointer to the lio device structure.
 *  @param txpciq	- queue to be initialized.
 *
 *  Called at driver init time for each input queue. iq_conf has the
 *  configuration parameters for the queue.
 *
 *  @return Success: 0 Failure: -1
 */
static int
lio_init_instr_queue(struct lio_device *lio_dev,
		     union octeon_txpciq txpciq,
		     uint32_t num_descs, unsigned int socket_id)
{
	uint32_t iq_no = (uint32_t)txpciq.s.q_no;
	struct lio_instr_queue *iq;
	uint32_t instr_type;
	uint32_t q_size;

	instr_type = LIO_IQ_INSTR_TYPE(lio_dev);

	q_size = instr_type * num_descs;
	iq = lio_dev->instr_queue[iq_no];
	iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
					     "instr_queue", iq_no, q_size,
					     socket_id);
	if (iq->iq_mz == NULL) {
		lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return -1;
	}

	iq->base_addr_dma = iq->iq_mz->phys_addr;
	iq->base_addr = (uint8_t *)iq->iq_mz->addr;

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon.
	 */
	iq->request_list = rte_zmalloc_socket("request_list",
					      sizeof(*iq->request_list) *
							num_descs,
					      RTE_CACHE_LINE_SIZE,
					      socket_id);
	if (iq->request_list == NULL) {
		lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
			    iq_no);
		lio_dma_zone_free(lio_dev, iq->iq_mz);
		return -1;
	}

	lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
		    iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
		    iq->max_count);

	iq->lio_dev = lio_dev;
	iq->txpciq.txpciq64 = txpciq.txpciq64;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->lio_read_index = 0;
	iq->flush_index = 0;

	rte_atomic64_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	rte_spinlock_init(&iq->lock);
	rte_spinlock_init(&iq->post_lock);

	rte_atomic64_clear(&iq->iq_flush_running);

	lio_dev->io_qmask.iq |= (1ULL << iq_no);

	/* Set the 32B/64B mode for each input queue */
	lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
	iq->iqcmd_64B = (instr_type == 64);

	lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);

	return 0;
}
int
lio_setup_instr_queue0(struct lio_device *lio_dev)
{
	union octeon_txpciq txpciq;
	uint32_t num_descs = 0;
	uint32_t iq_no = 0;

	num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);

	lio_dev->num_iqs = 0;

	lio_dev->instr_queue[0] = rte_zmalloc(NULL,
					sizeof(struct lio_instr_queue), 0);
	if (lio_dev->instr_queue[0] == NULL)
		return -ENOMEM;

	lio_dev->instr_queue[0]->q_index = 0;
	lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;

	txpciq.txpciq64 = 0;
	txpciq.s.q_no = iq_no;
	txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
	txpciq.s.use_qpg = 0;

	if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
		rte_free(lio_dev->instr_queue[0]);
		lio_dev->instr_queue[0] = NULL;
		return -1;
	}

	lio_dev->num_iqs++;

	return 0;
}
/**
 *  lio_delete_instr_queue()
 *  @param lio_dev	- pointer to the lio device structure.
 *  @param iq_no	- queue to be deleted.
 *
 *  Called at driver unload time for each input queue. Deletes all
 *  allocated resources for the input queue.
 */
static void
lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];

	rte_free(iq->request_list);
	iq->request_list = NULL;
	lio_dma_zone_free(lio_dev, iq->iq_mz);
}
void
lio_free_instr_queue0(struct lio_device *lio_dev)
{
	lio_delete_instr_queue(lio_dev, 0);
	rte_free(lio_dev->instr_queue[0]);
	lio_dev->instr_queue[0] = NULL;
	lio_dev->num_iqs--;
}
/* Return 0 on success, -1 on failure */
int
lio_setup_iq(struct lio_device *lio_dev, int q_index,
	     union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,
	     unsigned int socket_id)
{
	uint32_t iq_no = (uint32_t)txpciq.s.q_no;

	if (lio_dev->instr_queue[iq_no]) {
		lio_dev_dbg(lio_dev, "IQ is in use. Cannot create the IQ: %d again\n",
			    iq_no);
		lio_dev->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
		lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}

	lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
						sizeof(struct lio_instr_queue),
						RTE_CACHE_LINE_SIZE, socket_id);
	if (lio_dev->instr_queue[iq_no] == NULL)
		return -1;

	lio_dev->instr_queue[iq_no]->q_index = q_index;
	lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;

	if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id))
		goto release_lio_iq;

	lio_dev->num_iqs++;
	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		goto delete_lio_iq;

	return 0;

delete_lio_iq:
	lio_delete_instr_queue(lio_dev, iq_no);
	lio_dev->num_iqs--;
release_lio_iq:
	rte_free(lio_dev->instr_queue[iq_no]);
	lio_dev->instr_queue[iq_no] = NULL;

	return -1;
}
static inline void
lio_ring_doorbell(struct lio_device *lio_dev,
		  struct lio_instr_queue *iq)
{
	if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
		rte_write32(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		rte_wmb();
		iq->fill_cnt = 0;
	}
}
static inline void
copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
	uint8_t *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	rte_memcpy(iqptr, cmd, cmdsize);
}
static inline struct lio_iq_post_status
post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
	struct lio_iq_post_status st;

	st.status = LIO_IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (rte_atomic64_read(&iq->instr_pending) >=
			(int32_t)(iq->max_count - 1)) {
		st.status = LIO_IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (rte_atomic64_read(&iq->instr_pending) >=
			(int32_t)(iq->max_count - 2))
		st.status = LIO_IQ_SEND_STOP;
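	/* Note on the two thresholds above: with max_count - 1 instructions
	 * outstanding the ring is treated as full and the post fails; at
	 * max_count - 2 the command is still accepted but the caller is told
	 * to stop queueing more until Octeon has fetched some instructions.
	 */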
	copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
					      iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	rte_wmb();

	rte_atomic64_inc(&iq->instr_pending);

	return st;
}
static inline void
lio_add_to_request_list(struct lio_instr_queue *iq,
			int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}
static void
lio_free_netsgbuf(void *buf)
{
	struct lio_buf_free_info *finfo = buf;
	struct lio_device *lio_dev = finfo->lio_dev;
	struct rte_mbuf *m = finfo->mbuf;
	struct lio_gather *g = finfo->g;
	uint8_t iq = finfo->iq_no;

	/* This will take care of multiple segments also */
	rte_pktmbuf_free(m);

	rte_spinlock_lock(&lio_dev->glist_lock[iq]);
	STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries);
	rte_spinlock_unlock(&lio_dev->glist_lock[iq]);

	rte_free(finfo);
}
/* Can only run in process context */
static int
lio_process_iq_request_list(struct lio_device *lio_dev,
			    struct lio_instr_queue *iq)
{
	struct octeon_instr_irh *irh = NULL;
	uint32_t old = iq->flush_index;
	struct lio_soft_command *sc;
	uint32_t inst_count = 0;
	int reqtype;
	void *buf;

	while (old != iq->lio_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == LIO_REQTYPE_NONE)
			goto skip_this;

		switch (reqtype) {
		case LIO_REQTYPE_NORESP_NET:
			rte_pktmbuf_free((struct rte_mbuf *)buf);
			break;
		case LIO_REQTYPE_NORESP_NET_SG:
			lio_free_netsgbuf(buf);
			break;
		case LIO_REQTYPE_SOFT_COMMAND:
			sc = buf;
			irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				rte_spinlock_lock(&lio_dev->response_list.lock);
				rte_atomic64_inc(
				    &lio_dev->response_list.pending_req_count);
				STAILQ_INSERT_TAIL(
					&lio_dev->response_list.head,
					&sc->node, entries);
				rte_spinlock_unlock(
						&lio_dev->response_list.lock);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(LIO_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			lio_dev_err(lio_dev,
				    "Unknown reqtype: %d buf: %p at idx %d\n",
				    reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = lio_incr_index(old, 1, iq->max_count);
	}

	iq->flush_index = old;

	return inst_count;
}
static void
lio_update_read_index(struct lio_instr_queue *iq)
{
	uint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg);
	uint32_t last_done;

	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	/* Add last_done and modulo with the IQ size to get new index */
	iq->lio_read_index = (iq->lio_read_index +
			(uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
			iq->max_count;
}
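/* Example: with max_count = 512, lio_read_index = 500 and 20 newly fetched
 * instructions, the new read index is (500 + 20) % 512 = 8, i.e. the index
 * wraps around the ring; the unsigned subtraction above likewise copes with
 * wrap-around of the hardware instruction counter itself.
 */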
static void
lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
{
	uint32_t tot_inst_processed = 0;
	uint32_t inst_processed = 0;

	if (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0)
		return;

	rte_spinlock_lock(&iq->lock);

	lio_update_read_index(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->lio_read_index)
			break;

		inst_processed = lio_process_iq_request_list(lio_dev, iq);

		if (inst_processed)
			rte_atomic64_sub(&iq->instr_pending, inst_processed);

		tot_inst_processed += inst_processed;
		inst_processed = 0;
	} while (1);

	rte_spinlock_unlock(&iq->lock);

	rte_atomic64_clear(&iq->iq_flush_running);
}
static int
lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
		 void *buf, uint32_t datasize __rte_unused, uint32_t reqtype)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
	struct lio_iq_post_status st;

	rte_spinlock_lock(&iq->post_lock);

	st = post_command2(iq, cmd);

	if (st.status != LIO_IQ_SEND_FAILED) {
		lio_add_to_request_list(iq, st.index, buf, reqtype);
		lio_ring_doorbell(lio_dev, iq);
	}

	rte_spinlock_unlock(&iq->post_lock);

	return st.status;
}
void
lio_prepare_soft_command(struct lio_device *lio_dev,
			 struct lio_soft_command *sc, uint8_t opcode,
			 uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
			 uint64_t ossp1)
{
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	RTE_ASSERT(opcode <= 15);
	RTE_ASSERT(subcode <= 127);

	ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

	ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;

	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

	pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;

	pki_ih3->tag = LIO_CONTROL;
	pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
	pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;

	if (sc->datasize)
		ih3->dlengsz = sc->datasize;

	irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
	irh->opcode = opcode;
	irh->subcode = subcode;

	/* opcode/subcode specific parameters (ossp) */
	irh->ossp = irh_ossp;
	sc->cmd.cmd3.ossp[0] = ossp0;
	sc->cmd.cmd3.ossp[1] = ossp1;

	if (sc->rdatasize) {
		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
		rdp->pcie_port = lio_dev->pcie_port;
		rdp->rlen = sc->rdatasize;
		irh->rflag = 1;
		ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3;
	} else {
		irh->rflag = 0;
		ih3->fsz = OCTEON_PCI_CMD_O3;
	}
}
int
lio_send_soft_command(struct lio_device *lio_dev,
		      struct lio_soft_command *sc)
{
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	uint32_t len = 0;

	ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
	if (ih3->dlengsz) {
		RTE_ASSERT(sc->dmadptr);
		sc->cmd.cmd3.dptr = sc->dmadptr;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
	if (irh->rflag) {
		RTE_ASSERT(sc->dmarptr);
		RTE_ASSERT(sc->status_word != NULL);
		*sc->status_word = LIO_COMPLETION_WORD_INIT;
		sc->cmd.cmd3.rptr = sc->dmarptr;
	}

	len = (uint32_t)ih3->dlengsz;

	if (sc->wait_time)
		sc->timeout = lio_uptime + sc->wait_time;

	return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
				LIO_REQTYPE_SOFT_COMMAND);
}
int
lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
{
	char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
	uint16_t buf_size;

	buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
	snprintf(sc_pool_name, sizeof(sc_pool_name),
		 "lio_sc_pool_%u", lio_dev->port_id);
	lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
						LIO_MAX_SOFT_COMMAND_BUFFERS,
						0, 0, buf_size, SOCKET_ID_ANY);
	if (lio_dev->sc_buf_pool == NULL)
		return -ENOMEM;

	return 0;
}

void
lio_free_sc_buffer_pool(struct lio_device *lio_dev)
{
	rte_mempool_free(lio_dev->sc_buf_pool);
}
struct lio_soft_command *
lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
		       uint32_t rdatasize, uint32_t ctxsize)
{
	uint32_t offset = sizeof(struct lio_soft_command);
	struct lio_soft_command *sc;
	struct rte_mbuf *m;
	uint64_t dma_addr;

	RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
		   LIO_SOFT_COMMAND_BUFFER_SIZE);

	m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
	if (m == NULL) {
		lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
		return NULL;
	}

	/* set rte_mbuf data size and there is only 1 segment */
	m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
	m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;

	/* use rte_mbuf buffer for soft command */
	sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
	memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
	sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
	sc->dma_addr = rte_mbuf_data_dma_addr(m);
	sc->mbuf = m;

	dma_addr = sc->dma_addr;

	if (ctxsize) {
		sc->ctxptr = (uint8_t *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;
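	/* The mask above is just ~127 written out: adding 127 and clearing
	 * the low 7 bits rounds offset up to the next multiple of 128, so the
	 * data area always starts on a 128 byte boundary within the buffer.
	 */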
	if (datasize) {
		sc->virtdptr = (uint8_t *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		RTE_ASSERT(rdatasize >= 16);
		sc->virtrptr = (uint8_t *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
					       rdatasize - 8);
	}

	return sc;
}
void
lio_free_soft_command(struct lio_soft_command *sc)
{
	rte_pktmbuf_free(sc->mbuf);
}

void
lio_setup_response_list(struct lio_device *lio_dev)
{
	STAILQ_INIT(&lio_dev->response_list.head);
	rte_spinlock_init(&lio_dev->response_list.lock);
	rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
}
int
lio_process_ordered_list(struct lio_device *lio_dev)
{
	int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
	struct lio_response_list *ordered_sc_list;
	struct lio_soft_command *sc;
	int request_complete = 0;
	uint64_t status64;
	uint32_t status;

	ordered_sc_list = &lio_dev->response_list;

	do {
		rte_spinlock_lock(&ordered_sc_list->lock);

		if (STAILQ_EMPTY(&ordered_sc_list->head)) {
			/* ordered_sc_list is empty; there is
			 * nothing to process
			 */
			rte_spinlock_unlock(&ordered_sc_list->lock);
			return -1;
		}

		sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
					     struct lio_soft_command, node);

		status = LIO_REQUEST_PENDING;

		/* check if octeon has finished DMA'ing a response
		 * to where rptr is pointing
		 */
		status64 = *sc->status_word;

		if (status64 != LIO_COMPLETION_WORD_INIT) {
			/* This logic ensures that all 64b have been written.
			 * 1. check byte 0 for non-FF
			 * 2. if non-FF, then swap result from BE to host order
			 * 3. check byte 7 (swapped to 0) for non-FF
			 * 4. if non-FF, use the low 32-bit status code
			 * 5. if either byte 0 or byte 7 is FF, don't use status
			 */
			if ((status64 & 0xff) != 0xff) {
				lio_swap_8B_data(&status64, 1);
				if (((status64 & 0xff) != 0xff)) {
					/* retrieve 16-bit firmware status */
					status = (uint32_t)(status64 &
							    0xffffULL);
					if (status)
						status =
						LIO_FIRMWARE_STATUS_CODE(
									status);
					else
						status = LIO_REQUEST_DONE;
				}
			}
		} else if ((sc->timeout && lio_check_timeout(lio_uptime,
							     sc->timeout))) {
			lio_dev_err(lio_dev,
				    "cmd failed, timeout (%ld, %ld)\n",
				    (long)lio_uptime, (long)sc->timeout);
			status = LIO_REQUEST_TIMEOUT;
		}

		if (status != LIO_REQUEST_PENDING) {
			/* we have received a response or we have timed out.
			 * remove node from linked list
			 */
			STAILQ_REMOVE(&ordered_sc_list->head,
				      &sc->node, lio_stailq_node, entries);
			rte_atomic64_dec(
			    &lio_dev->response_list.pending_req_count);
			rte_spinlock_unlock(&ordered_sc_list->lock);

			if (sc->callback)
				sc->callback(status, sc->callback_arg);

			request_complete++;
		} else {
			/* no response yet */
			request_complete = 0;
			rte_spinlock_unlock(&ordered_sc_list->lock);
		}

		/* If we hit the Max Ordered requests to process every loop,
		 * we quit and let this function be invoked the next time
		 * the poll thread runs to process the remaining requests.
		 * This function can take up the entire CPU if there is
		 * no upper limit to the requests processed.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return 0;
}
static inline struct lio_stailq_node *
list_delete_first_node(struct lio_stailq_head *head)
{
	struct lio_stailq_node *node;

	if (STAILQ_EMPTY(head))
		node = NULL;
	else
		node = STAILQ_FIRST(head);

	if (node)
		STAILQ_REMOVE(head, node, lio_stailq_node, entries);

	return node;
}
static void
lio_delete_sglist(struct lio_instr_queue *txq)
{
	struct lio_device *lio_dev = txq->lio_dev;
	int iq_no = txq->q_index;
	struct lio_gather *g;

	if (lio_dev->glist_head == NULL)
		return;

	do {
		g = (struct lio_gather *)list_delete_first_node(
						&lio_dev->glist_head[iq_no]);
		if (g) {
			if (g->sg)
				rte_free(
				    (void *)((unsigned long)g->sg - g->adjust));
			rte_free(g);
		}
	} while (g);
}
/**
 * \brief Setup gather lists
 * @param lio_dev - pointer to the lio device structure
 */
int
lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
		  int fw_mapped_iq, int num_descs, unsigned int socket_id)
{
	struct lio_gather *g;
	int i;

	rte_spinlock_init(&lio_dev->glist_lock[iq_no]);

	STAILQ_INIT(&lio_dev->glist_head[iq_no]);

	for (i = 0; i < num_descs; i++) {
		g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,
				       socket_id);
		if (g == NULL) {
			lio_dev_err(lio_dev,
				    "lio_gather memory allocation failed for qno %d\n",
				    iq_no);
			break;
		}

		g->sg_size =
		    ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);

		g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,
					   RTE_CACHE_LINE_SIZE, socket_id);
		if (g->sg == NULL) {
			lio_dev_err(lio_dev,
				    "sg list memory allocation failed for qno %d\n",
				    iq_no);
			rte_free(g);
			break;
		}

		/* The gather component should be aligned on 64-bit boundary */
		if (((unsigned long)g->sg) & 7) {
			g->adjust = 8 - (((unsigned long)g->sg) & 7);
			g->sg =
			    (struct lio_sg_entry *)((unsigned long)g->sg +
						    g->adjust);
		}

		STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,
				   entries);
	}
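	/* Note: the 8 extra bytes allocated for g->sg above are what make the
	 * alignment fix-up possible, and g->adjust is subtracted again in
	 * lio_delete_sglist() so the pointer handed back to rte_free()
	 * matches the one originally returned by rte_zmalloc_socket().
	 */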
	if (i != num_descs) {
		lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);
		return -ENOMEM;
	}

	return 0;
}
static void
lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
{
	lio_delete_instr_queue(lio_dev, iq_no);
	rte_free(lio_dev->instr_queue[iq_no]);
	lio_dev->instr_queue[iq_no] = NULL;
}
static inline uint32_t
lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
{
	return ((lio_dev->instr_queue[q_no]->max_count - 1) -
		(uint32_t)rte_atomic64_read(
				&lio_dev->instr_queue[q_no]->instr_pending));
}

static inline int
lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
{
	return ((uint32_t)rte_atomic64_read(
				&lio_dev->instr_queue[q_no]->instr_pending) >=
		(lio_dev->instr_queue[q_no]->max_count - 2));
}
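/* The -1 / -2 margins above mirror the thresholds used in post_command2():
 * the queue is reported full one entry early so that the host write index
 * can never wrap around onto Octeon's read index.
 */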
static int
lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
	uint32_t count = 10000;

	while ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) &&
			--count)
		lio_flush_iq(lio_dev, iq);

	return count ? 0 : 1;
}
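/* This is a bounded busy-wait: at most 10000 flush attempts are made to bring
 * the number of free descriptors back above the LIO_FLUSH_WM watermark; 0 is
 * returned on success, 1 if the queue could not be drained in time.
 */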
/** Send data packet to the device
 * @param lio_dev	- lio device pointer
 * @param ndata		- control structure with queueing and buffer information
 *
 * @returns LIO_IQ_SEND_FAILED if it failed to add to the input queue,
 * LIO_IQ_SEND_STOP if the queue should be stopped,
 * and LIO_IQ_SEND_OK if it sent okay.
 */
static inline int
lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata)
{
	return lio_send_command(lio_dev, ndata->q_no, &ndata->cmd,
				ndata->buf, ndata->datasize, ndata->reqtype);
}
uint16_t
lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	struct lio_instr_queue *txq = tx_queue;
	union lio_cmd_setup cmdsetup;
	struct lio_device *lio_dev;
	struct lio_data_pkt ndata;
	int i, processed = 0;
	struct rte_mbuf *m;
	uint32_t tag = 0;
	int status = 0;
	int iq_no = 0;

	lio_dev = txq->lio_dev;
	iq_no = txq->txpciq.s.q_no;

	if (!lio_dev->linfo.link.s.link_up) {
		PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
			   lio_dev->linfo.link.s.link_up);
		goto xmit_failed;
	}

	lio_dev_cleanup_iq(lio_dev, iq_no);

	for (i = 0; i < nb_pkts; i++) {
		uint32_t pkt_len = 0;

		m = pkts[i];

		/* Prepare the attributes for the data to be passed to BASE. */
		memset(&ndata, 0, sizeof(struct lio_data_pkt));

		ndata.buf = m;

		ndata.q_no = iq_no;
		if (lio_iq_is_full(lio_dev, ndata.q_no)) {
			if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
				PMD_TX_LOG(lio_dev, ERR,
					   "Transmit failed iq:%d full\n",
					   ndata.q_no);
				break;
			}
		}

		cmdsetup.cmd_setup64 = 0;
		cmdsetup.s.iq_no = iq_no;

		/* check checksum offload flags to form cmd */
		if (m->ol_flags & PKT_TX_IP_CKSUM)
			cmdsetup.s.ip_csum = 1;

		if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
		    (m->ol_flags & PKT_TX_UDP_CKSUM))
			cmdsetup.s.transport_csum = 1;

		if (m->nb_segs == 1) {
			pkt_len = rte_pktmbuf_data_len(m);
			cmdsetup.s.u.datasize = pkt_len;
			lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
					    &cmdsetup, tag);
			ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
			ndata.reqtype = LIO_REQTYPE_NORESP_NET;
		} else {
			struct lio_buf_free_info *finfo;
			struct lio_gather *g;
			phys_addr_t phyaddr;
			int i, frags;

			finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
							sizeof(*finfo), 0);
			if (finfo == NULL) {
				PMD_TX_LOG(lio_dev, ERR,
					   "free buffer alloc failed\n");
				goto xmit_failed;
			}

			rte_spinlock_lock(&lio_dev->glist_lock[iq_no]);
			g = (struct lio_gather *)list_delete_first_node(
						&lio_dev->glist_head[iq_no]);
			rte_spinlock_unlock(&lio_dev->glist_lock[iq_no]);
			if (g == NULL) {
				PMD_TX_LOG(lio_dev, ERR,
					   "Transmit scatter gather: glist null!\n");
				goto xmit_failed;
			}

			cmdsetup.s.gather = 1;
			cmdsetup.s.u.gatherptrs = m->nb_segs;
			lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
					    &cmdsetup, tag);

			memset(g->sg, 0, g->sg_size);
			g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
			lio_add_sg_size(&g->sg[0], m->data_len, 0);
			pkt_len = m->data_len;
			finfo->mbuf = m;

			/* First seg taken care above */
			frags = m->nb_segs - 1;
			i = 1;
			m = m->next;
			while (frags--) {
				g->sg[(i >> 2)].ptr[(i & 3)] =
						rte_mbuf_data_dma_addr(m);
				lio_add_sg_size(&g->sg[(i >> 2)],
						m->data_len, (i & 3));
				pkt_len += m->data_len;
				i++;
				m = m->next;
			}

			phyaddr = rte_mem_virt2phy(g->sg);
			if (phyaddr == RTE_BAD_PHYS_ADDR) {
				PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
				goto xmit_failed;
			}

			ndata.cmd.cmd3.dptr = phyaddr;
			ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;

			finfo->g = g;
			finfo->lio_dev = lio_dev;
			finfo->iq_no = (uint64_t)iq_no;
			ndata.buf = finfo;
		}

		ndata.datasize = pkt_len;

		status = lio_send_data_pkt(lio_dev, &ndata);

		if (unlikely(status == LIO_IQ_SEND_FAILED)) {
			PMD_TX_LOG(lio_dev, ERR, "send failed\n");
			break;
		}

		if (unlikely(status == LIO_IQ_SEND_STOP)) {
			PMD_TX_LOG(lio_dev, DEBUG, "iq full\n");
			/* create space as iq is full */
			lio_dev_cleanup_iq(lio_dev, iq_no);