/*
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#include "lio_logs.h"
#include "lio_struct.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"
/* Flush iq if available tx_desc fall below LIO_FLUSH_WM */
#define LIO_FLUSH_WM(_iq)		((_iq)->max_count / 2)

#define LIO_PKT_IN_DONE_CNT_MASK	0x00000000FFFFFFFFULL
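
/*
 * Note on the two definitions above: LIO_FLUSH_WM is the half-full
 * watermark used by lio_dev_cleanup_iq() further down to decide when an
 * instruction queue needs flushing, and LIO_PKT_IN_DONE_CNT_MASK keeps
 * only the low 32 bits of the instruction-count CSR, which is what
 * lio_update_read_index() folds into the ring index.
 */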

static void
lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
{
	uint32_t count = 0;

	do {
		count += droq->buffer_size;
	} while (count < LIO_MAX_RX_PKTLEN);
}

static void
lio_droq_reset_indices(struct lio_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	rte_atomic64_set(&droq->pkts_pending, 0);
}

static void
lio_droq_destroy_ring_buffers(struct lio_droq *droq)
{
	uint32_t i;

	for (i = 0; i < droq->max_count; i++) {
		if (droq->recv_buf_list[i].buffer) {
			rte_pktmbuf_free((struct rte_mbuf *)
					 droq->recv_buf_list[i].buffer);
			droq->recv_buf_list[i].buffer = NULL;
		}
	}

	lio_droq_reset_indices(droq);
}

static int
lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
			    struct lio_droq *droq)
{
	struct lio_droq_desc *desc_ring = droq->desc_ring;
	uint32_t i;
	void *buf;

	for (i = 0; i < droq->max_count; i++) {
		buf = rte_pktmbuf_alloc(droq->mpool);
		if (buf == NULL) {
			lio_dev_err(lio_dev, "buffer alloc failed\n");
			droq->stats.rx_alloc_failure++;
			lio_droq_destroy_ring_buffers(droq);
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->info_list[i].length = 0;

		/* map ring buffers into memory */
		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	lio_droq_reset_indices(droq);

	lio_droq_compute_max_packet_bufs(droq);

	return 0;
}
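
/*
 * A note on the helpers used above (an assumption about the driver's own
 * headers, not defined in this file): lio_map_ring() and
 * lio_map_ring_info() come from lio_rxtx.h and translate the mbuf data
 * area and the per-descriptor info entry into the DMA addresses that are
 * programmed into the hardware-visible descriptor ring.
 */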

void
lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
{
	const struct rte_memzone *mz_tmp;
	int ret = 0;

	if (mz == NULL) {
		lio_dev_err(lio_dev, "Memzone NULL\n");
		return;
	}

	mz_tmp = rte_memzone_lookup(mz->name);
	if (mz_tmp == NULL) {
		lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
		return;
	}

	ret = rte_memzone_free(mz);
	if (ret)
		lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
}

/**
 *  Frees the space for the descriptor ring of the droq.
 *
 *  @param lio_dev	- pointer to the lio device structure
 *  @param q_no		- droq no.
 */
static void
lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
{
	struct lio_droq *droq = lio_dev->droq[q_no];

	lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);

	lio_droq_destroy_ring_buffers(droq);
	rte_free(droq->recv_buf_list);
	droq->recv_buf_list = NULL;
	lio_dma_zone_free(lio_dev, droq->info_mz);
	lio_dma_zone_free(lio_dev, droq->desc_ring_mz);

	memset(droq, 0, LIO_DROQ_SIZE);
}

static void *
lio_alloc_info_buffer(struct lio_device *lio_dev,
		      struct lio_droq *droq, unsigned int socket_id)
{
	droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
						 "info_list", droq->q_no,
						 (droq->max_count *
							LIO_DROQ_INFO_SIZE),
						 RTE_CACHE_LINE_SIZE,
						 socket_id);
	if (droq->info_mz == NULL)
		return NULL;

	droq->info_list_dma = droq->info_mz->phys_addr;
	droq->info_alloc_size = droq->info_mz->len;
	droq->info_base_addr = (size_t)droq->info_mz->addr;

	return droq->info_mz->addr;
}

/**
 *  Allocates space for the descriptor ring for the droq and
 *  sets the base addr, num desc etc in Octeon registers.
 *
 *  @param lio_dev	- pointer to the lio device structure
 *  @param q_no		- droq no.
 *  @param app_ctx	- pointer to application context
 *  @return Success: 0	Failure: -1
 */
static int
lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
	      uint32_t num_descs, uint32_t desc_size,
	      struct rte_mempool *mpool, unsigned int socket_id)
{
	uint32_t c_refill_threshold;
	uint32_t desc_ring_size;
	struct lio_droq *droq;

	lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);

	droq = lio_dev->droq[q_no];
	droq->lio_dev = lio_dev;
	droq->q_no = q_no;
	droq->mpool = mpool;

	c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);

	droq->max_count = num_descs;
	droq->buffer_size = desc_size;

	desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
	droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
						      "droq", q_no,
						      desc_ring_size,
						      RTE_CACHE_LINE_SIZE,
						      socket_id);
	if (droq->desc_ring_mz == NULL) {
		lio_dev_err(lio_dev,
			    "Output queue %d ring alloc failed\n", q_no);
		return -1;
	}

	droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
	droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;

	lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		    q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
	lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
		    droq->max_count);

	droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
	if (droq->info_list == NULL) {
		lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
		goto init_droq_fail;
	}

	droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
						 (droq->max_count *
							LIO_DROQ_RECVBUF_SIZE),
						 RTE_CACHE_LINE_SIZE,
						 socket_id);
	if (droq->recv_buf_list == NULL) {
		lio_dev_err(lio_dev,
			    "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (lio_droq_setup_ring_buffers(lio_dev, droq))
		goto init_droq_fail;

	droq->refill_threshold = c_refill_threshold;

	rte_spinlock_init(&droq->lock);

	lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);

	lio_dev->io_qmask.oq |= (1ULL << q_no);

	return 0;

init_droq_fail:
	lio_delete_droq(lio_dev, q_no);

	return -1;
}

int
lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
	       int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
{
	struct lio_droq *droq;

	PMD_INIT_FUNC_TRACE();

	if (lio_dev->droq[oq_no]) {
		lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
		return 0;
	}

	/* Allocate the DS for the new droq. */
	droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (droq == NULL)
		return -ENOMEM;

	lio_dev->droq[oq_no] = droq;

	/* Initialize the Droq */
	if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
			  socket_id)) {
		lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
		rte_free(lio_dev->droq[oq_no]);
		lio_dev->droq[oq_no] = NULL;
		return -ENOMEM;
	}

	lio_dev->num_oqs++;

	lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);

	/* Send credit for octeon output queues. credits are always
	 * sent after the output queue is enabled.
	 */
	rte_write32(lio_dev->droq[oq_no]->max_count,
		    lio_dev->droq[oq_no]->pkts_credit_reg);

	return 0;
}
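
/*
 * The value written to pkts_credit_reg tells the hardware how many free
 * receive descriptors it may consume. Seeding it with max_count hands the
 * whole freshly filled ring to the device; lio_droq_refill() later tops
 * the credit up with however many descriptors it managed to refill.
 */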

static inline uint32_t
lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
{
	uint32_t buf_cnt = 0;

	while (total_len > (buf_size * buf_cnt))
		buf_cnt++;

	return buf_cnt;
}
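
/*
 * Equivalent closed form (a sketch, not taken from the original source):
 * buf_cnt = (total_len + buf_size - 1) / buf_size, i.e. the number of
 * ring buffers a packet of total_len bytes spans when each buffer holds
 * buf_size bytes.
 */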

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline uint32_t
lio_droq_refill_pullup_descs(struct lio_droq *droq,
			     struct lio_droq_desc *desc_ring)
{
	uint32_t refill_index = droq->refill_idx;
	uint32_t desc_refilled = 0;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				droq->refill_idx = lio_incr_index(
							droq->refill_idx, 1,
							droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}

		refill_index = lio_incr_index(refill_index, 1,
					      droq->max_count);
	}

	return desc_refilled;
}

/* lio_droq_refill
 *
 * @param droq	- droq in which descriptors require new buffers.
 *
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 *
 * Returns:
 *  Number of descriptors refilled.
 *
 * Locks:
 *  This routine is called with droq->lock held.
 */
static uint32_t
lio_droq_refill(struct lio_droq *droq)
{
	struct lio_droq_desc *desc_ring;
	uint32_t desc_refilled = 0;
	void *buf = NULL;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
			buf = rte_pktmbuf_alloc(droq->mpool);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (buf == NULL) {
				droq->stats.rx_alloc_failure++;
				break;
			}

			droq->recv_buf_list[droq->refill_idx].buffer = buf;
		}

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);
		/* Reset any previous values in the length field. */
		droq->info_list[droq->refill_idx].length = 0;

		droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
						  droq->max_count);

		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);

	/* The refill count does not change in the pullup pass: buffers are
	 * only moved to close the gap in the ring, so the same number of
	 * descriptors still needs to be refilled later.
	 */
	return desc_refilled;
}

static int
lio_droq_fast_process_packet(struct lio_device *lio_dev,
			     struct lio_droq *droq,
			     struct rte_mbuf **rx_pkts)
{
	struct rte_mbuf *nicbuf = NULL;
	struct lio_droq_info *info;
	uint32_t total_len = 0;
	int data_total_len = 0;
	uint32_t pkt_len = 0;
	union octeon_rh *rh;
	int data_pkts = 0;

	info = &droq->info_list[droq->read_idx];
	lio_swap_8B_data((uint64_t *)info, 2);

	if (!info->length)
		return -1;

	/* Len of resp hdr is included in the received data len. */
	info->length -= OCTEON_RH_SIZE;
	rh = &info->rh;

	total_len += (uint32_t)info->length;

	if (lio_opcode_slow_path(rh)) {
		uint32_t buf_cnt;

		buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
						(uint32_t)info->length);
		droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
						droq->max_count);
		droq->refill_count += buf_cnt;
	} else {
		if (info->length <= droq->buffer_size) {
			if (rh->r_dh.has_hash)
				pkt_len = (uint32_t)(info->length - 8);
			else
				pkt_len = (uint32_t)info->length;

			nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
			droq->recv_buf_list[droq->read_idx].buffer = NULL;
			droq->read_idx = lio_incr_index(
						droq->read_idx, 1,
						droq->max_count);
			droq->refill_count++;

			if (likely(nicbuf != NULL)) {
				/* We don't have a way to pass flags yet */
				nicbuf->ol_flags = 0;
				if (rh->r_dh.has_hash) {
					uint64_t *hash_ptr;

					nicbuf->ol_flags |= PKT_RX_RSS_HASH;
					hash_ptr = rte_pktmbuf_mtod(nicbuf,
								    uint64_t *);
					lio_swap_8B_data(hash_ptr, 1);
					nicbuf->hash.rss = (uint32_t)*hash_ptr;
					nicbuf->data_off += 8;
				}

				nicbuf->pkt_len = pkt_len;
				nicbuf->data_len = pkt_len;
				nicbuf->port = lio_dev->port_id;
				/* Store the mbuf */
				rx_pkts[data_pkts++] = nicbuf;
				data_total_len += pkt_len;
			}

			/* Prefetch buffer pointers when on a cache line
			 * boundary
			 */
			if ((droq->read_idx & 3) == 0) {
				rte_prefetch0(
				    &droq->recv_buf_list[droq->read_idx]);
				rte_prefetch0(
				    &droq->info_list[droq->read_idx]);
			}
		} else {
			struct rte_mbuf *first_buf = NULL;
			struct rte_mbuf *last_buf = NULL;

			while (pkt_len < info->length) {
				int cpy_len = 0;

				cpy_len = ((pkt_len + droq->buffer_size) >
						info->length)
						? ((uint32_t)info->length -
							pkt_len)
						: droq->buffer_size;

				nicbuf =
				    droq->recv_buf_list[droq->read_idx].buffer;
				droq->recv_buf_list[droq->read_idx].buffer =
				    NULL;

				if (likely(nicbuf != NULL)) {
					/* Note the first seg */
					if (!pkt_len)
						first_buf = nicbuf;

					nicbuf->port = lio_dev->port_id;
					/* We don't have a way to pass
					 * flags yet
					 */
					nicbuf->ol_flags = 0;
					if ((!pkt_len) && (rh->r_dh.has_hash)) {
						uint64_t *hash_ptr;

						nicbuf->ol_flags |=
						    PKT_RX_RSS_HASH;
						hash_ptr = rte_pktmbuf_mtod(
						    nicbuf, uint64_t *);
						lio_swap_8B_data(hash_ptr, 1);
						nicbuf->hash.rss =
						    (uint32_t)*hash_ptr;
						nicbuf->data_off += 8;
						nicbuf->pkt_len = cpy_len - 8;
						nicbuf->data_len = cpy_len - 8;
					} else {
						nicbuf->pkt_len = cpy_len;
						nicbuf->data_len = cpy_len;
					}

					if (pkt_len)
						first_buf->nb_segs++;

					if (last_buf)
						last_buf->next = nicbuf;

					last_buf = nicbuf;
				} else {
					PMD_RX_LOG(lio_dev, ERR, "no buf\n");
				}

				pkt_len += cpy_len;
				droq->read_idx = lio_incr_index(
							droq->read_idx, 1,
							droq->max_count);
				droq->refill_count++;

				/* Prefetch buffer pointers when on a
				 * cache line boundary
				 */
				if ((droq->read_idx & 3) == 0) {
					rte_prefetch0(&droq->recv_buf_list
							[droq->read_idx]);
					rte_prefetch0(
					    &droq->info_list[droq->read_idx]);
				}
			}

			rx_pkts[data_pkts++] = first_buf;
			if (rh->r_dh.has_hash)
				data_total_len += (pkt_len - 8);
			else
				data_total_len += pkt_len;
		}

		/* Inform upper layer about packet checksum verification */
		struct rte_mbuf *m = rx_pkts[data_pkts - 1];

		if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
			m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

		if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
			m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	if (droq->refill_count >= droq->refill_threshold) {
		int desc_refilled = lio_droq_refill(droq);

		/* Flush the droq descriptor data to memory to be sure
		 * that when we update the credits the data in memory is
		 * accurate.
		 */
		rte_wmb();
		rte_write32(desc_refilled, droq->pkts_credit_reg);
		/* make sure mmio write completes */
		rte_wmb();
	}

	info->length = 0;
	info->rh.rh64 = 0;

	droq->stats.pkts_received++;
	droq->stats.rx_pkts_received += data_pkts;
	droq->stats.rx_bytes_received += data_total_len;
	droq->stats.bytes_received += total_len;

	return data_pkts;
}
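
/*
 * Note on the hash handling above: when rh->r_dh.has_hash is set, the
 * firmware prepends an 8-byte RSS hash to the packet data. That is why
 * the code subtracts 8 from the packet length, advances data_off by 8 and
 * byte-swaps the value before storing it in mbuf->hash.rss.
 */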

static uint32_t
lio_droq_fast_process_packets(struct lio_device *lio_dev,
			      struct lio_droq *droq,
			      struct rte_mbuf **rx_pkts,
			      uint32_t pkts_to_process)
{
	int ret, data_pkts = 0;
	uint32_t pkt;

	for (pkt = 0; pkt < pkts_to_process; pkt++) {
		ret = lio_droq_fast_process_packet(lio_dev, droq,
						   &rx_pkts[data_pkts]);
		if (ret < 0) {
			lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				    lio_dev->port_id, droq->q_no,
				    droq->read_idx, pkts_to_process);
			break;
		}

		data_pkts += ret;
	}

	rte_atomic64_sub(&droq->pkts_pending, pkt);

	return data_pkts;
}

static inline uint32_t
lio_droq_check_hw_for_pkts(struct lio_droq *droq)
{
	uint32_t last_count;
	uint32_t pkt_count;

	pkt_count = rte_read32(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	if (last_count)
		rte_atomic64_add(&droq->pkts_pending, last_count);

	return last_count;
}
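
/*
 * pkts_sent_reg counts packets the hardware has posted since the host
 * last acknowledged them, so the number of newly arrived packets is the
 * difference against the value cached in droq->pkt_count; unsigned
 * arithmetic keeps that delta correct across counter wrap.
 * lio_dev_recv_pkts() later writes the cached count back to the register
 * to acknowledge the packets it has processed.
 */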

uint16_t
lio_dev_recv_pkts(void *rx_queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t budget)
{
	struct lio_droq *droq = rx_queue;
	struct lio_device *lio_dev = droq->lio_dev;
	uint32_t pkts_processed = 0;
	uint32_t pkt_count = 0;

	lio_droq_check_hw_for_pkts(droq);

	pkt_count = rte_atomic64_read(&droq->pkts_pending);
	if (!pkt_count)
		return 0;

	if (pkt_count > budget)
		pkt_count = budget;

	/* Grab the droq lock */
	rte_spinlock_lock(&droq->lock);
	pkts_processed = lio_droq_fast_process_packets(lio_dev,
							droq, rx_pkts,
							pkt_count);

	if (droq->pkt_count) {
		rte_write32(droq->pkt_count, droq->pkts_sent_reg);
		droq->pkt_count = 0;
	}

	/* Release the spin lock */
	rte_spinlock_unlock(&droq->lock);

	return pkts_processed;
}
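
/*
 * lio_dev_recv_pkts() is the burst receive handler the PMD registers as
 * the device's rx_pkt_burst callback (done outside this file, in the
 * ethdev setup code). The budget argument is the size of the rx_pkts
 * array supplied by the application.
 */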

void
lio_delete_droq_queue(struct lio_device *lio_dev,
		      int oq_no)
{
	lio_delete_droq(lio_dev, oq_no);
	lio_dev->num_oqs--;
	rte_free(lio_dev->droq[oq_no]);
	lio_dev->droq[oq_no] = NULL;
}

/**
 *  lio_init_instr_queue()
 *  @param lio_dev	- pointer to the lio device structure.
 *  @param txpciq	- queue to be initialized.
 *
 *  Called at driver init time for each input queue. iq_conf has the
 *  configuration parameters for the queue.
 *
 *  @return  Success: 0	Failure: -1
 */
static int
lio_init_instr_queue(struct lio_device *lio_dev,
		     union octeon_txpciq txpciq,
		     uint32_t num_descs, unsigned int socket_id)
{
	uint32_t iq_no = (uint32_t)txpciq.s.q_no;
	struct lio_instr_queue *iq;
	uint32_t instr_type;
	uint32_t q_size;

	instr_type = LIO_IQ_INSTR_TYPE(lio_dev);

	q_size = instr_type * num_descs;
	iq = lio_dev->instr_queue[iq_no];
	iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
					     "instr_queue", iq_no, q_size,
					     RTE_CACHE_LINE_SIZE,
					     socket_id);
	if (iq->iq_mz == NULL) {
		lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return -1;
	}

	iq->base_addr_dma = iq->iq_mz->phys_addr;
	iq->base_addr = (uint8_t *)iq->iq_mz->addr;

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon.
	 */
	iq->request_list = rte_zmalloc_socket("request_list",
					      sizeof(*iq->request_list) *
							num_descs,
					      RTE_CACHE_LINE_SIZE,
					      socket_id);
	if (iq->request_list == NULL) {
		lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
			    iq_no);
		lio_dma_zone_free(lio_dev, iq->iq_mz);
		return -1;
	}

	lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
		    iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
		    iq->max_count);

	iq->lio_dev = lio_dev;
	iq->txpciq.txpciq64 = txpciq.txpciq64;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->lio_read_index = 0;
	iq->flush_index = 0;

	rte_atomic64_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	rte_spinlock_init(&iq->lock);
	rte_spinlock_init(&iq->post_lock);

	rte_atomic64_clear(&iq->iq_flush_running);

	lio_dev->io_qmask.iq |= (1ULL << iq_no);

	/* Set the 32B/64B mode for each input queue */
	lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
	iq->iqcmd_64B = (instr_type == 64);

	lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);

	return 0;
}

int
lio_setup_instr_queue0(struct lio_device *lio_dev)
{
	union octeon_txpciq txpciq;
	uint32_t num_descs = 0;
	uint32_t iq_no = 0;

	num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);

	lio_dev->num_iqs = 0;

	lio_dev->instr_queue[0] = rte_zmalloc(NULL,
					sizeof(struct lio_instr_queue), 0);
	if (lio_dev->instr_queue[0] == NULL)
		return -ENOMEM;

	lio_dev->instr_queue[0]->q_index = 0;
	lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
	txpciq.txpciq64 = 0;
	txpciq.s.q_no = iq_no;
	txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
	txpciq.s.use_qpg = 0;
	txpciq.s.qpg = 0;

	if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
		rte_free(lio_dev->instr_queue[0]);
		lio_dev->instr_queue[0] = NULL;
		return -1;
	}

	lio_dev->num_iqs++;

	return 0;
}

/**
 *  lio_delete_instr_queue()
 *  @param lio_dev	- pointer to the lio device structure.
 *  @param iq_no	- queue to be deleted.
 *
 *  Called at driver unload time for each input queue. Deletes all
 *  allocated resources for the input queue.
 */
static void
lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];

	rte_free(iq->request_list);
	iq->request_list = NULL;
	lio_dma_zone_free(lio_dev, iq->iq_mz);
}

void
lio_free_instr_queue0(struct lio_device *lio_dev)
{
	lio_delete_instr_queue(lio_dev, 0);
	rte_free(lio_dev->instr_queue[0]);
	lio_dev->instr_queue[0] = NULL;
	lio_dev->num_iqs--;
}

/* Return 0 on success, -1 on failure */
int
lio_setup_iq(struct lio_device *lio_dev, int q_index,
	     union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,
	     unsigned int socket_id)
{
	uint32_t iq_no = (uint32_t)txpciq.s.q_no;

	if (lio_dev->instr_queue[iq_no]) {
		lio_dev_dbg(lio_dev, "IQ is in use. Cannot create the IQ: %d again\n",
			    iq_no);
		lio_dev->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
		lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}

	lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
						sizeof(struct lio_instr_queue),
						RTE_CACHE_LINE_SIZE, socket_id);
	if (lio_dev->instr_queue[iq_no] == NULL)
		return -ENOMEM;

	lio_dev->instr_queue[iq_no]->q_index = q_index;
	lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;

	if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id))
		goto release_lio_iq;

	lio_dev->num_iqs++;
	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		goto delete_lio_iq;

	return 0;

delete_lio_iq:
	lio_delete_instr_queue(lio_dev, iq_no);
	lio_dev->num_iqs--;

release_lio_iq:
	rte_free(lio_dev->instr_queue[iq_no]);
	lio_dev->instr_queue[iq_no] = NULL;

	return -1;
}

int
lio_wait_for_instr_fetch(struct lio_device *lio_dev)
{
	int pending, instr_cnt;
	int i, retry = 1000;

	do {
		instr_cnt = 0;

		for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
			if (!(lio_dev->io_qmask.iq & (1ULL << i)))
				continue;

			if (lio_dev->instr_queue[i] == NULL)
				break;

			pending = rte_atomic64_read(
			    &lio_dev->instr_queue[i]->instr_pending);
			if (pending)
				lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);

			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		rte_delay_ms(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

static inline void
lio_ring_doorbell(struct lio_device *lio_dev,
		  struct lio_instr_queue *iq)
{
	if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
		rte_write32(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		rte_wmb();
		iq->fill_cnt = 0;
	}
}
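
/*
 * The doorbell value is the number of commands queued since the last ring
 * (iq->fill_cnt); the hardware adds it to its fetch count. fill_cnt is
 * cleared only after the barrier, once the write is known to have been
 * posted.
 */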

static inline void
copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
	uint8_t *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	rte_memcpy(iqptr, cmd, cmdsize);
}

static inline struct lio_iq_post_status
post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
	struct lio_iq_post_status st;

	st.status = LIO_IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (rte_atomic64_read(&iq->instr_pending) >=
			(int32_t)(iq->max_count - 1)) {
		st.status = LIO_IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (rte_atomic64_read(&iq->instr_pending) >=
			(int32_t)(iq->max_count - 2))
		st.status = LIO_IQ_SEND_STOP;

	copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
					      iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	rte_wmb();

	rte_atomic64_inc(&iq->instr_pending);

	return st;
}
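
/*
 * Two watermarks are checked above: with max_count - 1 instructions
 * pending the post fails outright (LIO_IQ_SEND_FAILED); with
 * max_count - 2 pending the command is still queued but the caller is
 * told to back off (LIO_IQ_SEND_STOP), which lio_dev_xmit_pkts() handles
 * by flushing the IQ to create space.
 */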

static inline void
lio_add_to_request_list(struct lio_instr_queue *iq,
			int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

static void
lio_free_netsgbuf(void *buf)
{
	struct lio_buf_free_info *finfo = buf;
	struct lio_device *lio_dev = finfo->lio_dev;
	struct rte_mbuf *m = finfo->mbuf;
	struct lio_gather *g = finfo->g;
	uint8_t iq = finfo->iq_no;

	/* This will take care of multiple segments also */
	rte_pktmbuf_free(m);

	rte_spinlock_lock(&lio_dev->glist_lock[iq]);
	STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries);
	rte_spinlock_unlock(&lio_dev->glist_lock[iq]);
	rte_free(finfo);
}

/* Can only run in process context */
static int
lio_process_iq_request_list(struct lio_device *lio_dev,
			    struct lio_instr_queue *iq)
{
	struct octeon_instr_irh *irh = NULL;
	uint32_t old = iq->flush_index;
	struct lio_soft_command *sc;
	uint32_t inst_count = 0;
	int reqtype;
	void *buf;

	while (old != iq->lio_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == LIO_REQTYPE_NONE)
			goto skip_this;

		switch (reqtype) {
		case LIO_REQTYPE_NORESP_NET:
			rte_pktmbuf_free((struct rte_mbuf *)buf);
			break;
		case LIO_REQTYPE_NORESP_NET_SG:
			lio_free_netsgbuf(buf);
			break;
		case LIO_REQTYPE_SOFT_COMMAND:
			sc = buf;
			irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				rte_spinlock_lock(&lio_dev->response_list.lock);
				rte_atomic64_inc(
				    &lio_dev->response_list.pending_req_count);
				STAILQ_INSERT_TAIL(
					&lio_dev->response_list.head,
					&sc->node, entries);
				rte_spinlock_unlock(
						&lio_dev->response_list.lock);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(LIO_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			lio_dev_err(lio_dev,
				    "Unknown reqtype: %d buf: %p at idx %d\n",
				    reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = lio_incr_index(old, 1, iq->max_count);
	}

	iq->flush_index = old;

	return inst_count;
}

static void
lio_update_read_index(struct lio_instr_queue *iq)
{
	uint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg);
	uint32_t last_done;

	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	/* Add last_done and modulo with the IQ size to get new index */
	iq->lio_read_index = (iq->lio_read_index +
			(uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
			iq->max_count;
}
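
/*
 * Example of the wrap arithmetic above (values are illustrative only):
 * with max_count = 512, lio_read_index = 500 and last_done = 20, the new
 * read index is (500 + 20) % 512 = 8, so the index wraps cleanly around
 * the ring regardless of how far the hardware instruction counter has
 * advanced.
 */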

int
lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
{
	uint32_t tot_inst_processed = 0;
	uint32_t inst_processed = 0;

	if (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0)
		return 0;

	rte_spinlock_lock(&iq->lock);

	lio_update_read_index(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->lio_read_index)
			break;

		inst_processed = lio_process_iq_request_list(lio_dev, iq);

		if (inst_processed) {
			rte_atomic64_sub(&iq->instr_pending, inst_processed);
			iq->stats.instr_processed += inst_processed;
		}

		tot_inst_processed += inst_processed;
		inst_processed = 0;
	} while (1);

	rte_spinlock_unlock(&iq->lock);

	rte_atomic64_clear(&iq->iq_flush_running);

	return tot_inst_processed;
}

static int
lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
		 void *buf, uint32_t datasize, uint32_t reqtype)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
	struct lio_iq_post_status st;

	rte_spinlock_lock(&iq->post_lock);

	st = post_command2(iq, cmd);

	if (st.status != LIO_IQ_SEND_FAILED) {
		lio_add_to_request_list(iq, st.index, buf, reqtype);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
					      datasize);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);

		lio_ring_doorbell(lio_dev, iq);
	} else {
		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
	}

	rte_spinlock_unlock(&iq->post_lock);

	return st.status;
}

void
lio_prepare_soft_command(struct lio_device *lio_dev,
			 struct lio_soft_command *sc, uint8_t opcode,
			 uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
			 uint64_t ossp1)
{
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	RTE_ASSERT(opcode <= 15);
	RTE_ASSERT(subcode <= 127);

	ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

	ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;

	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

	pki_ih3->w = 1;
	pki_ih3->raw = 1;
	pki_ih3->utag = 1;
	pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
	pki_ih3->utt = 1;

	pki_ih3->tag = LIO_CONTROL;
	pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
	pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
	pki_ih3->pm = 0x7;
	pki_ih3->sl = 8;

	ih3->dlengsz = sc->datasize;

	irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
	irh->opcode = opcode;
	irh->subcode = subcode;

	/* opcode/subcode specific parameters (ossp) */
	irh->ossp = irh_ossp;
	sc->cmd.cmd3.ossp[0] = ossp0;
	sc->cmd.cmd3.ossp[1] = ossp1;

	if (sc->rdatasize) {
		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
		rdp->pcie_port = lio_dev->pcie_port;
		rdp->rlen = sc->rdatasize;
		irh->rflag = 1;
		ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3;
	} else {
		irh->rflag = 0;
		ih3->fsz = OCTEON_PCI_CMD_O3;
	}
}

int
lio_send_soft_command(struct lio_device *lio_dev,
		      struct lio_soft_command *sc)
{
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	uint32_t len = 0;

	ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
	if (ih3->dlengsz) {
		RTE_ASSERT(sc->dmadptr);
		sc->cmd.cmd3.dptr = sc->dmadptr;
	}

	irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
	if (irh->rflag) {
		RTE_ASSERT(sc->dmarptr);
		RTE_ASSERT(sc->status_word != NULL);
		*sc->status_word = LIO_COMPLETION_WORD_INIT;
		sc->cmd.cmd3.rptr = sc->dmarptr;
	}

	len = (uint32_t)ih3->dlengsz;

	if (sc->wait_time)
		sc->timeout = lio_uptime + sc->wait_time;

	return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
				LIO_REQTYPE_SOFT_COMMAND);
}

int
lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
{
	char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
	uint16_t buf_size;

	buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
	snprintf(sc_pool_name, sizeof(sc_pool_name),
		 "lio_sc_pool_%u", lio_dev->port_id);
	lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
						LIO_MAX_SOFT_COMMAND_BUFFERS,
						0, 0, buf_size, SOCKET_ID_ANY);
	if (lio_dev->sc_buf_pool == NULL)
		return -ENOMEM;

	return 0;
}

void
lio_free_sc_buffer_pool(struct lio_device *lio_dev)
{
	rte_mempool_free(lio_dev->sc_buf_pool);
}

struct lio_soft_command *
lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
		       uint32_t rdatasize, uint32_t ctxsize)
{
	uint32_t offset = sizeof(struct lio_soft_command);
	struct lio_soft_command *sc;
	struct rte_mbuf *m;
	uint64_t dma_addr;

	RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
		   LIO_SOFT_COMMAND_BUFFER_SIZE);

	m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
	if (m == NULL) {
		lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
		return NULL;
	}

	/* set rte_mbuf data size and there is only 1 segment */
	m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
	m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;

	/* use rte_mbuf buffer for soft command */
	sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
	memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
	sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
	sc->dma_addr = rte_mbuf_data_dma_addr(m);
	sc->mbuf = m;

	dma_addr = sc->dma_addr;

	if (ctxsize) {
		sc->ctxptr = (uint8_t *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (uint8_t *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		RTE_ASSERT(rdatasize >= 16);
		sc->virtrptr = (uint8_t *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
					       rdatasize - 8);
	}

	return sc;
}

void
lio_free_soft_command(struct lio_soft_command *sc)
{
	rte_pktmbuf_free(sc->mbuf);
}
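
/*
 * Layout of the mbuf backing a soft command, as built above: the
 * lio_soft_command header comes first, followed by the optional context
 * area, then the data area and the response area, with the latter two
 * each aligned up to a 128-byte boundary via
 * (offset + ... + 127) & 0xffffff80. The completion status word occupies
 * the last 8 bytes of the response area.
 */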

void
lio_setup_response_list(struct lio_device *lio_dev)
{
	STAILQ_INIT(&lio_dev->response_list.head);
	rte_spinlock_init(&lio_dev->response_list.lock);
	rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
}

int
lio_process_ordered_list(struct lio_device *lio_dev)
{
	int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
	struct lio_response_list *ordered_sc_list;
	struct lio_soft_command *sc;
	int request_complete = 0;
	uint64_t status64;
	uint32_t status;

	ordered_sc_list = &lio_dev->response_list;

	do {
		rte_spinlock_lock(&ordered_sc_list->lock);

		if (STAILQ_EMPTY(&ordered_sc_list->head)) {
			/* ordered_sc_list is empty; there is
			 * nothing to process
			 */
			rte_spinlock_unlock(&ordered_sc_list->lock);
			return -1;
		}

		sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
					     struct lio_soft_command, node);

		status = LIO_REQUEST_PENDING;

		/* check if octeon has finished DMA'ing a response
		 * to where rptr is pointing to
		 */
		status64 = *sc->status_word;

		if (status64 != LIO_COMPLETION_WORD_INIT) {
			/* This logic ensures that all 64b have been written.
			 * 1. check byte 0 for non-FF
			 * 2. if non-FF, then swap result from BE to host order
			 * 3. check byte 7 (swapped to 0) for non-FF
			 * 4. if non-FF, use the low 32-bit status code
			 * 5. if either byte 0 or byte 7 is FF, don't use status
			 */
			if ((status64 & 0xff) != 0xff) {
				lio_swap_8B_data(&status64, 1);
				if (((status64 & 0xff) != 0xff)) {
					/* retrieve 16-bit firmware status */
					status = (uint32_t)(status64 &
							    0xffffULL);
					if (status)
						status =
						    LIO_FIRMWARE_STATUS_CODE(
									status);
					else
						status = LIO_REQUEST_DONE;
				}
			}
		} else if ((sc->timeout && lio_check_timeout(lio_uptime,
							     sc->timeout))) {
			lio_dev_err(lio_dev,
				    "cmd failed, timeout (%ld, %ld)\n",
				    (long)lio_uptime, (long)sc->timeout);
			status = LIO_REQUEST_TIMEOUT;
		}

		if (status != LIO_REQUEST_PENDING) {
			/* we have received a response or we have timed out.
			 * remove node from linked list
			 */
			STAILQ_REMOVE(&ordered_sc_list->head,
				      &sc->node, lio_stailq_node, entries);
			rte_atomic64_dec(
			    &lio_dev->response_list.pending_req_count);
			rte_spinlock_unlock(&ordered_sc_list->lock);

			if (sc->callback)
				sc->callback(status, sc->callback_arg);

			request_complete++;
		} else {
			/* no response yet */
			request_complete = 0;
			rte_spinlock_unlock(&ordered_sc_list->lock);
		}

		/* If we hit the Max Ordered requests to process every loop,
		 * we quit and let this function be invoked the next time
		 * the poll thread runs to process the remaining requests.
		 * This function can take up the entire CPU if there is
		 * no upper limit to the requests processed.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return 0;
}

static inline struct lio_stailq_node *
list_delete_first_node(struct lio_stailq_head *head)
{
	struct lio_stailq_node *node;

	if (STAILQ_EMPTY(head))
		return NULL;

	node = STAILQ_FIRST(head);

	if (node)
		STAILQ_REMOVE(head, node, lio_stailq_node, entries);

	return node;
}

static void
lio_delete_sglist(struct lio_instr_queue *txq)
{
	struct lio_device *lio_dev = txq->lio_dev;
	int iq_no = txq->q_index;
	struct lio_gather *g;

	if (lio_dev->glist_head == NULL)
		return;

	do {
		g = (struct lio_gather *)list_delete_first_node(
						&lio_dev->glist_head[iq_no]);
		if (g) {
			if (g->sg)
				rte_free(
				    (void *)((unsigned long)g->sg - g->adjust));
			rte_free(g);
		}
	} while (g);
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
int
lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
		  int fw_mapped_iq, int num_descs, unsigned int socket_id)
{
	struct lio_gather *g;
	int i;

	rte_spinlock_init(&lio_dev->glist_lock[iq_no]);

	STAILQ_INIT(&lio_dev->glist_head[iq_no]);

	for (i = 0; i < num_descs; i++) {
		g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,
				       socket_id);
		if (g == NULL) {
			lio_dev_err(lio_dev,
				    "lio_gather memory allocation failed for qno %d\n",
				    iq_no);
			break;
		}

		g->sg_size =
		    ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);

		g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,
					   RTE_CACHE_LINE_SIZE, socket_id);
		if (g->sg == NULL) {
			lio_dev_err(lio_dev,
				    "sg list memory allocation failed for qno %d\n",
				    iq_no);
			rte_free(g);
			break;
		}

		/* The gather component should be aligned on 64-bit boundary */
		if (((unsigned long)g->sg) & 7) {
			g->adjust = 8 - (((unsigned long)g->sg) & 7);
			g->sg =
			    (struct lio_sg_entry *)((unsigned long)g->sg +
						    g->adjust);
		}

		STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,
				   entries);
	}

	if (i != num_descs) {
		lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);
		return -ENOMEM;
	}

	return 0;
}

void
lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
{
	lio_delete_instr_queue(lio_dev, iq_no);
	rte_free(lio_dev->instr_queue[iq_no]);
	lio_dev->instr_queue[iq_no] = NULL;
	lio_dev->num_iqs--;
}

static inline uint32_t
lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
{
	return ((lio_dev->instr_queue[q_no]->max_count - 1) -
		(uint32_t)rte_atomic64_read(
				&lio_dev->instr_queue[q_no]->instr_pending));
}

static inline int
lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
{
	return ((uint32_t)rte_atomic64_read(
				&lio_dev->instr_queue[q_no]->instr_pending) >=
			(lio_dev->instr_queue[q_no]->max_count - 2));
}

static int
lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)
{
	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
	uint32_t count = 10000;

	while ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) &&
			--count)
		lio_flush_iq(lio_dev, iq);

	return count ? 0 : 1;
}
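
/*
 * Cleanup is bounded: the loop above flushes the IQ until at least half
 * of the descriptors (LIO_FLUSH_WM) are free again or 10000 attempts have
 * been made, and returns non-zero only when it gave up with the queue
 * still congested.
 */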

static void
lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)
{
	struct lio_soft_command *sc = sc_ptr;
	struct lio_dev_ctrl_cmd *ctrl_cmd;
	struct lio_ctrl_pkt *ctrl_pkt;

	ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;
	ctrl_cmd = ctrl_pkt->ctrl_cmd;
	ctrl_cmd->cond = 1;

	lio_free_soft_command(sc);
}

static inline struct lio_soft_command *
lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,
		      struct lio_ctrl_pkt *ctrl_pkt)
{
	struct lio_soft_command *sc = NULL;
	uint32_t uddsize, datasize;
	uint32_t rdatasize;
	uint8_t *data;

	uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);

	datasize = OCTEON_CMD_SIZE + uddsize;
	rdatasize = (ctrl_pkt->wait_time) ? 16 : 0;

	sc = lio_alloc_soft_command(lio_dev, datasize,
				    rdatasize, sizeof(struct lio_ctrl_pkt));
	if (sc == NULL)
		return NULL;

	rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));

	data = (uint8_t *)sc->virtdptr;

	rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);

	lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);

	if (uddsize) {
		/* Endian-Swap for UDD should have been done by caller. */
		rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);
	}

	sc->iq_no = (uint32_t)ctrl_pkt->iq_no;

	lio_prepare_soft_command(lio_dev, sc,
				 LIO_OPCODE, LIO_OPCODE_CMD,
				 0, 0, 0);

	sc->callback = lio_ctrl_cmd_callback;
	sc->callback_arg = sc;
	sc->wait_time = ctrl_pkt->wait_time;

	return sc;
}

int
lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)
{
	struct lio_soft_command *sc = NULL;
	int retval;

	sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);
	if (sc == NULL) {
		lio_dev_err(lio_dev, "soft command allocation failed\n");
		return -1;
	}

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_free_soft_command(sc);
		lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n",
			    lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);
		return -1;
	}

	return retval;
}

/** Send data packet to the device
 * @param lio_dev - lio device pointer
 * @param ndata   - control structure with queueing and buffer information
 *
 * @returns LIO_IQ_SEND_FAILED if it failed to add to the input queue,
 * LIO_IQ_SEND_STOP if the queue should be stopped,
 * and LIO_IQ_SEND_OK if it was sent okay.
 */
static inline int
lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata)
{
	return lio_send_command(lio_dev, ndata->q_no, &ndata->cmd,
				ndata->buf, ndata->datasize, ndata->reqtype);
}

uint16_t
lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	struct lio_instr_queue *txq = tx_queue;
	union lio_cmd_setup cmdsetup;
	struct lio_device *lio_dev;
	struct lio_iq_stats *stats;
	struct lio_data_pkt ndata;
	int i, processed = 0;
	struct rte_mbuf *m;
	uint32_t tag = 0;
	int status = 0;
	int iq_no;

	lio_dev = txq->lio_dev;
	iq_no = txq->txpciq.s.q_no;
	stats = &lio_dev->instr_queue[iq_no]->stats;

	if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
		PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
			   lio_dev->linfo.link.s.link_up);
		goto xmit_failed;
	}

	lio_dev_cleanup_iq(lio_dev, iq_no);

	for (i = 0; i < nb_pkts; i++) {
		uint32_t pkt_len = 0;

		m = pkts[i];

		/* Prepare the attributes for the data to be passed to BASE. */
		memset(&ndata, 0, sizeof(struct lio_data_pkt));

		ndata.buf = m;

		ndata.q_no = iq_no;
		if (lio_iq_is_full(lio_dev, ndata.q_no)) {
			stats->tx_iq_busy++;
			if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
				PMD_TX_LOG(lio_dev, ERR,
					   "Transmit failed iq:%d full\n",
					   iq_no);
				break;
			}
		}

		cmdsetup.cmd_setup64 = 0;
		cmdsetup.s.iq_no = iq_no;

		/* check checksum offload flags to form cmd */
		if (m->ol_flags & PKT_TX_IP_CKSUM)
			cmdsetup.s.ip_csum = 1;

		if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM)
			cmdsetup.s.tnl_csum = 1;
		else if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
				(m->ol_flags & PKT_TX_UDP_CKSUM))
			cmdsetup.s.transport_csum = 1;

		if (m->nb_segs == 1) {
			pkt_len = rte_pktmbuf_data_len(m);
			cmdsetup.s.u.datasize = pkt_len;
			lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
					    &cmdsetup, tag);
			ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
			ndata.reqtype = LIO_REQTYPE_NORESP_NET;
		} else {
			struct lio_buf_free_info *finfo;
			struct lio_gather *g;
			phys_addr_t phyaddr;
			int i, frags;

			finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
							sizeof(*finfo), 0);
			if (finfo == NULL) {
				PMD_TX_LOG(lio_dev, ERR,
					   "free buffer alloc failed\n");
				goto xmit_failed;
			}

			rte_spinlock_lock(&lio_dev->glist_lock[iq_no]);
			g = (struct lio_gather *)list_delete_first_node(
						&lio_dev->glist_head[iq_no]);
			rte_spinlock_unlock(&lio_dev->glist_lock[iq_no]);
			if (g == NULL) {
				PMD_TX_LOG(lio_dev, ERR,
					   "Transmit scatter gather: glist null!\n");
				rte_free(finfo);
				goto xmit_failed;
			}

			cmdsetup.s.gather = 1;
			cmdsetup.s.u.gatherptrs = m->nb_segs;
			lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
					    &cmdsetup, tag);

			memset(g->sg, 0, g->sg_size);
			g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
			lio_add_sg_size(&g->sg[0], m->data_len, 0);
			pkt_len = m->data_len;
			finfo->mbuf = m;

			/* First seg taken care above */
			frags = m->nb_segs - 1;
			i = 1;
			m = m->next;

			while (frags--) {
				g->sg[(i >> 2)].ptr[(i & 3)] =
					rte_mbuf_data_dma_addr(m);
				lio_add_sg_size(&g->sg[(i >> 2)],
						m->data_len, (i & 3));
				pkt_len += m->data_len;
				i++;
				m = m->next;
			}

			phyaddr = rte_mem_virt2iova(g->sg);
			if (phyaddr == RTE_BAD_PHYS_ADDR) {
				PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
				goto xmit_failed;
			}

			ndata.cmd.cmd3.dptr = phyaddr;
			ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;

			finfo->g = g;
			finfo->lio_dev = lio_dev;
			finfo->iq_no = (uint64_t)iq_no;
			ndata.buf = finfo;
		}

		ndata.datasize = pkt_len;

		status = lio_send_data_pkt(lio_dev, &ndata);

		if (unlikely(status == LIO_IQ_SEND_FAILED)) {
			PMD_TX_LOG(lio_dev, ERR, "send failed\n");
			break;
		}

		if (unlikely(status == LIO_IQ_SEND_STOP)) {
			PMD_TX_LOG(lio_dev, DEBUG, "iq full\n");
			/* create space as iq is full */
			lio_dev_cleanup_iq(lio_dev, iq_no);
		}

		stats->tx_done++;
		stats->tx_tot_bytes += pkt_len;
		processed++;
	}

xmit_failed:
	stats->tx_dropped += (nb_pkts - processed);

	return processed;
}
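
/*
 * Gather-list layout used above: each lio_sg_entry holds four buffer
 * pointers (ptr[0..3]) plus their sizes, which is why segment i lands in
 * entry (i >> 2), slot (i & 3). The head mbuf and the gather list are
 * remembered in a lio_buf_free_info so lio_free_netsgbuf() can release
 * them once the hardware has fetched the command.
 */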

void
lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
{
	struct lio_instr_queue *txq;
	struct lio_droq *rxq;
	uint16_t i;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txq = eth_dev->data->tx_queues[i];
		if (txq != NULL) {
			lio_dev_tx_queue_release(txq);
			eth_dev->data->tx_queues[i] = NULL;
		}
	}

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		if (rxq != NULL) {
			lio_dev_rx_queue_release(rxq);
			eth_dev->data->rx_queues[i] = NULL;
		}
	}
}