1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
5 #include <ethdev_driver.h>
6 #include <rte_cycles.h>
7 #include <rte_malloc.h>
10 #include "lio_struct.h"
11 #include "lio_ethdev.h"
15 /* Flush the IQ if the number of available tx descriptors falls below LIO_FLUSH_WM */
16 #define LIO_FLUSH_WM(_iq) ((_iq)->nb_desc / 2)
17 #define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
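/* Masks the pkt_in_done counter delta down to its meaningful low 32 bits
 * before it is folded into a ring index (see lio_update_read_index()).
 */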
20 lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
25 count += droq->buffer_size;
26 } while (count < LIO_MAX_RX_PKTLEN);
30 lio_droq_reset_indices(struct lio_droq *droq)
35 droq->refill_count = 0;
36 rte_atomic64_set(&droq->pkts_pending, 0);
40 lio_droq_destroy_ring_buffers(struct lio_droq *droq)
44 for (i = 0; i < droq->nb_desc; i++) {
45 if (droq->recv_buf_list[i].buffer) {
46 rte_pktmbuf_free((struct rte_mbuf *)
47 droq->recv_buf_list[i].buffer);
48 droq->recv_buf_list[i].buffer = NULL;
52 lio_droq_reset_indices(droq);
56 lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
57 struct lio_droq *droq)
59 struct lio_droq_desc *desc_ring = droq->desc_ring;
63 for (i = 0; i < droq->nb_desc; i++) {
64 buf = rte_pktmbuf_alloc(droq->mpool);
66 lio_dev_err(lio_dev, "buffer alloc failed\n");
67 droq->stats.rx_alloc_failure++;
68 lio_droq_destroy_ring_buffers(droq);
72 droq->recv_buf_list[i].buffer = buf;
73 droq->info_list[i].length = 0;
75 /* map the ring buffers' DMA addresses into the descriptors */
76 desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
77 desc_ring[i].buffer_ptr =
78 lio_map_ring(droq->recv_buf_list[i].buffer);
81 lio_droq_reset_indices(droq);
83 lio_droq_compute_max_packet_bufs(droq);
89 lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
91 const struct rte_memzone *mz_tmp;
95 lio_dev_err(lio_dev, "Memzone NULL\n");
99 mz_tmp = rte_memzone_lookup(mz->name);
100 if (mz_tmp == NULL) {
101 lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
105 ret = rte_memzone_free(mz);
107 lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
111 * Frees the space for the descriptor ring of the droq.
113 * @param lio_dev - pointer to the lio device structure
114 * @param q_no - droq no.
117 lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
119 struct lio_droq *droq = lio_dev->droq[q_no];
121 lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
123 lio_droq_destroy_ring_buffers(droq);
124 rte_free(droq->recv_buf_list);
125 droq->recv_buf_list = NULL;
126 lio_dma_zone_free(lio_dev, droq->info_mz);
127 lio_dma_zone_free(lio_dev, droq->desc_ring_mz);
129 memset(droq, 0, LIO_DROQ_SIZE);
133 lio_alloc_info_buffer(struct lio_device *lio_dev,
134 struct lio_droq *droq, unsigned int socket_id)
136 droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
137 "info_list", droq->q_no,
143 if (droq->info_mz == NULL)
146 droq->info_list_dma = droq->info_mz->iova;
147 droq->info_alloc_size = droq->info_mz->len;
148 droq->info_base_addr = (size_t)droq->info_mz->addr;
150 return droq->info_mz->addr;
154 * Allocates space for the descriptor ring of the droq and
155 * sets the base address, descriptor count, etc. in the Octeon registers.
157 * @param lio_dev - pointer to the lio device structure
158 * @param q_no - droq no.
159 * @param num_descs - number of descriptors in the ring
 * @param desc_size - size of the receive buffers
 * @param mpool - mempool from which receive buffers are allocated
 * @param socket_id - NUMA socket for the allocations
160 * @return Success: 0 Failure: -1
163 lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
164 uint32_t num_descs, uint32_t desc_size,
165 struct rte_mempool *mpool, unsigned int socket_id)
167 uint32_t c_refill_threshold;
168 uint32_t desc_ring_size;
169 struct lio_droq *droq;
171 lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
173 droq = lio_dev->droq[q_no];
174 droq->lio_dev = lio_dev;
178 c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
180 droq->nb_desc = num_descs;
181 droq->buffer_size = desc_size;
183 desc_ring_size = droq->nb_desc * LIO_DROQ_DESC_SIZE;
184 droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
190 if (droq->desc_ring_mz == NULL) {
192 "Output queue %d ring alloc failed\n", q_no);
196 droq->desc_ring_dma = droq->desc_ring_mz->iova;
197 droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
199 lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
200 q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
201 lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
204 droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
205 if (droq->info_list == NULL) {
206 lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
210 droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
212 LIO_DROQ_RECVBUF_SIZE),
215 if (droq->recv_buf_list == NULL) {
217 "Output queue recv buf list alloc failed\n");
221 if (lio_droq_setup_ring_buffers(lio_dev, droq))
224 droq->refill_threshold = c_refill_threshold;
226 rte_spinlock_init(&droq->lock);
228 lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);
230 lio_dev->io_qmask.oq |= (1ULL << q_no);
235 lio_delete_droq(lio_dev, q_no);
241 lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
242 int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
244 struct lio_droq *droq;
246 PMD_INIT_FUNC_TRACE();
248 /* Allocate the data structure for the new droq. */
249 droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
250 RTE_CACHE_LINE_SIZE, socket_id);
254 lio_dev->droq[oq_no] = droq;
256 /* Initialize the Droq */
257 if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
259 lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
260 rte_free(lio_dev->droq[oq_no]);
261 lio_dev->droq[oq_no] = NULL;
267 lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);
269 /* Send credits for the Octeon output queue. Credits are always
270 * sent after the output queue is enabled.
272 rte_write32(lio_dev->droq[oq_no]->nb_desc,
273 lio_dev->droq[oq_no]->pkts_credit_reg);
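/* Number of ring buffers needed to hold total_len bytes of received data,
 * i.e. the ceiling of total_len / buf_size; the loop below is equivalent to
 * buf_cnt = (total_len + buf_size - 1) / buf_size.
 */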
279 static inline uint32_t
280 lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
282 uint32_t buf_cnt = 0;
284 while (total_len > (buf_size * buf_cnt))
290 /* If we were not able to refill all buffers, try to move around
291 * the buffers that were not dispatched.
293 static inline uint32_t
294 lio_droq_refill_pullup_descs(struct lio_droq *droq,
295 struct lio_droq_desc *desc_ring)
297 uint32_t refill_index = droq->refill_idx;
298 uint32_t desc_refilled = 0;
300 while (refill_index != droq->read_idx) {
301 if (droq->recv_buf_list[refill_index].buffer) {
302 droq->recv_buf_list[droq->refill_idx].buffer =
303 droq->recv_buf_list[refill_index].buffer;
304 desc_ring[droq->refill_idx].buffer_ptr =
305 desc_ring[refill_index].buffer_ptr;
306 droq->recv_buf_list[refill_index].buffer = NULL;
307 desc_ring[refill_index].buffer_ptr = 0;
309 droq->refill_idx = lio_incr_index(
313 droq->refill_count--;
314 } while (droq->recv_buf_list[droq->refill_idx].buffer);
316 refill_index = lio_incr_index(refill_index, 1,
320 return desc_refilled;
325 * @param droq - droq in which descriptors require new buffers.
328 * Called during normal DROQ processing in interrupt mode or by the poll
329 * thread to refill the descriptors from which buffers were dispatched
330 * to upper layers. Attempts to allocate new buffers. If that fails, moves
331 * up buffers (that were not dispatched) to form a contiguous ring.
334 * Number of descriptors refilled.
337 * This routine is called with droq->lock held.
340 lio_droq_refill(struct lio_droq *droq)
342 struct lio_droq_desc *desc_ring;
343 uint32_t desc_refilled = 0;
346 desc_ring = droq->desc_ring;
348 while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
349 /* If a valid buffer exists (happens if there is no dispatch),
350 * reuse the buffer, else allocate.
352 if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
353 buf = rte_pktmbuf_alloc(droq->mpool);
354 /* If a buffer could not be allocated, no point in
358 droq->stats.rx_alloc_failure++;
362 droq->recv_buf_list[droq->refill_idx].buffer = buf;
365 desc_ring[droq->refill_idx].buffer_ptr =
366 lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);
367 /* Reset any previous values in the length field. */
368 droq->info_list[droq->refill_idx].length = 0;
370 droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
373 droq->refill_count--;
376 if (droq->refill_count)
377 desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);
379 /* If droq->refill_count is still set:
380 * the refill count does not change in pass two. We only moved buffers
381 * to close the gap in the ring, but we still have the same number of
384 return desc_refilled;
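/* Dequeue one received packet from the DROQ. A packet that fits in a single
 * ring buffer is handed up as the original mbuf; a larger packet is
 * reassembled into an mbuf chain; slow-path (control) opcodes only advance
 * the read index and refill count without producing an mbuf.
 */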
388 lio_droq_fast_process_packet(struct lio_device *lio_dev,
389 struct lio_droq *droq,
390 struct rte_mbuf **rx_pkts)
392 struct rte_mbuf *nicbuf = NULL;
393 struct lio_droq_info *info;
394 uint32_t total_len = 0;
395 int data_total_len = 0;
396 uint32_t pkt_len = 0;
400 info = &droq->info_list[droq->read_idx];
401 lio_swap_8B_data((uint64_t *)info, 2);
406 /* Len of resp hdr is included in the received data len. */
407 info->length -= OCTEON_RH_SIZE;
410 total_len += (uint32_t)info->length;
412 if (lio_opcode_slow_path(rh)) {
415 buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
416 (uint32_t)info->length);
417 droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
419 droq->refill_count += buf_cnt;
421 if (info->length <= droq->buffer_size) {
422 if (rh->r_dh.has_hash)
423 pkt_len = (uint32_t)(info->length - 8);
425 pkt_len = (uint32_t)info->length;
427 nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
428 droq->recv_buf_list[droq->read_idx].buffer = NULL;
429 droq->read_idx = lio_incr_index(
432 droq->refill_count++;
434 if (likely(nicbuf != NULL)) {
435 /* We don't have a way to pass flags yet */
436 nicbuf->ol_flags = 0;
437 if (rh->r_dh.has_hash) {
440 nicbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
441 hash_ptr = rte_pktmbuf_mtod(nicbuf,
443 lio_swap_8B_data(hash_ptr, 1);
444 nicbuf->hash.rss = (uint32_t)*hash_ptr;
445 nicbuf->data_off += 8;
448 nicbuf->pkt_len = pkt_len;
449 nicbuf->data_len = pkt_len;
450 nicbuf->port = lio_dev->port_id;
452 rx_pkts[data_pkts++] = nicbuf;
453 data_total_len += pkt_len;
456 /* Prefetch buffer pointers when on a cache line
459 if ((droq->read_idx & 3) == 0) {
461 &droq->recv_buf_list[droq->read_idx]);
463 &droq->info_list[droq->read_idx]);
466 struct rte_mbuf *first_buf = NULL;
467 struct rte_mbuf *last_buf = NULL;
469 while (pkt_len < info->length) {
472 cpy_len = ((pkt_len + droq->buffer_size) >
474 ? ((uint32_t)info->length -
479 droq->recv_buf_list[droq->read_idx].buffer;
480 droq->recv_buf_list[droq->read_idx].buffer =
483 if (likely(nicbuf != NULL)) {
484 /* Note the first seg */
488 nicbuf->port = lio_dev->port_id;
489 /* We don't have a way to pass
492 nicbuf->ol_flags = 0;
493 if ((!pkt_len) && (rh->r_dh.has_hash)) {
497 RTE_MBUF_F_RX_RSS_HASH;
498 hash_ptr = rte_pktmbuf_mtod(
500 lio_swap_8B_data(hash_ptr, 1);
503 nicbuf->data_off += 8;
504 nicbuf->pkt_len = cpy_len - 8;
505 nicbuf->data_len = cpy_len - 8;
507 nicbuf->pkt_len = cpy_len;
508 nicbuf->data_len = cpy_len;
512 first_buf->nb_segs++;
515 last_buf->next = nicbuf;
519 PMD_RX_LOG(lio_dev, ERR, "no buf\n");
523 droq->read_idx = lio_incr_index(
526 droq->refill_count++;
528 /* Prefetch buffer pointers when on a
529 * cache line boundary
531 if ((droq->read_idx & 3) == 0) {
532 rte_prefetch0(&droq->recv_buf_list
536 &droq->info_list[droq->read_idx]);
539 rx_pkts[data_pkts++] = first_buf;
540 if (rh->r_dh.has_hash)
541 data_total_len += (pkt_len - 8);
543 data_total_len += pkt_len;
546 /* Inform upper layer about packet checksum verification */
547 struct rte_mbuf *m = rx_pkts[data_pkts - 1];
549 if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
550 m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
552 if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
553 m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
556 if (droq->refill_count >= droq->refill_threshold) {
557 int desc_refilled = lio_droq_refill(droq);
559 /* Flush the droq descriptor data to memory to be sure
560 * that when we update the credits the data in memory is
564 rte_write32(desc_refilled, droq->pkts_credit_reg);
565 /* make sure mmio write completes */
572 droq->stats.pkts_received++;
573 droq->stats.rx_pkts_received += data_pkts;
574 droq->stats.rx_bytes_received += data_total_len;
575 droq->stats.bytes_received += total_len;
581 lio_droq_fast_process_packets(struct lio_device *lio_dev,
582 struct lio_droq *droq,
583 struct rte_mbuf **rx_pkts,
584 uint32_t pkts_to_process)
586 int ret, data_pkts = 0;
589 for (pkt = 0; pkt < pkts_to_process; pkt++) {
590 ret = lio_droq_fast_process_packet(lio_dev, droq,
591 &rx_pkts[data_pkts]);
593 lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
594 lio_dev->port_id, droq->q_no,
595 droq->read_idx, pkts_to_process);
601 rte_atomic64_sub(&droq->pkts_pending, pkt);
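/* Read the packets-sent register, work out how many new packets the hardware
 * has posted since the last check, and add that count to pkts_pending.
 */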
606 static inline uint32_t
607 lio_droq_check_hw_for_pkts(struct lio_droq *droq)
612 pkt_count = rte_read32(droq->pkts_sent_reg);
614 last_count = pkt_count - droq->pkt_count;
615 droq->pkt_count = pkt_count;
618 rte_atomic64_add(&droq->pkts_pending, last_count);
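/* RX burst handler: refresh the pending-packet count from hardware, then
 * process up to 'budget' packets under the droq lock and acknowledge the
 * count the hardware has reported.
 */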
624 lio_dev_recv_pkts(void *rx_queue,
625 struct rte_mbuf **rx_pkts,
628 struct lio_droq *droq = rx_queue;
629 struct lio_device *lio_dev = droq->lio_dev;
630 uint32_t pkts_processed = 0;
631 uint32_t pkt_count = 0;
633 lio_droq_check_hw_for_pkts(droq);
635 pkt_count = rte_atomic64_read(&droq->pkts_pending);
639 if (pkt_count > budget)
643 rte_spinlock_lock(&droq->lock);
644 pkts_processed = lio_droq_fast_process_packets(lio_dev,
648 if (droq->pkt_count) {
649 rte_write32(droq->pkt_count, droq->pkts_sent_reg);
653 /* Release the spin lock */
654 rte_spinlock_unlock(&droq->lock);
656 return pkts_processed;
660 lio_delete_droq_queue(struct lio_device *lio_dev,
663 lio_delete_droq(lio_dev, oq_no);
665 rte_free(lio_dev->droq[oq_no]);
666 lio_dev->droq[oq_no] = NULL;
670 * lio_init_instr_queue()
671 * @param lio_dev - pointer to the lio device structure.
672 * @param txpciq - queue to be initialized.
674 * Called at driver init time for each input queue. txpciq carries the
675 * queue number and configuration bits for the queue.
677 * @return Success: 0 Failure: -1
680 lio_init_instr_queue(struct lio_device *lio_dev,
681 union octeon_txpciq txpciq,
682 uint32_t num_descs, unsigned int socket_id)
684 uint32_t iq_no = (uint32_t)txpciq.s.q_no;
685 struct lio_instr_queue *iq;
689 instr_type = LIO_IQ_INSTR_TYPE(lio_dev);
691 q_size = instr_type * num_descs;
692 iq = lio_dev->instr_queue[iq_no];
693 iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
694 "instr_queue", iq_no, q_size,
697 if (iq->iq_mz == NULL) {
698 lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
703 iq->base_addr_dma = iq->iq_mz->iova;
704 iq->base_addr = (uint8_t *)iq->iq_mz->addr;
706 iq->nb_desc = num_descs;
708 /* Initialize a list to hold requests that have been posted to Octeon
709 * but have yet to be fetched by Octeon
711 iq->request_list = rte_zmalloc_socket("request_list",
712 sizeof(*iq->request_list) *
716 if (iq->request_list == NULL) {
717 lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
719 lio_dma_zone_free(lio_dev, iq->iq_mz);
723 lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
724 iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
727 iq->lio_dev = lio_dev;
728 iq->txpciq.txpciq64 = txpciq.txpciq64;
730 iq->host_write_index = 0;
731 iq->lio_read_index = 0;
734 rte_atomic64_set(&iq->instr_pending, 0);
736 /* Initialize the spinlock for this instruction queue */
737 rte_spinlock_init(&iq->lock);
738 rte_spinlock_init(&iq->post_lock);
740 rte_atomic64_clear(&iq->iq_flush_running);
742 lio_dev->io_qmask.iq |= (1ULL << iq_no);
744 /* Set the 32B/64B mode for each input queue */
745 lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
746 iq->iqcmd_64B = (instr_type == 64);
748 lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);
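/* Allocate and initialize the default instruction queue (IQ 0) using the
 * configured default TX descriptor count.
 */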
754 lio_setup_instr_queue0(struct lio_device *lio_dev)
756 union octeon_txpciq txpciq;
757 uint32_t num_descs = 0;
760 num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);
762 lio_dev->num_iqs = 0;
764 lio_dev->instr_queue[0] = rte_zmalloc(NULL,
765 sizeof(struct lio_instr_queue), 0);
766 if (lio_dev->instr_queue[0] == NULL)
769 lio_dev->instr_queue[0]->q_index = 0;
770 lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
772 txpciq.s.q_no = iq_no;
773 txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
774 txpciq.s.use_qpg = 0;
776 if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
777 rte_free(lio_dev->instr_queue[0]);
778 lio_dev->instr_queue[0] = NULL;
788 * lio_delete_instr_queue()
789 * @param lio_dev - pointer to the lio device structure.
790 * @param iq_no - queue to be deleted.
792 * Called at driver unload time for each input queue. Deletes all
793 * allocated resources for the input queue.
796 lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
798 struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
800 rte_free(iq->request_list);
801 iq->request_list = NULL;
802 lio_dma_zone_free(lio_dev, iq->iq_mz);
806 lio_free_instr_queue0(struct lio_device *lio_dev)
808 lio_delete_instr_queue(lio_dev, 0);
809 rte_free(lio_dev->instr_queue[0]);
810 lio_dev->instr_queue[0] = NULL;
814 /* Return 0 on success, -1 on failure */
816 lio_setup_iq(struct lio_device *lio_dev, int q_index,
817 union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,
818 unsigned int socket_id)
820 uint32_t iq_no = (uint32_t)txpciq.s.q_no;
822 lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
823 sizeof(struct lio_instr_queue),
824 RTE_CACHE_LINE_SIZE, socket_id);
825 if (lio_dev->instr_queue[iq_no] == NULL)
828 lio_dev->instr_queue[iq_no]->q_index = q_index;
829 lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
831 if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id)) {
832 rte_free(lio_dev->instr_queue[iq_no]);
833 lio_dev->instr_queue[iq_no] = NULL;
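/* Poll every active instruction queue, flushing as we go, until all posted
 * instructions have been fetched by the hardware or the retry budget expires.
 */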
843 lio_wait_for_instr_fetch(struct lio_device *lio_dev)
845 int pending, instr_cnt;
851 for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
852 if (!(lio_dev->io_qmask.iq & (1ULL << i)))
855 if (lio_dev->instr_queue[i] == NULL)
858 pending = rte_atomic64_read(
859 &lio_dev->instr_queue[i]->instr_pending);
861 lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);
863 instr_cnt += pending;
871 } while (retry-- && instr_cnt);
877 lio_ring_doorbell(struct lio_device *lio_dev,
878 struct lio_instr_queue *iq)
880 if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
881 rte_write32(iq->fill_cnt, iq->doorbell_reg);
882 /* make sure doorbell write goes through */
889 copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
891 uint8_t *iqptr, cmdsize;
893 cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
894 iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
896 rte_memcpy(iqptr, cmd, cmdsize);
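/* Try to post one command. Fails if nb_desc - 1 instructions are already
 * outstanding; otherwise the command is copied into the slot at
 * host_write_index and the index advanced, with LIO_IQ_SEND_STOP returned
 * (command still posted) once nb_desc - 2 instructions are outstanding.
 */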
899 static inline struct lio_iq_post_status
900 post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
902 struct lio_iq_post_status st;
904 st.status = LIO_IQ_SEND_OK;
906 /* This ensures that the read index does not wrap around to the same
907 * position if the queue gets full before Octeon could fetch any instr.
909 if (rte_atomic64_read(&iq->instr_pending) >=
910 (int32_t)(iq->nb_desc - 1)) {
911 st.status = LIO_IQ_SEND_FAILED;
916 if (rte_atomic64_read(&iq->instr_pending) >=
917 (int32_t)(iq->nb_desc - 2))
918 st.status = LIO_IQ_SEND_STOP;
920 copy_cmd_into_iq(iq, cmd);
922 /* "index" is returned, host_write_index is modified. */
923 st.index = iq->host_write_index;
924 iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
928 /* Flush the command into memory. We need to be sure the data is in
929 * memory before indicating that the instruction is pending.
933 rte_atomic64_inc(&iq->instr_pending);
939 lio_add_to_request_list(struct lio_instr_queue *iq,
940 int idx, void *buf, int reqtype)
942 iq->request_list[idx].buf = buf;
943 iq->request_list[idx].reqtype = reqtype;
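/* Release a scatter-gather TX buffer once the hardware is done with it:
 * free the mbuf chain and return the gather list to the per-IQ free list
 * under the glist lock.
 */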
947 lio_free_netsgbuf(void *buf)
949 struct lio_buf_free_info *finfo = buf;
950 struct lio_device *lio_dev = finfo->lio_dev;
951 struct rte_mbuf *m = finfo->mbuf;
952 struct lio_gather *g = finfo->g;
953 uint8_t iq = finfo->iq_no;
955 /* This will take care of multiple segments also */
958 rte_spinlock_lock(&lio_dev->glist_lock[iq]);
959 STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries);
960 rte_spinlock_unlock(&lio_dev->glist_lock[iq]);
964 /* Can only run in process context */
966 lio_process_iq_request_list(struct lio_device *lio_dev,
967 struct lio_instr_queue *iq)
969 struct octeon_instr_irh *irh = NULL;
970 uint32_t old = iq->flush_index;
971 struct lio_soft_command *sc;
972 uint32_t inst_count = 0;
976 while (old != iq->lio_read_index) {
977 reqtype = iq->request_list[old].reqtype;
978 buf = iq->request_list[old].buf;
980 if (reqtype == LIO_REQTYPE_NONE)
984 case LIO_REQTYPE_NORESP_NET:
985 rte_pktmbuf_free((struct rte_mbuf *)buf);
987 case LIO_REQTYPE_NORESP_NET_SG:
988 lio_free_netsgbuf(buf);
990 case LIO_REQTYPE_SOFT_COMMAND:
992 irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
994 /* We're expecting a response from Octeon.
995 * Add sc to the ordered soft command
996 * response list; lio_process_ordered_list()
997 * will process it once the response
998 * arrives.
1000 rte_spinlock_lock(&lio_dev->response_list.lock);
1002 &lio_dev->response_list.pending_req_count);
1004 &lio_dev->response_list.head,
1005 &sc->node, entries);
1006 rte_spinlock_unlock(
1007 &lio_dev->response_list.lock);
1010 /* This callback must not sleep */
1011 sc->callback(LIO_REQUEST_DONE,
1017 lio_dev_err(lio_dev,
1018 "Unknown reqtype: %d buf: %p at idx %d\n",
1022 iq->request_list[old].buf = NULL;
1023 iq->request_list[old].reqtype = 0;
1027 old = lio_incr_index(old, 1, iq->nb_desc);
1030 iq->flush_index = old;
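/* Advance the driver's view of how far the hardware has read: sample
 * inst_cnt_reg, take the delta since the last sample, and fold it into
 * lio_read_index modulo the ring size.
 */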
1036 lio_update_read_index(struct lio_instr_queue *iq)
1038 uint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg);
1041 last_done = pkt_in_done - iq->pkt_in_done;
1042 iq->pkt_in_done = pkt_in_done;
1044 /* Add last_done, modulo the IQ size, to get the new index */
1045 iq->lio_read_index = (iq->lio_read_index +
1046 (uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
1051 lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
1053 uint32_t inst_processed = 0;
1056 if (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0)
1059 rte_spinlock_lock(&iq->lock);
1061 lio_update_read_index(iq);
1064 /* Process any outstanding IQ packets. */
1065 if (iq->flush_index == iq->lio_read_index)
1068 inst_processed = lio_process_iq_request_list(lio_dev, iq);
1070 if (inst_processed) {
1071 rte_atomic64_sub(&iq->instr_pending, inst_processed);
1072 iq->stats.instr_processed += inst_processed;
1079 rte_spinlock_unlock(&iq->lock);
1081 rte_atomic64_clear(&iq->iq_flush_running);
1087 lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
1088 void *buf, uint32_t datasize, uint32_t reqtype)
1090 struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
1091 struct lio_iq_post_status st;
1093 rte_spinlock_lock(&iq->post_lock);
1095 st = post_command2(iq, cmd);
1097 if (st.status != LIO_IQ_SEND_FAILED) {
1098 lio_add_to_request_list(iq, st.index, buf, reqtype);
1099 LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
1101 LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);
1103 lio_ring_doorbell(lio_dev, iq);
1105 LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
1108 rte_spinlock_unlock(&iq->post_lock);
1114 lio_prepare_soft_command(struct lio_device *lio_dev,
1115 struct lio_soft_command *sc, uint8_t opcode,
1116 uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
1119 struct octeon_instr_pki_ih3 *pki_ih3;
1120 struct octeon_instr_ih3 *ih3;
1121 struct octeon_instr_irh *irh;
1122 struct octeon_instr_rdp *rdp;
1124 RTE_ASSERT(opcode <= 15);
1125 RTE_ASSERT(subcode <= 127);
1127 ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
1129 ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;
1131 pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
1136 pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
1139 pki_ih3->tag = LIO_CONTROL;
1140 pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
1141 pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
1146 ih3->dlengsz = sc->datasize;
1148 irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
1149 irh->opcode = opcode;
1150 irh->subcode = subcode;
1152 /* opcode/subcode specific parameters (ossp) */
1153 irh->ossp = irh_ossp;
1154 sc->cmd.cmd3.ossp[0] = ossp0;
1155 sc->cmd.cmd3.ossp[1] = ossp1;
1157 if (sc->rdatasize) {
1158 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
1159 rdp->pcie_port = lio_dev->pcie_port;
1160 rdp->rlen = sc->rdatasize;
1163 ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3;
1167 ih3->fsz = OCTEON_PCI_CMD_O3;
1172 lio_send_soft_command(struct lio_device *lio_dev,
1173 struct lio_soft_command *sc)
1175 struct octeon_instr_ih3 *ih3;
1176 struct octeon_instr_irh *irh;
1179 ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
1181 RTE_ASSERT(sc->dmadptr);
1182 sc->cmd.cmd3.dptr = sc->dmadptr;
1185 irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
1187 RTE_ASSERT(sc->dmarptr);
1188 RTE_ASSERT(sc->status_word != NULL);
1189 *sc->status_word = LIO_COMPLETION_WORD_INIT;
1190 sc->cmd.cmd3.rptr = sc->dmarptr;
1193 len = (uint32_t)ih3->dlengsz;
1196 sc->timeout = lio_uptime + sc->wait_time;
1198 return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
1199 LIO_REQTYPE_SOFT_COMMAND);
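/* Create the per-port mempool of mbufs used as soft-command buffers; each
 * buffer must be able to hold the command header, context, data and response
 * areas carved out by lio_alloc_soft_command().
 */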
1203 lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
1205 char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
1208 buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
1209 snprintf(sc_pool_name, sizeof(sc_pool_name),
1210 "lio_sc_pool_%u", lio_dev->port_id);
1211 lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
1212 LIO_MAX_SOFT_COMMAND_BUFFERS,
1213 0, 0, buf_size, SOCKET_ID_ANY);
1218 lio_free_sc_buffer_pool(struct lio_device *lio_dev)
1220 rte_mempool_free(lio_dev->sc_buf_pool);
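/* Carve a soft command out of a single sc_buf_pool mbuf: the
 * lio_soft_command header sits at the start, followed by the optional
 * context area and then the data and response areas, each starting on a
 * 128-byte boundary.
 */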
1223 struct lio_soft_command *
1224 lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
1225 uint32_t rdatasize, uint32_t ctxsize)
1227 uint32_t offset = sizeof(struct lio_soft_command);
1228 struct lio_soft_command *sc;
1232 RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
1233 LIO_SOFT_COMMAND_BUFFER_SIZE);
1235 m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
1237 lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
1241 /* set rte_mbuf data size and there is only 1 segment */
1242 m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
1243 m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
1245 /* use rte_mbuf buffer for soft command */
1246 sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
1247 memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
1248 sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
1249 sc->dma_addr = rte_mbuf_data_iova(m);
1252 dma_addr = sc->dma_addr;
1255 sc->ctxptr = (uint8_t *)sc + offset;
1256 sc->ctxsize = ctxsize;
1259 /* Start data at 128 byte boundary */
1260 offset = (offset + ctxsize + 127) & 0xffffff80;
1263 sc->virtdptr = (uint8_t *)sc + offset;
1264 sc->dmadptr = dma_addr + offset;
1265 sc->datasize = datasize;
1268 /* Start rdata at 128 byte boundary */
1269 offset = (offset + datasize + 127) & 0xffffff80;
1272 RTE_ASSERT(rdatasize >= 16);
1273 sc->virtrptr = (uint8_t *)sc + offset;
1274 sc->dmarptr = dma_addr + offset;
1275 sc->rdatasize = rdatasize;
1276 sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
1284 lio_free_soft_command(struct lio_soft_command *sc)
1286 rte_pktmbuf_free(sc->mbuf);
1290 lio_setup_response_list(struct lio_device *lio_dev)
1292 STAILQ_INIT(&lio_dev->response_list.head);
1293 rte_spinlock_init(&lio_dev->response_list.lock);
1294 rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
1298 lio_process_ordered_list(struct lio_device *lio_dev)
1300 int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
1301 struct lio_response_list *ordered_sc_list;
1302 struct lio_soft_command *sc;
1303 int request_complete = 0;
1307 ordered_sc_list = &lio_dev->response_list;
1310 rte_spinlock_lock(&ordered_sc_list->lock);
1312 if (STAILQ_EMPTY(&ordered_sc_list->head)) {
1313 /* ordered_sc_list is empty; there is
1314 * nothing to process
1316 rte_spinlock_unlock(&ordered_sc_list->lock);
1320 sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
1321 struct lio_soft_command, node);
1323 status = LIO_REQUEST_PENDING;
1325 /* check if Octeon has finished DMA'ing a response
1326 * to where rptr is pointing
1328 status64 = *sc->status_word;
1330 if (status64 != LIO_COMPLETION_WORD_INIT) {
1331 /* This logic ensures that all 64b have been written.
1332 * 1. check byte 0 for non-FF
1333 * 2. if non-FF, then swap result from BE to host order
1334 * 3. check byte 7 (swapped to 0) for non-FF
1335 * 4. if non-FF, use the low 32-bit status code
1336 * 5. if either byte 0 or byte 7 is FF, don't use status
1338 if ((status64 & 0xff) != 0xff) {
1339 lio_swap_8B_data(&status64, 1);
1340 if (((status64 & 0xff) != 0xff)) {
1341 /* retrieve 16-bit firmware status */
1342 status = (uint32_t)(status64 &
1346 LIO_FIRMWARE_STATUS_CODE(
1350 status = LIO_REQUEST_DONE;
1354 } else if ((sc->timeout && lio_check_timeout(lio_uptime,
1356 lio_dev_err(lio_dev,
1357 "cmd failed, timeout (%ld, %ld)\n",
1358 (long)lio_uptime, (long)sc->timeout);
1359 status = LIO_REQUEST_TIMEOUT;
1362 if (status != LIO_REQUEST_PENDING) {
1363 /* we have received a response or we have timed out.
1364 * remove node from linked list
1366 STAILQ_REMOVE(&ordered_sc_list->head,
1367 &sc->node, lio_stailq_node, entries);
1369 &lio_dev->response_list.pending_req_count);
1370 rte_spinlock_unlock(&ordered_sc_list->lock);
1373 sc->callback(status, sc->callback_arg);
1377 /* no response yet */
1378 request_complete = 0;
1379 rte_spinlock_unlock(&ordered_sc_list->lock);
1382 /* If we hit the maximum number of ordered requests to process per
1383 * loop, quit and let this function be invoked again the next time
1384 * the poll thread runs, to process the remaining requests.
1385 * Without such an upper limit this function could take up the
1386 * entire CPU.
1388 if (request_complete >= resp_to_process)
1390 } while (request_complete);
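/* Pop and return the first node of a stailq-based free list, or NULL if the
 * list is empty.
 */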
1395 static inline struct lio_stailq_node *
1396 list_delete_first_node(struct lio_stailq_head *head)
1398 struct lio_stailq_node *node;
1400 if (STAILQ_EMPTY(head))
1403 node = STAILQ_FIRST(head);
1406 STAILQ_REMOVE(head, node, lio_stailq_node, entries);
1412 lio_delete_sglist(struct lio_instr_queue *txq)
1414 struct lio_device *lio_dev = txq->lio_dev;
1415 int iq_no = txq->q_index;
1416 struct lio_gather *g;
1418 if (lio_dev->glist_head == NULL)
1422 g = (struct lio_gather *)list_delete_first_node(
1423 &lio_dev->glist_head[iq_no]);
1427 (void *)((unsigned long)g->sg - g->adjust));
1434 * \brief Set up gather lists
1435 * @param lio_dev - pointer to the lio device structure
1438 lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
1439 int fw_mapped_iq, int num_descs, unsigned int socket_id)
1441 struct lio_gather *g;
1444 rte_spinlock_init(&lio_dev->glist_lock[iq_no]);
1446 STAILQ_INIT(&lio_dev->glist_head[iq_no]);
1448 for (i = 0; i < num_descs; i++) {
1449 g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,
1452 lio_dev_err(lio_dev,
1453 "lio_gather memory allocation failed for qno %d\n",
1459 ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);
1461 g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,
1462 RTE_CACHE_LINE_SIZE, socket_id);
1463 if (g->sg == NULL) {
1464 lio_dev_err(lio_dev,
1465 "sg list memory allocation failed for qno %d\n",
1471 /* The gather component should be aligned on 64-bit boundary */
1472 if (((unsigned long)g->sg) & 7) {
1473 g->adjust = 8 - (((unsigned long)g->sg) & 7);
1475 (struct lio_sg_entry *)((unsigned long)g->sg +
1479 STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,
1483 if (i != num_descs) {
1484 lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);
1492 lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
1494 lio_delete_instr_queue(lio_dev, iq_no);
1495 rte_free(lio_dev->instr_queue[iq_no]);
1496 lio_dev->instr_queue[iq_no] = NULL;
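/* Number of free command slots in the instruction queue: one descriptor is
 * always left unused, so this is nb_desc - 1 minus the instructions still
 * pending.
 */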
1500 static inline uint32_t
1501 lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
1503 return ((lio_dev->instr_queue[q_no]->nb_desc - 1) -
1504 (uint32_t)rte_atomic64_read(
1505 &lio_dev->instr_queue[q_no]->instr_pending));
1509 lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
1511 return ((uint32_t)rte_atomic64_read(
1512 &lio_dev->instr_queue[q_no]->instr_pending) >=
1513 (lio_dev->instr_queue[q_no]->nb_desc - 2));
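/* Flush the IQ until the number of free descriptors climbs back above
 * LIO_FLUSH_WM (half the ring) or the retry budget runs out; returns 0 if
 * space was reclaimed, 1 if the queue stayed congested.
 */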
1517 lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)
1519 struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
1520 uint32_t count = 10000;
1522 while ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) &&
1524 lio_flush_iq(lio_dev, iq);
1526 return count ? 0 : 1;
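/* Completion callback for control-packet soft commands: recover the
 * lio_ctrl_pkt (and its ctrl_cmd) saved in the command's context area, then
 * release the soft-command buffer.
 */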
1530 lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)
1532 struct lio_soft_command *sc = sc_ptr;
1533 struct lio_dev_ctrl_cmd *ctrl_cmd;
1534 struct lio_ctrl_pkt *ctrl_pkt;
1536 ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;
1537 ctrl_cmd = ctrl_pkt->ctrl_cmd;
1540 lio_free_soft_command(sc);
1543 static inline struct lio_soft_command *
1544 lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,
1545 struct lio_ctrl_pkt *ctrl_pkt)
1547 struct lio_soft_command *sc = NULL;
1548 uint32_t uddsize, datasize;
1552 uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);
1554 datasize = OCTEON_CMD_SIZE + uddsize;
1555 rdatasize = (ctrl_pkt->wait_time) ? 16 : 0;
1557 sc = lio_alloc_soft_command(lio_dev, datasize,
1558 rdatasize, sizeof(struct lio_ctrl_pkt));
1562 rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));
1564 data = (uint8_t *)sc->virtdptr;
1566 rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);
1568 lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);
1571 /* Endian-Swap for UDD should have been done by caller. */
1572 rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);
1575 sc->iq_no = (uint32_t)ctrl_pkt->iq_no;
1577 lio_prepare_soft_command(lio_dev, sc,
1578 LIO_OPCODE, LIO_OPCODE_CMD,
1581 sc->callback = lio_ctrl_cmd_callback;
1582 sc->callback_arg = sc;
1583 sc->wait_time = ctrl_pkt->wait_time;
1589 lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)
1591 struct lio_soft_command *sc = NULL;
1594 sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);
1596 lio_dev_err(lio_dev, "soft command allocation failed\n");
1600 retval = lio_send_soft_command(lio_dev, sc);
1601 if (retval == LIO_IQ_SEND_FAILED) {
1602 lio_free_soft_command(sc);
1603 lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n",
1604 lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);
1611 /** Send a data packet to the device
1612 * @param lio_dev - lio device pointer
1613 * @param ndata - control structure with queueing and buffer information
1615 * @return LIO_IQ_SEND_FAILED if it failed to add to the input queue,
1616 * LIO_IQ_SEND_STOP if the queue should be stopped, and LIO_IQ_SEND_OK if it was sent okay.
1619 lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata)
1621 return lio_send_command(lio_dev, ndata->q_no, &ndata->cmd,
1622 ndata->buf, ndata->datasize, ndata->reqtype);
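/* TX burst handler: bail out if the link is down, reclaim completed IQ
 * slots, then build and post one command per mbuf (a direct command for
 * single-segment packets, a gather list for multi-segment packets).
 */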
1626 lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
1628 struct lio_instr_queue *txq = tx_queue;
1629 union lio_cmd_setup cmdsetup;
1630 struct lio_device *lio_dev;
1631 struct lio_iq_stats *stats;
1632 struct lio_data_pkt ndata;
1633 int i, processed = 0;
1639 lio_dev = txq->lio_dev;
1640 iq_no = txq->txpciq.s.q_no;
1641 stats = &lio_dev->instr_queue[iq_no]->stats;
1643 if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
1644 PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
1645 lio_dev->linfo.link.s.link_up);
1649 lio_dev_cleanup_iq(lio_dev, iq_no);
1651 for (i = 0; i < nb_pkts; i++) {
1652 uint32_t pkt_len = 0;
1656 /* Prepare the attributes for the data to be passed to BASE. */
1657 memset(&ndata, 0, sizeof(struct lio_data_pkt));
1662 if (lio_iq_is_full(lio_dev, ndata.q_no)) {
1663 stats->tx_iq_busy++;
1664 if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
1665 PMD_TX_LOG(lio_dev, ERR,
1666 "Transmit failed iq:%d full\n",
1672 cmdsetup.cmd_setup64 = 0;
1673 cmdsetup.s.iq_no = iq_no;
1675 /* check checksum offload flags to form cmd */
1676 if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
1677 cmdsetup.s.ip_csum = 1;
1679 if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
1680 cmdsetup.s.tnl_csum = 1;
1681 else if ((m->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ||
1682 (m->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM))
1683 cmdsetup.s.transport_csum = 1;
1685 if (m->nb_segs == 1) {
1686 pkt_len = rte_pktmbuf_data_len(m);
1687 cmdsetup.s.u.datasize = pkt_len;
1688 lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
1690 ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
1691 ndata.reqtype = LIO_REQTYPE_NORESP_NET;
1693 struct lio_buf_free_info *finfo;
1694 struct lio_gather *g;
1698 finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
1700 if (finfo == NULL) {
1701 PMD_TX_LOG(lio_dev, ERR,
1702 "free buffer alloc failed\n");
1706 rte_spinlock_lock(&lio_dev->glist_lock[iq_no]);
1707 g = (struct lio_gather *)list_delete_first_node(
1708 &lio_dev->glist_head[iq_no]);
1709 rte_spinlock_unlock(&lio_dev->glist_lock[iq_no]);
1711 PMD_TX_LOG(lio_dev, ERR,
1712 "Transmit scatter gather: glist null!\n");
1716 cmdsetup.s.gather = 1;
1717 cmdsetup.s.u.gatherptrs = m->nb_segs;
1718 lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
1721 memset(g->sg, 0, g->sg_size);
1722 g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
1723 lio_add_sg_size(&g->sg[0], m->data_len, 0);
1724 pkt_len = m->data_len;
727 /* First seg taken care of above */
1728 frags = m->nb_segs - 1;
1732 g->sg[(i >> 2)].ptr[(i & 3)] =
1733 rte_mbuf_data_iova(m);
1734 lio_add_sg_size(&g->sg[(i >> 2)],
1735 m->data_len, (i & 3));
1736 pkt_len += m->data_len;
1741 phyaddr = rte_mem_virt2iova(g->sg);
1742 if (phyaddr == RTE_BAD_IOVA) {
1743 PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
1747 ndata.cmd.cmd3.dptr = phyaddr;
1748 ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;
1751 finfo->lio_dev = lio_dev;
1752 finfo->iq_no = (uint64_t)iq_no;
1756 ndata.datasize = pkt_len;
1758 status = lio_send_data_pkt(lio_dev, &ndata);
1760 if (unlikely(status == LIO_IQ_SEND_FAILED)) {
1761 PMD_TX_LOG(lio_dev, ERR, "send failed\n");
1765 if (unlikely(status == LIO_IQ_SEND_STOP)) {
1766 PMD_TX_LOG(lio_dev, DEBUG, "iq full\n");
1767 /* create space as iq is full */
1768 lio_dev_cleanup_iq(lio_dev, iq_no);
1772 stats->tx_tot_bytes += pkt_len;
1777 stats->tx_dropped += (nb_pkts - processed);
1783 lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
1785 struct lio_instr_queue *txq;
1786 struct lio_droq *rxq;
1789 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1790 txq = eth_dev->data->tx_queues[i];
1792 lio_dev_tx_queue_release(eth_dev, i);
1793 eth_dev->data->tx_queues[i] = NULL;
1797 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1798 rxq = eth_dev->data->rx_queues[i];
1800 lio_dev_rx_queue_release(eth_dev, i);
1801 eth_dev->data->rx_queues[i] = NULL;