diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 64c038510a..8d705bfe7f 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -1,37 +1,8 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_cycles.h>
 #include <rte_malloc.h>
 
@@ -42,7 +13,7 @@
 #define LIO_MAX_SG 12
 /* Flush iq if available tx_desc fall below LIO_FLUSH_WM */
-#define LIO_FLUSH_WM(_iq) ((_iq)->max_count / 2)
+#define LIO_FLUSH_WM(_iq) ((_iq)->nb_desc / 2)
 
 #define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
 
 static void
@@ -70,7 +41,7 @@ lio_droq_destroy_ring_buffers(struct lio_droq *droq)
 {
 	uint32_t i;
 
-	for (i = 0; i < droq->max_count; i++) {
+	for (i = 0; i < droq->nb_desc; i++) {
 		if (droq->recv_buf_list[i].buffer) {
 			rte_pktmbuf_free((struct rte_mbuf *)
 					 droq->recv_buf_list[i].buffer);
@@ -81,28 +52,6 @@ lio_droq_destroy_ring_buffers(struct lio_droq *droq)
 	lio_droq_reset_indices(droq);
 }
 
-static void *
-lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
-{
-	struct lio_droq *droq = lio_dev->droq[q_no];
-	struct rte_mempool *mpool = droq->mpool;
-	struct rte_mbuf *m;
-
-	m = rte_pktmbuf_alloc(mpool);
-	if (m == NULL) {
-		lio_dev_err(lio_dev, "Cannot allocate\n");
-		return NULL;
-	}
-
-	rte_mbuf_refcnt_set(m, 1);
-	m->next = NULL;
-	m->data_off = RTE_PKTMBUF_HEADROOM;
-	m->nb_segs = 1;
-	m->pool = mpool;
-
-	return m;
-}
-
 static int
 lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
 			    struct lio_droq *droq)
@@ -111,10 +60,11 @@ lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
 	uint32_t i;
 	void *buf;
 
-	for (i = 0; i < droq->max_count; i++) {
-		buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+	for (i = 0; i < droq->nb_desc; i++) {
+		buf = rte_pktmbuf_alloc(droq->mpool);
 		if (buf == NULL) {
 			lio_dev_err(lio_dev, "buffer alloc failed\n");
+			droq->stats.rx_alloc_failure++;
 			lio_droq_destroy_ring_buffers(droq);
 			return -ENOMEM;
 		}
@@ -185,7 +135,7 @@ lio_alloc_info_buffer(struct lio_device *lio_dev,
 {
 	droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
 						 "info_list", droq->q_no,
-						 (droq->max_count *
+						 (droq->nb_desc *
 							LIO_DROQ_INFO_SIZE),
 						 RTE_CACHE_LINE_SIZE,
 						 socket_id);
@@ -193,7 +143,7 @@ lio_alloc_info_buffer(struct lio_device *lio_dev,
 	if (droq->info_mz == NULL)
 		return NULL;
 
-	droq->info_list_dma = droq->info_mz->phys_addr;
+	droq->info_list_dma = droq->info_mz->iova;
 	droq->info_alloc_size = droq->info_mz->len;
 	droq->info_base_addr = (size_t)droq->info_mz->addr;
 
@@ -227,10 +177,10 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
 
 	c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
 
-	droq->max_count = num_descs;
+	droq->nb_desc = num_descs;
 	droq->buffer_size = desc_size;
 
-	desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
+	desc_ring_size = droq->nb_desc * LIO_DROQ_DESC_SIZE;
 	droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
 						      "droq", q_no,
 						      desc_ring_size,
@@ -243,13 +193,13 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
 		return -1;
 	}
 
-	droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
+	droq->desc_ring_dma = droq->desc_ring_mz->iova;
 	droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
 
 	lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
 		    q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
 	lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
-		    droq->max_count);
+		    droq->nb_desc);
 
 	droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
 	if (droq->info_list == NULL) {
@@ -258,7 +208,7 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
 	}
 
 	droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
-						 (droq->max_count *
+						 (droq->nb_desc *
 							LIO_DROQ_RECVBUF_SIZE),
 						 RTE_CACHE_LINE_SIZE,
 						 socket_id);
@@ -295,11 +245,6 @@ lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (lio_dev->droq[oq_no]) {
-		lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
-		return 0;
-	}
-
 	/* Allocate the DS for the new droq. */
 	droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
 				  RTE_CACHE_LINE_SIZE, socket_id);
@@ -324,7 +269,7 @@ lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
 	/* Send credit for octeon output queues. credits are always
 	 * sent after the output queue is enabled.
 	 */
-	rte_write32(lio_dev->droq[oq_no]->max_count,
+	rte_write32(lio_dev->droq[oq_no]->nb_desc,
 		    lio_dev->droq[oq_no]->pkts_credit_reg);
 
 	rte_wmb();
@@ -363,13 +308,13 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
 				do {
 					droq->refill_idx = lio_incr_index(
 								droq->refill_idx, 1,
-								droq->max_count);
+								droq->nb_desc);
 					desc_refilled++;
 					droq->refill_count--;
 				} while (droq->recv_buf_list[droq->refill_idx].buffer);
 			}
 			refill_index = lio_incr_index(refill_index, 1,
-						      droq->max_count);
+						      droq->nb_desc);
 		}	/* while */
 
 	return desc_refilled;
@@ -377,7 +322,6 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
 
 /* lio_droq_refill
  *
- * @param lio_dev	- pointer to the lio device structure
  * @param droq		- droq in which descriptors require new buffers.
  *
  * Description:
@@ -393,7 +337,7 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
  *  This routine is called with droq->lock held.
  */
 static uint32_t
-lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
+lio_droq_refill(struct lio_droq *droq)
 {
 	struct lio_droq_desc *desc_ring;
 	uint32_t desc_refilled = 0;
@@ -401,17 +345,19 @@ lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
 
 	desc_ring = droq->desc_ring;
 
-	while (droq->refill_count && (desc_refilled < droq->max_count)) {
+	while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
 		/* If a valid buffer exists (happens if there is no dispatch),
 		 * reuse the buffer, else allocate.
 		 */
 		if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
-			buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+			buf = rte_pktmbuf_alloc(droq->mpool);
 			/* If a buffer could not be allocated, no point in
 			 * continuing
 			 */
-			if (buf == NULL)
+			if (buf == NULL) {
+				droq->stats.rx_alloc_failure++;
 				break;
+			}
 
 			droq->recv_buf_list[droq->refill_idx].buffer = buf;
 		}
@@ -422,7 +368,7 @@ lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
 		droq->info_list[droq->refill_idx].length = 0;
 
 		droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
-						  droq->max_count);
+						  droq->nb_desc);
 		desc_refilled++;
 		droq->refill_count--;
 	}
@@ -469,7 +415,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 		buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
 						(uint32_t)info->length);
 		droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
-						droq->max_count);
+						droq->nb_desc);
 		droq->refill_count += buf_cnt;
 	} else {
 		if (info->length <= droq->buffer_size) {
@@ -482,13 +428,10 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 			droq->recv_buf_list[droq->read_idx].buffer = NULL;
 			droq->read_idx = lio_incr_index(
 						droq->read_idx, 1,
-						droq->max_count);
+						droq->nb_desc);
 			droq->refill_count++;
 
 			if (likely(nicbuf != NULL)) {
-				nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
-				nicbuf->nb_segs = 1;
-				nicbuf->next = NULL;
 				/* We don't have a way to pass flags yet */
 				nicbuf->ol_flags = 0;
 				if (rh->r_dh.has_hash) {
@@ -542,9 +485,6 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 				if (!pkt_len)
 					first_buf = nicbuf;
 
-				nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
-				nicbuf->nb_segs = 1;
-				nicbuf->next = NULL;
 				nicbuf->port = lio_dev->port_id;
 				/* We don't have a way to pass
 				 * flags yet
@@ -582,7 +522,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 				pkt_len += cpy_len;
 				droq->read_idx = lio_incr_index(
 							droq->read_idx,
-							1, droq->max_count);
+							1, droq->nb_desc);
 				droq->refill_count++;
 
 				/* Prefetch buffer pointers when on a
@@ -614,7 +554,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 	}
 
 	if (droq->refill_count >= droq->refill_threshold) {
-		int desc_refilled = lio_droq_refill(lio_dev, droq);
+		int desc_refilled = lio_droq_refill(droq);
 
 		/* Flush the droq descriptor data to memory to be sure
 		 * that when we update the credits the data in memory is
@@ -629,6 +569,11 @@
 	info->length = 0;
 	info->rh.rh64 = 0;
 
+	droq->stats.pkts_received++;
+	droq->stats.rx_pkts_received += data_pkts;
+	droq->stats.rx_bytes_received += data_total_len;
+	droq->stats.bytes_received += total_len;
+
 	return data_pkts;
 }
@@ -755,10 +700,10 @@ lio_init_instr_queue(struct lio_device *lio_dev,
 		return -1;
 	}
 
-	iq->base_addr_dma = iq->iq_mz->phys_addr;
+	iq->base_addr_dma = iq->iq_mz->iova;
 	iq->base_addr = (uint8_t *)iq->iq_mz->addr;
 
-	iq->max_count = num_descs;
+	iq->nb_desc = num_descs;
 
 	/* Initialize a list to holds requests that have been posted to Octeon
 	 * but has yet to be fetched by octeon
@@ -777,7 +722,7 @@ lio_init_instr_queue(struct lio_device *lio_dev,
 
 	lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
 		    iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
-		    iq->max_count);
+		    iq->nb_desc);
 
 	iq->lio_dev = lio_dev;
 	iq->txpciq.txpciq64 = txpciq.txpciq64;
@@ -874,14 +819,6 @@ lio_setup_iq(struct lio_device *lio_dev, int q_index,
 {
 	uint32_t iq_no = (uint32_t)txpciq.s.q_no;
 
-	if (lio_dev->instr_queue[iq_no]) {
-		lio_dev_dbg(lio_dev, "IQ is in use. Cannot create the IQ: %d again\n",
-			    iq_no);
-		lio_dev->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
-		lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
-		return 0;
-	}
-
 	lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
 						sizeof(struct lio_instr_queue),
 						RTE_CACHE_LINE_SIZE, socket_id);
@@ -891,23 +828,49 @@ lio_setup_iq(struct lio_device *lio_dev, int q_index,
 	lio_dev->instr_queue[iq_no]->q_index = q_index;
 	lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
 
-	if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id))
-		goto release_lio_iq;
+	if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id)) {
+		rte_free(lio_dev->instr_queue[iq_no]);
+		lio_dev->instr_queue[iq_no] = NULL;
+		return -1;
+	}
 
 	lio_dev->num_iqs++;
-	if (lio_dev->fn_list.enable_io_queues(lio_dev))
-		goto delete_lio_iq;
 
 	return 0;
+}
 
-delete_lio_iq:
-	lio_delete_instr_queue(lio_dev, iq_no);
-	lio_dev->num_iqs--;
-release_lio_iq:
-	rte_free(lio_dev->instr_queue[iq_no]);
-	lio_dev->instr_queue[iq_no] = NULL;
+int
+lio_wait_for_instr_fetch(struct lio_device *lio_dev)
+{
+	int pending, instr_cnt;
+	int i, retry = 1000;
 
-	return -1;
+	do {
+		instr_cnt = 0;
+
+		for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
+			if (!(lio_dev->io_qmask.iq & (1ULL << i)))
+				continue;
+
+			if (lio_dev->instr_queue[i] == NULL)
+				break;
+
+			pending = rte_atomic64_read(
+				&lio_dev->instr_queue[i]->instr_pending);
+			if (pending)
+				lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);
+
+			instr_cnt += pending;
+		}
+
+		if (instr_cnt == 0)
+			break;
+
+		rte_delay_ms(1);
+
+	} while (retry-- && instr_cnt);
+
+	return instr_cnt;
 }
 
 static inline void
@@ -944,14 +907,14 @@ post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
 	 * position if queue gets full before Octeon could fetch any instr.
 	 */
 	if (rte_atomic64_read(&iq->instr_pending) >=
-			(int32_t)(iq->max_count - 1)) {
+			(int32_t)(iq->nb_desc - 1)) {
 		st.status = LIO_IQ_SEND_FAILED;
 		st.index = -1;
 		return st;
 	}
 
 	if (rte_atomic64_read(&iq->instr_pending) >=
-			(int32_t)(iq->max_count - 2))
+			(int32_t)(iq->nb_desc - 2))
 		st.status = LIO_IQ_SEND_STOP;
 
 	copy_cmd_into_iq(iq, cmd);
@@ -959,7 +922,7 @@ post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
 	/* "index" is returned, host_write_index is modified. */
 	st.index = iq->host_write_index;
 	iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
-					      iq->max_count);
+					      iq->nb_desc);
 	iq->fill_cnt++;
 
 	/* Flush the command into memory. We need to be sure the data is in
@@ -1061,7 +1024,7 @@ lio_process_iq_request_list(struct lio_device *lio_dev,
 
 skip_this:
 		inst_count++;
-		old = lio_incr_index(old, 1, iq->max_count);
+		old = lio_incr_index(old, 1, iq->nb_desc);
 	}
 
 	iq->flush_index = old;
@@ -1081,7 +1044,7 @@ lio_update_read_index(struct lio_instr_queue *iq)
 	/* Add last_done and modulo with the IQ size to get new index */
 	iq->lio_read_index = (iq->lio_read_index +
 			      (uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
-			     iq->max_count;
+			     iq->nb_desc;
 }
 
 int
@@ -1105,8 +1068,10 @@ lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
 
 		inst_processed = lio_process_iq_request_list(lio_dev, iq);
 
-		if (inst_processed)
+		if (inst_processed) {
 			rte_atomic64_sub(&iq->instr_pending, inst_processed);
+			iq->stats.instr_processed += inst_processed;
+		}
 
 		tot_inst_processed += inst_processed;
 		inst_processed = 0;
@@ -1122,7 +1087,7 @@
 static int
 lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
-		 void *buf, uint32_t datasize __rte_unused, uint32_t reqtype)
+		 void *buf, uint32_t datasize, uint32_t reqtype)
 {
 	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
 	struct lio_iq_post_status st;
@@ -1133,7 +1098,13 @@ lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
 
 	if (st.status != LIO_IQ_SEND_FAILED) {
 		lio_add_to_request_list(iq, st.index, buf, reqtype);
+		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
+					      datasize);
+		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);
+
 		lio_ring_doorbell(lio_dev, iq);
+	} else {
+		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
 	}
 
 	rte_spinlock_unlock(&iq->post_lock);
@@ -1277,7 +1248,7 @@ lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
 	sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
 	memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
 	sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
-	sc->dma_addr = rte_mbuf_data_dma_addr(m);
+	sc->dma_addr = rte_mbuf_data_iova(m);
 	sc->mbuf = m;
 
 	dma_addr = sc->dma_addr;
@@ -1439,7 +1410,7 @@ list_delete_first_node(struct lio_stailq_head *head)
 	return node;
 }
 
-static void
+void
 lio_delete_sglist(struct lio_instr_queue *txq)
 {
 	struct lio_device *lio_dev = txq->lio_dev;
@@ -1531,7 +1502,7 @@ lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
 static inline uint32_t
 lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
 {
-	return ((lio_dev->instr_queue[q_no]->max_count - 1) -
+	return ((lio_dev->instr_queue[q_no]->nb_desc - 1) -
 		(uint32_t)rte_atomic64_read(
 				&lio_dev->instr_queue[q_no]->instr_pending));
 }
@@ -1541,7 +1512,7 @@ lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
 {
 	return ((uint32_t)rte_atomic64_read(
 			&lio_dev->instr_queue[q_no]->instr_pending) >=
-		(lio_dev->instr_queue[q_no]->max_count - 2));
+		(lio_dev->instr_queue[q_no]->nb_desc - 2));
 }
 
 static int
@@ -1557,6 +1528,88 @@ lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)
 	return count ? 0 : 1;
 }
 
+static void
+lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)
+{
+	struct lio_soft_command *sc = sc_ptr;
+	struct lio_dev_ctrl_cmd *ctrl_cmd;
+	struct lio_ctrl_pkt *ctrl_pkt;
+
+	ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;
+	ctrl_cmd = ctrl_pkt->ctrl_cmd;
+	ctrl_cmd->cond = 1;
+
+	lio_free_soft_command(sc);
+}
+
+static inline struct lio_soft_command *
+lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,
+		      struct lio_ctrl_pkt *ctrl_pkt)
+{
+	struct lio_soft_command *sc = NULL;
+	uint32_t uddsize, datasize;
+	uint32_t rdatasize;
+	uint8_t *data;
+
+	uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);
+
+	datasize = OCTEON_CMD_SIZE + uddsize;
+	rdatasize = (ctrl_pkt->wait_time) ? 16 : 0;
+
+	sc = lio_alloc_soft_command(lio_dev, datasize,
+				    rdatasize, sizeof(struct lio_ctrl_pkt));
+	if (sc == NULL)
+		return NULL;
+
+	rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));
+
+	data = (uint8_t *)sc->virtdptr;
+
+	rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);
+
+	lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);
+
+	if (uddsize) {
+		/* Endian-Swap for UDD should have been done by caller. */
+		rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);
+	}
+
+	sc->iq_no = (uint32_t)ctrl_pkt->iq_no;
+
+	lio_prepare_soft_command(lio_dev, sc,
+				 LIO_OPCODE, LIO_OPCODE_CMD,
+				 0, 0, 0);
+
+	sc->callback = lio_ctrl_cmd_callback;
+	sc->callback_arg = sc;
+	sc->wait_time = ctrl_pkt->wait_time;
+
+	return sc;
+}
+
+int
+lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)
+{
+	struct lio_soft_command *sc = NULL;
+	int retval;
+
+	sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);
+	if (sc == NULL) {
+		lio_dev_err(lio_dev, "soft command allocation failed\n");
+		return -1;
+	}
+
+	retval = lio_send_soft_command(lio_dev, sc);
+	if (retval == LIO_IQ_SEND_FAILED) {
+		lio_free_soft_command(sc);
+		lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n",
+			    lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);
+		return -1;
+	}
+
+	return retval;
+}
+
 /** Send data packet to the device
  * @param lio_dev - lio device pointer
  * @param ndata - control structure with queueing, and buffer information
@@ -1577,6 +1630,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 	struct lio_instr_queue *txq = tx_queue;
 	union lio_cmd_setup cmdsetup;
 	struct lio_device *lio_dev;
+	struct lio_iq_stats *stats;
 	struct lio_data_pkt ndata;
 	int i, processed = 0;
 	struct rte_mbuf *m;
@@ -1586,8 +1640,9 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 
 	lio_dev = txq->lio_dev;
 	iq_no = txq->txpciq.s.q_no;
+	stats = &lio_dev->instr_queue[iq_no]->stats;
 
-	if (!lio_dev->linfo.link.s.link_up) {
+	if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
 		PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
 			   lio_dev->linfo.link.s.link_up);
 		goto xmit_failed;
@@ -1607,6 +1662,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 		ndata.q_no = iq_no;
 
 		if (lio_iq_is_full(lio_dev, ndata.q_no)) {
+			stats->tx_iq_busy++;
 			if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
 				PMD_TX_LOG(lio_dev, ERR,
 					   "Transmit failed iq:%d full\n",
@@ -1622,7 +1678,9 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 		if (m->ol_flags & PKT_TX_IP_CKSUM)
 			cmdsetup.s.ip_csum = 1;
 
-		if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
+		if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+			cmdsetup.s.tnl_csum = 1;
+		else if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
 		    (m->ol_flags & PKT_TX_UDP_CKSUM))
 			cmdsetup.s.transport_csum = 1;
@@ -1631,12 +1689,12 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			cmdsetup.s.u.datasize = pkt_len;
 			lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
 					    &cmdsetup, tag);
-			ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
+			ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
 			ndata.reqtype = LIO_REQTYPE_NORESP_NET;
 		} else {
 			struct lio_buf_free_info *finfo;
 			struct lio_gather *g;
-			phys_addr_t phyaddr;
+			rte_iova_t phyaddr;
 			int i, frags;
 
 			finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
@@ -1663,7 +1721,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 					 &cmdsetup, tag);
 
 			memset(g->sg, 0, g->sg_size);
-			g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
+			g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
 			lio_add_sg_size(&g->sg[0], m->data_len, 0);
 			pkt_len = m->data_len;
 			finfo->mbuf = m;
@@ -1674,7 +1732,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			m = m->next;
 			while (frags--) {
 				g->sg[(i >> 2)].ptr[(i & 3)] =
-					rte_mbuf_data_dma_addr(m);
+					rte_mbuf_data_iova(m);
 				lio_add_sg_size(&g->sg[(i >> 2)],
 						m->data_len, (i & 3));
 				pkt_len += m->data_len;
@@ -1682,8 +1740,8 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 				m = m->next;
 			}
 
-			phyaddr = rte_mem_virt2phy(g->sg);
-			if (phyaddr == RTE_BAD_PHYS_ADDR) {
+			phyaddr = rte_mem_virt2iova(g->sg);
+			if (phyaddr == RTE_BAD_IOVA) {
 				PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
 				goto xmit_failed;
 			}
@@ -1712,11 +1770,37 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			lio_dev_cleanup_iq(lio_dev, iq_no);
 		}
 
+		stats->tx_done++;
+		stats->tx_tot_bytes += pkt_len;
 		processed++;
 	}
 
 xmit_failed:
+	stats->tx_dropped += (nb_pkts - processed);
 
 	return processed;
 }
+
+void
+lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
+{
+	struct lio_instr_queue *txq;
+	struct lio_droq *rxq;
+	uint16_t i;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		txq = eth_dev->data->tx_queues[i];
+		if (txq != NULL) {
+			lio_dev_tx_queue_release(txq);
+			eth_dev->data->tx_queues[i] = NULL;
+		}
+	}
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		if (rxq != NULL) {
+			lio_dev_rx_queue_release(rxq);
+			eth_dev->data->rx_queues[i] = NULL;
+		}
+	}
+}