diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index f1054572ba..2bbb893c27 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -81,28 +81,6 @@ lio_droq_destroy_ring_buffers(struct lio_droq *droq)
 	lio_droq_reset_indices(droq);
 }
 
-static void *
-lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
-{
-	struct lio_droq *droq = lio_dev->droq[q_no];
-	struct rte_mempool *mpool = droq->mpool;
-	struct rte_mbuf *m;
-
-	m = rte_pktmbuf_alloc(mpool);
-	if (m == NULL) {
-		lio_dev_err(lio_dev, "Cannot allocate\n");
-		return NULL;
-	}
-
-	rte_mbuf_refcnt_set(m, 1);
-	m->next = NULL;
-	m->data_off = RTE_PKTMBUF_HEADROOM;
-	m->nb_segs = 1;
-	m->pool = mpool;
-
-	return m;
-}
-
 static int
 lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
 			    struct lio_droq *droq)
@@ -112,9 +90,10 @@ lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
 	void *buf;
 
 	for (i = 0; i < droq->max_count; i++) {
-		buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+		buf = rte_pktmbuf_alloc(droq->mpool);
 		if (buf == NULL) {
 			lio_dev_err(lio_dev, "buffer alloc failed\n");
+			droq->stats.rx_alloc_failure++;
 			lio_droq_destroy_ring_buffers(droq);
 			return -ENOMEM;
 		}
@@ -377,7 +356,6 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
 
 /* lio_droq_refill
  *
- * @param lio_dev	- pointer to the lio device structure
  * @param droq		- droq in which descriptors require new buffers.
  *
  * Description:
@@ -393,7 +371,7 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
  * This routine is called with droq->lock held.
  */
 static uint32_t
-lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
+lio_droq_refill(struct lio_droq *droq)
 {
 	struct lio_droq_desc *desc_ring;
 	uint32_t desc_refilled = 0;
@@ -406,12 +384,14 @@ lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
 		 * reuse the buffer, else allocate.
 		 */
 		if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
-			buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+			buf = rte_pktmbuf_alloc(droq->mpool);
 			/* If a buffer could not be allocated, no point in
 			 * continuing
 			 */
-			if (buf == NULL)
+			if (buf == NULL) {
+				droq->stats.rx_alloc_failure++;
 				break;
+			}
 
 			droq->recv_buf_list[droq->refill_idx].buffer = buf;
 		}
@@ -486,9 +466,6 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 		droq->refill_count++;
 
 		if (likely(nicbuf != NULL)) {
-			nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
-			nicbuf->nb_segs = 1;
-			nicbuf->next = NULL;
 			/* We don't have a way to pass flags yet */
 			nicbuf->ol_flags = 0;
 			if (rh->r_dh.has_hash) {
@@ -542,9 +519,6 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 			if (!pkt_len)
 				first_buf = nicbuf;
-			nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
-			nicbuf->nb_segs = 1;
-			nicbuf->next = NULL;
 			nicbuf->port = lio_dev->port_id;
 			/* We don't have a way to pass
 			 * flags yet
 			 */
@@ -614,7 +588,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 	}
 
 	if (droq->refill_count >= droq->refill_threshold) {
-		int desc_refilled = lio_droq_refill(lio_dev, droq);
+		int desc_refilled = lio_droq_refill(droq);
 
 		/* Flush the droq descriptor data to memory to be sure
 		 * that when we update the credits the data in memory is
@@ -629,6 +603,11 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
 	info->length = 0;
 	info->rh.rh64 = 0;
 
+	droq->stats.pkts_received++;
+	droq->stats.rx_pkts_received += data_pkts;
+	droq->stats.rx_bytes_received += data_total_len;
+	droq->stats.bytes_received += total_len;
+
 	return data_pkts;
 }
 
@@ -910,6 +889,40 @@ release_lio_iq:
 	return -1;
 }
 
+int
+lio_wait_for_instr_fetch(struct lio_device *lio_dev)
+{
+	int pending, instr_cnt;
+	int i, retry = 1000;
+
+	do {
+		instr_cnt = 0;
+
+		for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
+			if (!(lio_dev->io_qmask.iq & (1ULL << i)))
+				continue;
+
+			if (lio_dev->instr_queue[i] == NULL)
+				break;
+
+			pending = rte_atomic64_read(
+			    &lio_dev->instr_queue[i]->instr_pending);
+			if (pending)
+				lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);
+
+			instr_cnt += pending;
+		}
+
+		if (instr_cnt == 0)
+			break;
+
+		rte_delay_ms(1);
+
+	} while (retry-- && instr_cnt);
+
+	return instr_cnt;
+}
+
 static inline void
 lio_ring_doorbell(struct lio_device *lio_dev,
 		  struct lio_instr_queue *iq)
@@ -1105,8 +1118,10 @@ lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
 		inst_processed = lio_process_iq_request_list(lio_dev, iq);
 
-		if (inst_processed)
+		if (inst_processed) {
 			rte_atomic64_sub(&iq->instr_pending, inst_processed);
+			iq->stats.instr_processed += inst_processed;
+		}
 
 		tot_inst_processed += inst_processed;
 		inst_processed = 0;
 
@@ -1122,7 +1137,7 @@ lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
 
 static int
 lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
-		 void *buf, uint32_t datasize __rte_unused, uint32_t reqtype)
+		 void *buf, uint32_t datasize, uint32_t reqtype)
 {
 	struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
 	struct lio_iq_post_status st;
@@ -1133,7 +1148,13 @@ lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
 
 	if (st.status != LIO_IQ_SEND_FAILED) {
 		lio_add_to_request_list(iq, st.index, buf, reqtype);
+		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
+					      datasize);
+		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);
+
 		lio_ring_doorbell(lio_dev, iq);
+	} else {
+		LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
 	}
 
 	rte_spinlock_unlock(&iq->post_lock);
@@ -1659,6 +1680,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 	struct lio_instr_queue *txq = tx_queue;
 	union lio_cmd_setup cmdsetup;
 	struct lio_device *lio_dev;
+	struct lio_iq_stats *stats;
 	struct lio_data_pkt ndata;
 	int i, processed = 0;
 	struct rte_mbuf *m;
@@ -1668,6 +1690,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 
 	lio_dev = txq->lio_dev;
 	iq_no = txq->txpciq.s.q_no;
+	stats = &lio_dev->instr_queue[iq_no]->stats;
 
 	if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
 		PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
@@ -1689,6 +1712,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 		ndata.q_no = iq_no;
 
 		if (lio_iq_is_full(lio_dev, ndata.q_no)) {
+			stats->tx_iq_busy++;
 			if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
 				PMD_TX_LOG(lio_dev, ERR,
 					   "Transmit failed iq:%d full\n",
@@ -1796,11 +1820,37 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
 			lio_dev_cleanup_iq(lio_dev, iq_no);
 		}
 
+		stats->tx_done++;
+		stats->tx_tot_bytes += pkt_len;
 		processed++;
 	}
 
 xmit_failed:
+	stats->tx_dropped += (nb_pkts - processed);
 
 	return processed;
 }
 
+void
+lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
+{
+	struct lio_instr_queue *txq;
+	struct lio_droq *rxq;
+	uint16_t i;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		txq = eth_dev->data->tx_queues[i];
+		if (txq != NULL) {
+			lio_dev_tx_queue_release(txq);
+			eth_dev->data->tx_queues[i] = NULL;
+		}
+	}
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rxq = eth_dev->data->rx_queues[i];
+		if (rxq != NULL) {
+			lio_dev_rx_queue_release(rxq);
+			eth_dev->data->rx_queues[i] = NULL;
+		}
+	}
+}
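A note on the counters this change introduces. The RX side now counts into droq->stats (pkts_received, rx_pkts_received, rx_bytes_received, bytes_received, rx_alloc_failure); the TX side counts into the per-queue lio_iq_stats (tx_done, tx_tot_bytes, tx_dropped, tx_iq_busy, plus instr_posted, instr_dropped, bytes_sent and instr_processed at the instruction-queue level). The natural consumer is a device-level stats handler that sums the per-queue counters. The sketch below is illustrative only: the stand-in structs mirror just the fields this diff touches, and the function name is hypothetical; the driver's real lio_droq_stats/lio_iq_stats definitions live in its headers, not here.

#include <stdint.h>

/* Stand-in for the RX counters bumped in lio_droq_fast_process_packet()
 * and the refill paths (hypothetical layout, field names as in the diff).
 */
struct droq_stats_sketch {
	uint64_t rx_pkts_received;
	uint64_t rx_bytes_received;
	uint64_t rx_alloc_failure;
};

/* Stand-in for the TX counters bumped in lio_dev_xmit_pkts(). */
struct iq_stats_sketch {
	uint64_t tx_done;
	uint64_t tx_tot_bytes;
	uint64_t tx_dropped;
	uint64_t tx_iq_busy;
};

/* Device-level totals in the shape rte_eth_stats_get() reports. */
struct dev_totals_sketch {
	uint64_t ipackets, ibytes, rx_nombuf;
	uint64_t opackets, obytes, oerrors;
};

/* Sum per-queue counters into device totals, as an ethdev stats
 * callback typically would.
 */
static void
sum_queue_stats(const struct droq_stats_sketch *rxs, uint16_t nb_rxq,
		const struct iq_stats_sketch *txs, uint16_t nb_txq,
		struct dev_totals_sketch *out)
{
	uint16_t i;

	for (i = 0; i < nb_rxq; i++) {
		out->ipackets += rxs[i].rx_pkts_received;
		out->ibytes += rxs[i].rx_bytes_received;
		/* failed mbuf allocations map naturally onto rx_nombuf */
		out->rx_nombuf += rxs[i].rx_alloc_failure;
	}

	for (i = 0; i < nb_txq; i++) {
		out->opackets += txs[i].tx_done;
		out->obytes += txs[i].tx_tot_bytes;
		out->oerrors += txs[i].tx_dropped;
	}
}

Note that tx_iq_busy counts ring-full events rather than drops (the packet can still go out once lio_dev_cleanup_iq() frees slots), so it is better exposed as a driver-specific extended stat than folded into oerrors.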
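The two functions added at the end, lio_wait_for_instr_fetch() and lio_dev_clear_queues(), pair naturally at device stop: drain outstanding instructions first (lio_wait_for_instr_fetch() retries up to 1000 times with a 1 ms delay, flushing any queue that still has instr_pending), then release the RX/TX queues. A minimal sketch of that ordering, assuming a hypothetical stop handler (only the two new calls and lio_dev_err() come from this file):

/* Hypothetical stop path; illustrates the intended call order only. */
static void
lio_dev_stop_sketch(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = eth_dev->data->dev_private;

	/* A non-zero return means instructions were still pending
	 * after the retry loop in lio_wait_for_instr_fetch() gave up.
	 */
	if (lio_wait_for_instr_fetch(lio_dev))
		lio_dev_err(lio_dev, "IQ has pending instructions\n");

	lio_dev_clear_queues(eth_dev);
}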