diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index c97e3bad0a..0711c1136d 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -381,6 +381,9 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 	hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
 	hdr_iova = rte_mbuf_data_iova(m_seg);
 	if (rte_pktmbuf_data_len(m_seg) == header_len) {
+		/* Cannot send a packet that consists only of header */
+		if (unlikely(m_seg->next == NULL))
+			return EMSGSIZE;
 		/*
 		 * Associate header mbuf with header descriptor
 		 * which is located after TSO descriptors.
@@ -409,6 +412,10 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 	copied_segs = sfc_tso_prepare_header(hdr_addr, header_len, &m_seg,
 					     &in_off);
 
+	/* Cannot send a packet that consists only of header */
+	if (unlikely(m_seg == NULL))
+		return EMSGSIZE;
+
 	m_seg_to_free_up_to = m_seg;
 	/*
 	 * Reduce the number of needed descriptors by the number of
@@ -775,6 +782,7 @@ sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
 static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
 static int
 sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
+			   struct sfc_dp_tx_hw_limits *limits,
 			   unsigned int *txq_entries,
 			   unsigned int *evq_entries,
 			   unsigned int *txq_max_fill_level)
@@ -783,8 +791,8 @@ sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
 	 * rte_ethdev API guarantees that the number meets min, max and
 	 * alignment requirements.
 	 */
-	if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
-		*txq_entries = EFX_TXQ_MINNDESCS;
+	if (nb_tx_desc <= limits->txq_min_entries)
+		*txq_entries = limits->txq_min_entries;
 	else
 		*txq_entries = rte_align32pow2(nb_tx_desc);
 
@@ -936,12 +944,49 @@ sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
 	txq->flags &= ~SFC_EF10_TXQ_STARTED;
 }
 
+static unsigned int
+sfc_ef10_tx_qdesc_npending(struct sfc_ef10_txq *txq)
+{
+	const unsigned int curr_done = txq->completed - 1;
+	unsigned int anew_done = curr_done;
+	efx_qword_t tx_ev;
+	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
+
+	if (unlikely(txq->flags &
+		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+		return 0;
+
+	while (sfc_ef10_tx_get_event(txq, &tx_ev))
+		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
+
+	/*
+	 * The function does not process events, so return event queue read
+	 * pointer to the original position to allow the events that were
+	 * read to be processed later
+	 */
+	txq->evq_read_ptr = evq_old_read_ptr;
+
+	return (anew_done - curr_done) & txq->ptr_mask;
+}
+
 static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
 static int
-sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
-			 __rte_unused uint16_t offset)
+sfc_ef10_tx_qdesc_status(struct sfc_dp_txq *dp_txq,
+			 uint16_t offset)
 {
-	return -ENOTSUP;
+	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+	unsigned int npending = sfc_ef10_tx_qdesc_npending(txq);
+
+	if (unlikely(offset > txq->ptr_mask))
+		return -EINVAL;
+
+	if (unlikely(offset >= txq->max_fill_level))
+		return RTE_ETH_TX_DESC_UNAVAIL;
+
+	if (unlikely(offset < npending))
+		return RTE_ETH_TX_DESC_FULL;
+
+	return RTE_ETH_TX_DESC_DONE;
 }
 
 struct sfc_dp_tx sfc_ef10_tx = {
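
Note on the two EMSGSIZE checks: sfc_tso_prepare_header() consumes the header bytes from the front of the mbuf chain, so a chain that ends exactly at header_len leaves no payload to segment, and the patch rejects such packets in both branches. A minimal standalone sketch of the invariant being enforced; the helper name tso_pkt_has_payload is hypothetical and not part of the driver:

/*
 * Hypothetical helper (not in the driver) showing the invariant the two
 * EMSGSIZE checks enforce: a TSO packet must carry payload bytes beyond
 * the protocol headers.
 */
#include <stdbool.h>
#include <stddef.h>

#include <rte_mbuf.h>

static bool
tso_pkt_has_payload(const struct rte_mbuf *m, size_t header_len)
{
	/* Consume the header across the segment chain. */
	while (m != NULL && header_len >= rte_pktmbuf_data_len(m)) {
		header_len -= rte_pktmbuf_data_len(m);
		m = m->next;
	}
	/* Any remaining segment data is payload. */
	return m != NULL;
}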
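
The sfc_ef10_tx_qsize_up_rings() change replaces the compile-time EFX_TXQ_MINNDESCS floor with the txq_min_entries value passed in through the new sfc_dp_tx_hw_limits argument, so the minimum can differ per adapter family. A sketch of the sizing rule, assuming only that ring sizes must be powers of two (which the ptr_mask wraparound arithmetic elsewhere in this file relies on); txq_ring_entries is an illustrative name:

/*
 * Sketch of the ring-sizing rule: at least the hardware minimum,
 * otherwise the request rounded up to a power of two.
 */
#include <stdint.h>

#include <rte_common.h>

static unsigned int
txq_ring_entries(uint16_t nb_tx_desc, unsigned int txq_min_entries)
{
	if (nb_tx_desc <= txq_min_entries)
		return txq_min_entries;

	/* Round up, e.g. 1500 requested descriptors -> a 2048-entry ring. */
	return rte_align32pow2(nb_tx_desc);
}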
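
sfc_ef10_tx_qdesc_npending() only peeks at Tx completion events: it rewinds evq_read_ptr afterwards so the events can still be processed by the normal reap path, and reports how far the done index advanced. The masked subtraction stays correct across unsigned wraparound; a self-contained illustration:

/*
 * Standalone illustration of the masked subtraction in
 * sfc_ef10_tx_qdesc_npending(): with a power-of-two ring, the unsigned
 * difference masked by ptr_mask is valid even after index wraparound.
 */
#include <stdio.h>

int
main(void)
{
	const unsigned int ptr_mask = 512 - 1;	/* 512-entry ring */
	unsigned int curr_done = 0xfffffffeu;	/* just before wrap */
	unsigned int anew_done = 0x00000001u;	/* wrapped past zero */

	/* (0x1 - 0xfffffffe) mod 2^32 = 3, and 3 & 511 = 3 pending. */
	printf("%u\n", (anew_done - curr_done) & ptr_mask);
	return 0;
}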
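
sfc_ef10_tx_qdesc_status() is the datapath hook that answers the generic rte_eth_tx_descriptor_status() ethdev API, which previously returned -ENOTSUP for this datapath. An application-side usage sketch; port_id, queue_id and the offset of 32 are illustrative values, not taken from the patch:

/* Application-side sketch of querying Tx descriptor status. */
#include <rte_ethdev.h>

static void
poll_tx_progress(uint16_t port_id, uint16_t queue_id)
{
	/* Query the status of the descriptor at offset 32 in the Tx ring. */
	int status = rte_eth_tx_descriptor_status(port_id, queue_id, 32);

	switch (status) {
	case RTE_ETH_TX_DESC_DONE:
		/* Transmission finished; the slot can be reused. */
		break;
	case RTE_ETH_TX_DESC_FULL:
		/* Descriptor still owned by hardware. */
		break;
	case RTE_ETH_TX_DESC_UNAVAIL:
		/* Offset is beyond the queue's usable fill level. */
		break;
	default:
		/* Negative errno, e.g. -EINVAL for an out-of-range offset. */
		break;
	}
}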