X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Fem_rxtx.c;h=02fae100c4762220e3aff6c201c5de969fd115e2;hb=91fbf1791cd7d69012010ab159d0b5c7dee81f79;hp=7ef965c8018d57010f0ece94fa1099ae0fe3a884;hpb=693f715da45c48ec1ec0fe4ba2f3b5ffd11ba53e;p=dpdk.git

diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 7ef965c801..02fae100c4 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
  */
 
 #include 
@@ -56,17 +27,17 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include "e1000_logs.h"
@@ -78,21 +49,13 @@
 
 #define E1000_RXDCTL_GRAN	0x01000000 /* RXDCTL Granularity */
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
+#define E1000_TX_OFFLOAD_MASK ( \
+		PKT_TX_IP_CKSUM | \
+		PKT_TX_L4_MASK | \
+		PKT_TX_VLAN_PKT)
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+#define E1000_TX_OFFLOAD_NOTSUP_MASK \
+		(PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
@@ -127,7 +90,7 @@ struct em_rx_queue {
 	uint16_t nb_rx_hold; /**< number of held free RX desc. */
 	uint16_t rx_free_thresh; /**< max free RX desc to hold. */
 	uint16_t queue_id; /**< RX queue index. */
-	uint8_t port_id; /**< Device port identifier. */
+	uint16_t port_id; /**< Device port identifier. */
 	uint8_t pthresh; /**< Prefetch threshold register. */
 	uint8_t hthresh; /**< Host threshold register. */
 	uint8_t wthresh; /**< Write-back threshold register. */
@@ -194,7 +157,7 @@ struct em_tx_queue {
 	/** Total number of TX descriptors ready to be allocated. */
 	uint16_t nb_tx_free;
 	uint16_t queue_id; /**< TX queue index. */
-	uint8_t port_id; /**< Device port identifier. */
+	uint16_t port_id; /**< Device port identifier. */
 	uint8_t pthresh; /**< Prefetch threshold register. */
 	uint8_t hthresh; /**< Host threshold register. */
 	uint8_t wthresh; /**< Write-back threshold register. */
@@ -585,7 +548,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 * Set up Transmit Data Descriptor.
 			 */
 			slen = m_seg->data_len;
-			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			buf_dma_addr = rte_mbuf_data_iova(m_seg);
 			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
 			txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
 
@@ -627,12 +590,49 @@ end_of_tx:
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
 		(unsigned) txq->port_id, (unsigned) txq->queue_id,
 		(unsigned) tx_id, (unsigned) nb_tx);
-	E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+	E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
 	txq->tx_tail = tx_id;
 
 	return nb_tx;
 }
 
+/*********************************************************************
+ *
+ *  TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	int i, ret;
+	struct rte_mbuf *m;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+
+		if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = -ENOTSUP;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
 /*********************************************************************
 
  *
  *  RX functions
@@ -645,7 +645,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
 	uint64_t pkt_flags;
 
 	/* Check if VLAN present */
-	pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0);
+	pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
 
 	return pkt_flags;
 }
@@ -735,7 +736,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			(unsigned) rx_id, (unsigned) status,
 			(unsigned) rte_le_to_cpu_16(rxd.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				"queue_id=%u",
@@ -769,7 +770,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 		rxdp->buffer_addr = dma_addr;
 		rxdp->status = 0;
 
@@ -800,7 +801,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->ol_flags = rxm->ol_flags |
 				rx_desc_error_to_pkt_flags(rxd.errors);
 
-		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+		/* Only valid if PKT_RX_VLAN set in pkt_flags */
 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/*
@@ -915,7 +916,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			(unsigned) rx_id, (unsigned) status,
 			(unsigned) rte_le_to_cpu_16(rxd.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				"queue_id=%u", (unsigned) rxq->port_id,
@@ -949,7 +950,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
-		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+		dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 		rxdp->buffer_addr = dma;
 		rxdp->status = 0;
 
@@ -1026,7 +1027,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = first_seg->ol_flags |
 					rx_desc_error_to_pkt_flags(rxd.errors);
 
-		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+		/* Only valid if PKT_RX_VLAN set in pkt_flags */
 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/* Prefetch data of first segment, if configured to do so. */
@@ -1259,7 +1260,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->port_id = dev->data->port_id;
 
 	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
-	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+	txq->tx_ring_phys_addr = tz->iova;
 	txq->tx_ring = (struct e1000_data_desc *) tz->addr;
 
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1386,7 +1387,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+	rxq->rx_ring_phys_addr = rz->iova;
 	rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
 
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1406,11 +1407,6 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct em_rx_queue *rxq;
 	uint32_t desc = 0;
 
-	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
-		return 0;
-	}
-
 	rxq = dev->data->rx_queues[rx_queue_id];
 	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 
@@ -1443,6 +1439,57 @@ eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
 	return !!(rxdp->status & E1000_RXD_STAT_DD);
 }
 
+int
+eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct em_rx_queue *rxq = rx_queue;
+	volatile uint8_t *status;
+	uint32_t desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return -EINVAL;
+
+	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	status = &rxq->rx_ring[desc].status;
+	if (*status & E1000_RXD_STAT_DD)
+		return RTE_ETH_RX_DESC_DONE;
+
+	return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	struct em_tx_queue *txq = tx_queue;
+	volatile uint8_t *status;
+	uint32_t desc;
+
+	if (unlikely(offset >= txq->nb_tx_desc))
+		return -EINVAL;
+
+	desc = txq->tx_tail + offset;
+	/* go to next desc that has the RS bit */
+	desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+		txq->tx_rs_thresh;
+	if (desc >= txq->nb_tx_desc) {
+		desc -= txq->nb_tx_desc;
+		if (desc >= txq->nb_tx_desc)
+			desc -= txq->nb_tx_desc;
+	}
+
+	status = &txq->tx_ring[desc].upper.fields.status;
+	if (*status & E1000_TXD_STAT_DD)
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
+
 void
 em_dev_clear_queues(struct rte_eth_dev *dev)
 {
@@ -1567,7 +1614,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 	/* Initialize software ring entries */
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile struct e1000_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
@@ -1575,7 +1622,8 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 			return -ENOMEM;
 		}
 
-		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 
 		/* Clear HW ring memory */
 		rxq->rx_ring[i] = rxd_init;