X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Fem_rxtx.c;h=1da56b2292dd0bef9f9557c0aafbadcfbc31ae04;hb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;hp=06ba68e3913c54b9f4e81f0b7d7978005c8d2337;hpb=f8244c6399d9fae6afab6770ae367aef38742ea5;p=dpdk.git diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c index 06ba68e391..1da56b2292 100644 --- a/drivers/net/e1000/em_rxtx.c +++ b/drivers/net/e1000/em_rxtx.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #include @@ -60,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -79,6 +50,8 @@ #define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ #define E1000_TX_OFFLOAD_MASK ( \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ PKT_TX_IP_CKSUM | \ PKT_TX_L4_MASK | \ PKT_TX_VLAN_PKT) @@ -114,6 +87,7 @@ struct em_rx_queue { struct em_rx_entry *sw_ring; /**< address of RX software ring. */ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */ uint16_t nb_rx_desc; /**< number of RX descriptors. */ uint16_t rx_tail; /**< current value of RDT register. */ uint16_t nb_rx_hold; /**< number of held free RX desc. */ @@ -192,6 +166,7 @@ struct em_tx_queue { uint8_t wthresh; /**< Write-back threshold register. */ struct em_ctx_info ctx_cache; /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ }; #if 1 @@ -577,7 +552,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * Set up Transmit Data Descriptor. 
*/ slen = m_seg->data_len; - buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); + buf_dma_addr = rte_mbuf_data_iova(m_seg); txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen); @@ -675,7 +650,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status) /* Check if VLAN present */ pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? - PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0); + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0); return pkt_flags; } @@ -799,7 +774,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm = rxe->mbuf; rxe->mbuf = nmb; dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->buffer_addr = dma_addr; rxdp->status = 0; @@ -830,7 +805,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->ol_flags = rxm->ol_flags | rx_desc_error_to_pkt_flags(rxd.errors); - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + /* Only valid if PKT_RX_VLAN set in pkt_flags */ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); /* @@ -979,7 +954,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxm = rxe->mbuf; rxe->mbuf = nmb; - dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->buffer_addr = dma; rxdp->status = 0; @@ -1030,17 +1005,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxm->next = NULL; if (unlikely(rxq->crc_len > 0)) { - first_seg->pkt_len -= ETHER_CRC_LEN; - if (data_len <= ETHER_CRC_LEN) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (data_len <= RTE_ETHER_CRC_LEN) { rte_pktmbuf_free_seg(rxm); first_seg->nb_segs--; last_seg->data_len = (uint16_t) (last_seg->data_len - - (ETHER_CRC_LEN - data_len)); + (RTE_ETHER_CRC_LEN - data_len)); last_seg->next = NULL; } else - rxm->data_len = - (uint16_t) (data_len - ETHER_CRC_LEN); + rxm->data_len = (uint16_t) + (data_len - RTE_ETHER_CRC_LEN); } /* @@ -1056,7 +1031,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, first_seg->ol_flags = first_seg->ol_flags | rx_desc_error_to_pkt_flags(rxd.errors); - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + /* Only valid if PKT_RX_VLAN set in pkt_flags */ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); /* Prefetch data of first segment, if configured to do so. */ @@ -1180,6 +1155,37 @@ em_reset_tx_queue(struct em_tx_queue *txq) memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache)); } +uint64_t +em_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + + RTE_SET_USED(dev); + tx_offload_capa = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + return tx_offload_capa; +} + +uint64_t +em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_queue_offload_capa; + + /* + * As only one Tx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. 
+ */ + tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev); + + return tx_queue_offload_capa; +} + int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1192,9 +1198,12 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, struct e1000_hw *hw; uint32_t tsize; uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + /* * Validate number of transmit descriptors. * It must not exceed hardware maximum, and must be multiple @@ -1289,7 +1298,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, txq->port_id = dev->data->port_id; txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx)); - txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring_phys_addr = tz->iova; txq->tx_ring = (struct e1000_data_desc *) tz->addr; PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, @@ -1298,6 +1307,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, em_reset_tx_queue(txq); dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; return 0; } @@ -1342,6 +1352,43 @@ em_reset_rx_queue(struct em_rx_queue *rxq) rxq->pkt_last_seg = NULL; } +uint64_t +em_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + uint32_t max_rx_pktlen; + + max_rx_pktlen = em_get_max_pktlen(dev); + + rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER; + if (max_rx_pktlen > RTE_ETHER_MAX_LEN) + rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + return rx_offload_capa; +} + +uint64_t +em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_queue_offload_capa; + + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev); + + return rx_queue_offload_capa; +} + int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1354,9 +1401,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, struct em_rx_queue *rxq; struct e1000_hw *hw; uint32_t rsize; + uint64_t offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* * Validate number of receive descriptors. * It must not exceed hardware maximum, and must be multiple @@ -1369,12 +1419,13 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, } /* - * EM devices don't support drop_en functionality + * EM devices don't support drop_en functionality. + * It's an optimization that does nothing on single-queue devices, + * so just log the issue and carry on. */ if (rx_conf->rx_drop_en) { - PMD_INIT_LOG(ERR, "drop_en functionality not supported by " + PMD_INIT_LOG(NOTICE, "drop_en functionality not supported by " "device"); - return -EINVAL; } /* Free memory prior to re-allocation if needed. */ @@ -1411,12 +1462,14 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, rxq->rx_free_thresh = rx_conf->rx_free_thresh; rxq->queue_id = queue_idx; rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 
- 0 : ETHER_CRC_LEN); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx)); rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx)); - rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring_phys_addr = rz->iova; rxq->rx_ring = (struct e1000_rx_desc *) rz->addr; PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, @@ -1424,6 +1477,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, dev->data->rx_queues[queue_idx] = rxq; em_reset_rx_queue(rxq); + rxq->offloads = offloads; return 0; } @@ -1652,7 +1706,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq) } dma_addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); /* Clear HW ring memory */ rxq->rx_ring[i] = rxd_init; @@ -1675,6 +1729,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) { struct e1000_hw *hw; struct em_rx_queue *rxq; + struct rte_eth_rxmode *rxmode; uint32_t rctl; uint32_t rfctl; uint32_t rxcsum; @@ -1683,6 +1738,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) int ret; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rxmode = &dev->data->dev_conf.rxmode; /* * Make sure receives are disabled while setting @@ -1742,9 +1798,10 @@ eth_em_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure */ - rxq->crc_len = - (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? - 0 : ETHER_CRC_LEN); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; bus_addr = rxq->rx_ring_phys_addr; E1000_WRITE_REG(hw, E1000_RDLEN(i), @@ -1774,8 +1831,8 @@ eth_em_rx_init(struct rte_eth_dev *dev) * to avoid splitting packets that don't fit into * one buffer. */ - if (dev->data->dev_conf.rxmode.jumbo_frame || - rctl_bsize < ETHER_MAX_LEN) { + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME || + rctl_bsize < RTE_ETHER_MAX_LEN) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = @@ -1784,7 +1841,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) } } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_em_recv_scattered_pkts; @@ -1797,7 +1854,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) */ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); - if (dev->data->dev_conf.rxmode.hw_ip_checksum) + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) rxcsum |= E1000_RXCSUM_IPOFL; else rxcsum &= ~E1000_RXCSUM_IPOFL; @@ -1809,24 +1866,24 @@ eth_em_rx_init(struct rte_eth_dev *dev) if ((hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_pch2lan || hw->mac.type == e1000_ich10lan) && - dev->data->dev_conf.rxmode.jumbo_frame == 1) { + rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13)); } if (hw->mac.type == e1000_pch2lan) { - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); else e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); } /* Setup the Receive Control Register. 
*/ - if (dev->data->dev_conf.rxmode.hw_strip_crc) - rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ - else + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + else + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | @@ -1843,7 +1900,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) /* * Configure support of jumbo frames, if any. */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) rctl |= E1000_RCTL_LPE; else rctl &= ~E1000_RCTL_LPE; @@ -1923,6 +1980,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->scattered_rx = dev->data->scattered_rx; qinfo->nb_desc = rxq->nb_rx_desc; qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.offloads = rxq->offloads; } void @@ -1940,4 +1998,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_thresh.wthresh = txq->wthresh; qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.offloads = txq->offloads; }
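
Editor's note: the sketch below is an illustration only, not part of this patch. It shows how an application might consume the per-port/per-queue offload capabilities that em_get_rx_port_offloads_capa()/em_get_tx_port_offloads_capa() start advertising here, and how per-queue offloads get merged with the port-level ones (mirroring the "offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads" lines added in the queue setup paths above). It assumes the DPDK 18.x/19.x-era ethdev API and the DEV_RX_OFFLOAD_*/DEV_TX_OFFLOAD_* flag names used in this diff; the helper name configure_em_port() and its parameters are hypothetical.

/*
 * Illustration (not upstream code): configure a single-queue e1000 em port,
 * enabling only the offloads the PMD reports as supported.
 */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
configure_em_port(uint16_t port_id, uint16_t nb_rxd, uint16_t nb_txd,
		  struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Enable Rx L3/L4 checksum offload only if the PMD reports it. */
	if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM) ==
			DEV_RX_OFFLOAD_CHECKSUM)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;

	/* Keep the Ethernet CRC only if the PMD allows it. */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;

	/* Tx side: VLAN insertion plus checksum offloads, when supported. */
	port_conf.txmode.offloads = dev_info.tx_offload_capa &
		(DEV_TX_OFFLOAD_VLAN_INSERT |
		 DEV_TX_OFFLOAD_IPV4_CKSUM |
		 DEV_TX_OFFLOAD_UDP_CKSUM |
		 DEV_TX_OFFLOAD_TCP_CKSUM);

	/* em devices expose a single Rx and a single Tx queue. */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/*
	 * Per-queue offloads are OR-ed with the port-level ones inside
	 * eth_em_rx_queue_setup()/eth_em_tx_queue_setup(), so passing the
	 * same set here is sufficient.
	 */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
			rte_eth_dev_socket_id(port_id), &rxq_conf, mb_pool);
	if (ret != 0)
		return ret;

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
			rte_eth_dev_socket_id(port_id), &txq_conf);
}

Since the em PMD reports identical per-port and per-queue capabilities (only one queue can be used), the queue-level offloads above simply echo the port-level configuration; the distinction matters more on multi-queue devices.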