X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Fem_rxtx.c;h=19e3bffd468f314d9f1d8f40da0f60868118811d;hb=1aacc3d388d3aa9b4fdefcaa111ef177f39ad57f;hp=1d8f0794d6943ba86eb0ed6b0bece99ab139a7eb;hpb=bfa9a8a4605bf6c4c82aa6ebff98de89c73d2024;p=dpdk.git diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c index 1d8f0794d6..19e3bffd46 100644 --- a/drivers/net/e1000/em_rxtx.c +++ b/drivers/net/e1000/em_rxtx.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #include @@ -47,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -60,7 +32,7 @@ #include #include #include -#include +#include #include #include #include @@ -79,6 +51,8 @@ #define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ #define E1000_TX_OFFLOAD_MASK ( \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ PKT_TX_IP_CKSUM | \ PKT_TX_L4_MASK | \ PKT_TX_VLAN_PKT) @@ -86,6 +60,11 @@ #define E1000_TX_OFFLOAD_NOTSUP_MASK \ (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK) +/* PCI offset for querying configuration status register */ +#define PCI_CFG_STATUS_REG 0x06 +#define FLUSH_DESC_REQUIRED 0x100 + + /** * Structure associated with each descriptor of the RX ring of a RX queue. */ @@ -114,6 +93,7 @@ struct em_rx_queue { struct em_rx_entry *sw_ring; /**< address of RX software ring. */ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */ uint16_t nb_rx_desc; /**< number of RX descriptors. */ uint16_t rx_tail; /**< current value of RDT register. */ uint16_t nb_rx_hold; /**< number of held free RX desc. */ @@ -192,6 +172,7 @@ struct em_tx_queue { uint8_t wthresh; /**< Write-back threshold register. 
*/ struct em_ctx_info ctx_cache; /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ }; #if 1 @@ -247,7 +228,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq, /* setup IPCS* fields */ ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len; ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len + - offsetof(struct ipv4_hdr, hdr_checksum)); + offsetof(struct rte_ipv4_hdr, hdr_checksum)); /* * When doing checksum or TCP segmentation with IPv6 headers, @@ -269,12 +250,12 @@ em_set_xmit_ctx(struct em_tx_queue* txq, switch (flags & PKT_TX_L4_MASK) { case PKT_TX_UDP_CKSUM: ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse + - offsetof(struct udp_hdr, dgram_cksum)); + offsetof(struct rte_udp_hdr, dgram_cksum)); cmp_mask |= TX_MACIP_LEN_CMP_MASK; break; case PKT_TX_TCP_CKSUM: ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse + - offsetof(struct tcp_hdr, cksum)); + offsetof(struct rte_tcp_hdr, cksum)); cmd_len |= E1000_TXD_CMD_TCP; cmp_mask |= TX_MACIP_LEN_CMP_MASK; break; @@ -641,20 +622,20 @@ eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, m = tx_pkts[i]; if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } @@ -1030,17 +1011,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxm->next = NULL; if (unlikely(rxq->crc_len > 0)) { - first_seg->pkt_len -= ETHER_CRC_LEN; - if (data_len <= ETHER_CRC_LEN) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (data_len <= RTE_ETHER_CRC_LEN) { rte_pktmbuf_free_seg(rxm); first_seg->nb_segs--; last_seg->data_len = (uint16_t) (last_seg->data_len - - (ETHER_CRC_LEN - data_len)); + (RTE_ETHER_CRC_LEN - data_len)); last_seg->next = NULL; } else - rxm->data_len = - (uint16_t) (data_len - ETHER_CRC_LEN); + rxm->data_len = (uint16_t) + (data_len - RTE_ETHER_CRC_LEN); } /* @@ -1180,6 +1161,37 @@ em_reset_tx_queue(struct em_tx_queue *txq) memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache)); } +uint64_t +em_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + + RTE_SET_USED(dev); + tx_offload_capa = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + return tx_offload_capa; +} + +uint64_t +em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_queue_offload_capa; + + /* + * As only one Tx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev); + + return tx_queue_offload_capa; +} + int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1192,9 +1204,12 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, struct e1000_hw *hw; uint32_t tsize; uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + /* * Validate number of transmit descriptors. 
* It must not exceed hardware maximum, and must be multiple @@ -1298,6 +1313,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, em_reset_tx_queue(txq); dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; return 0; } @@ -1342,6 +1358,43 @@ em_reset_rx_queue(struct em_rx_queue *rxq) rxq->pkt_last_seg = NULL; } +uint64_t +em_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + uint32_t max_rx_pktlen; + + max_rx_pktlen = em_get_max_pktlen(dev); + + rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER; + if (max_rx_pktlen > RTE_ETHER_MAX_LEN) + rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + return rx_offload_capa; +} + +uint64_t +em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_queue_offload_capa; + + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev); + + return rx_queue_offload_capa; +} + int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1354,9 +1407,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, struct em_rx_queue *rxq; struct e1000_hw *hw; uint32_t rsize; + uint64_t offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* * Validate number of receive descriptors. * It must not exceed hardware maximum, and must be multiple @@ -1369,12 +1425,13 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, } /* - * EM devices don't support drop_en functionality + * EM devices don't support drop_en functionality. + * It's an optimization that does nothing on single-queue devices, + * so just log the issue and carry on. */ if (rx_conf->rx_drop_en) { - PMD_INIT_LOG(ERR, "drop_en functionality not supported by " + PMD_INIT_LOG(NOTICE, "drop_en functionality not supported by " "device"); - return -EINVAL; } /* Free memory prior to re-allocation if needed. */ @@ -1411,8 +1468,10 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, rxq->rx_free_thresh = rx_conf->rx_free_thresh; rxq->queue_id = queue_idx; rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 
- 0 : ETHER_CRC_LEN); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx)); rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx)); @@ -1424,6 +1483,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, dev->data->rx_queues[queue_idx] = rxq; em_reset_rx_queue(rxq); + rxq->offloads = offloads; return 0; } @@ -1551,12 +1611,14 @@ em_dev_free_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { eth_em_rx_queue_release(dev->data->rx_queues[i]); dev->data->rx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "rx_ring", i); } dev->data->nb_rx_queues = 0; for (i = 0; i < dev->data->nb_tx_queues; i++) { eth_em_tx_queue_release(dev->data->tx_queues[i]); dev->data->tx_queues[i] = NULL; + rte_eth_dma_zone_free(dev, "tx_ring", i); } dev->data->nb_tx_queues = 0; } @@ -1675,6 +1737,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) { struct e1000_hw *hw; struct em_rx_queue *rxq; + struct rte_eth_rxmode *rxmode; uint32_t rctl; uint32_t rfctl; uint32_t rxcsum; @@ -1683,6 +1746,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) int ret; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rxmode = &dev->data->dev_conf.rxmode; /* * Make sure receives are disabled while setting @@ -1742,9 +1806,10 @@ eth_em_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure */ - rxq->crc_len = - (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? - 0 : ETHER_CRC_LEN); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; bus_addr = rxq->rx_ring_phys_addr; E1000_WRITE_REG(hw, E1000_RDLEN(i), @@ -1774,8 +1839,8 @@ eth_em_rx_init(struct rte_eth_dev *dev) * to avoid splitting packets that don't fit into * one buffer. */ - if (dev->data->dev_conf.rxmode.jumbo_frame || - rctl_bsize < ETHER_MAX_LEN) { + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME || + rctl_bsize < RTE_ETHER_MAX_LEN) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = @@ -1784,7 +1849,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) } } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_em_recv_scattered_pkts; @@ -1797,7 +1862,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) */ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); - if (dev->data->dev_conf.rxmode.hw_ip_checksum) + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) rxcsum |= E1000_RXCSUM_IPOFL; else rxcsum &= ~E1000_RXCSUM_IPOFL; @@ -1809,24 +1874,24 @@ eth_em_rx_init(struct rte_eth_dev *dev) if ((hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_pch2lan || hw->mac.type == e1000_ich10lan) && - dev->data->dev_conf.rxmode.jumbo_frame == 1) { + rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13)); } if (hw->mac.type == e1000_pch2lan) { - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); else e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); } /* Setup the Receive Control Register. 
*/ - if (dev->data->dev_conf.rxmode.hw_strip_crc) - rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ - else + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + else + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | @@ -1843,7 +1908,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) /* * Configure support of jumbo frames, if any. */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) rctl |= E1000_RCTL_LPE; else rctl &= ~E1000_RCTL_LPE; @@ -1907,6 +1972,22 @@ eth_em_tx_init(struct rte_eth_dev *dev) tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + /* SPT and CNP Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { + uint32_t reg_val; + reg_val = E1000_READ_REG(hw, E1000_IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + E1000_WRITE_REG(hw, E1000_IOSFPC, reg_val); + + /* Dropping the number of outstanding requests from + * 3 to 2 in order to avoid a buffer overrun. + */ + reg_val = E1000_READ_REG(hw, E1000_TARC(0)); + reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; + reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; + E1000_WRITE_REG(hw, E1000_TARC(0), reg_val); + } + /* This write will effectively turn on the transmit unit. */ E1000_WRITE_REG(hw, E1000_TCTL, tctl); } @@ -1923,6 +2004,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->scattered_rx = dev->data->scattered_rx; qinfo->nb_desc = rxq->nb_rx_desc; qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.offloads = rxq->offloads; } void @@ -1940,4 +2022,121 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_thresh.wthresh = txq->wthresh; qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.offloads = txq->offloads; +} + +static void +e1000_flush_tx_ring(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + volatile struct e1000_data_desc *tx_desc; + volatile uint32_t *tdt_reg_addr; + uint32_t tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; + uint16_t size = 512; + struct em_tx_queue *txq; + int i; + + if (dev->data->tx_queues == NULL) + return; + tctl = E1000_READ_REG(hw, E1000_TCTL); + E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN); + for (i = 0; i < dev->data->nb_tx_queues && + i < E1000_I219_MAX_TX_QUEUE_NUM; i++) { + txq = dev->data->tx_queues[i]; + tdt = E1000_READ_REG(hw, E1000_TDT(i)); + if (tdt != txq->tx_tail) + return; + tx_desc = &txq->tx_ring[txq->tx_tail]; + tx_desc->buffer_addr = rte_cpu_to_le_64(txq->tx_ring_phys_addr); + tx_desc->lower.data = rte_cpu_to_le_32(txd_lower | size); + tx_desc->upper.data = 0; + + rte_io_wmb(); + txq->tx_tail++; + if (txq->tx_tail == txq->nb_tx_desc) + txq->tx_tail = 0; + tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(i)); + E1000_PCI_REG_WRITE(tdt_reg_addr, txq->tx_tail); + usec_delay(250); + } +} + +static void +e1000_flush_rx_ring(struct rte_eth_dev *dev) +{ + uint32_t rctl, rxdctl; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + usec_delay(150); + + for (i = 0; i < dev->data->nb_rx_queues && + i < E1000_I219_MAX_RX_QUEUE_NUM; i++) { + rxdctl = E1000_READ_REG(hw, 
E1000_RXDCTL(i)); + /* zero the lower 14 bits (prefetch and host thresholds) */ + rxdctl &= 0xffffc000; + + /* update thresholds: prefetch threshold to 31, + * host threshold to 1 and make sure the granularity + * is "descriptors" and not "cache lines" + */ + rxdctl |= (0x1F | (1UL << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + } + /* momentarily enable the RX ring for the changes to take effect */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + usec_delay(150); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); +} + +/** + * em_flush_desc_rings - remove all descriptors from the descriptor rings + * + * In i219, the descriptor rings must be emptied before resetting/closing the + * HW. Failure to do this will cause the HW to enter a unit hang state which + * can only be released by PCI reset on the device + * + */ + +void +em_flush_desc_rings(struct rte_eth_dev *dev) +{ + uint32_t fextnvm11, tdlen; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t pci_cfg_status = 0; + int ret; + + fextnvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); + E1000_WRITE_REG(hw, E1000_FEXTNVM11, + fextnvm11 | E1000_FEXTNVM11_DISABLE_MULR_FIX); + tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); + ret = rte_pci_read_config(pci_dev, &pci_cfg_status, + sizeof(pci_cfg_status), PCI_CFG_STATUS_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_CFG_STATUS_REG); + return; + } + + /* do nothing if we're not in faulty state, or if the queue is empty */ + if ((pci_cfg_status & FLUSH_DESC_REQUIRED) && tdlen) { + /* flush desc ring */ + e1000_flush_tx_ring(dev); + ret = rte_pci_read_config(pci_dev, &pci_cfg_status, + sizeof(pci_cfg_status), PCI_CFG_STATUS_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_CFG_STATUS_REG); + return; + } + + if (pci_cfg_status & FLUSH_DESC_REQUIRED) + e1000_flush_rx_ring(dev); + } }
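
The hunks above migrate the em PMD from the legacy rxmode flag fields (hw_strip_crc, hw_ip_checksum, enable_scatter, jumbo_frame) to the per-port/per-queue DEV_RX_OFFLOAD_*/DEV_TX_OFFLOAD_* capability API. As a hedged illustration only, not part of this patch, the sketch below shows how an application would consume the capabilities that em_get_rx_port_offloads_capa()/em_get_rx_queue_offloads_capa() now report; the helper name configure_rx_offloads, the 256-descriptor ring size and the mbuf pool are assumptions made for the example.

/* Illustrative application-side sketch (not part of this patch): query the
 * Rx offload capabilities reported by the PMD and request a subset of them
 * through the DPDK 18.x offload API.
 */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
configure_rx_offloads(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf rxq_conf;
	uint64_t wanted = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_KEEP_CRC;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Request only what the port actually advertises. */
	port_conf.rxmode.offloads = wanted & dev_info.rx_offload_capa;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* Port-level offloads apply to every queue; for em the per-queue
	 * capability set equals the per-port one (single Rx queue).
	 */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;

	return rte_eth_rx_queue_setup(port_id, 0, 256,
			rte_eth_dev_socket_id(port_id),
			&rxq_conf, mb_pool);
}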
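
Similarly, the eth_em_prep_pkts() changes above fix the rte_errno sign convention so that failures report positive errno values. A minimal caller-side sketch, again illustrative rather than part of the patch (the helper name prepare_and_send is made up), would look like:

/* Illustrative only: with the fix above, rte_errno carries a positive errno
 * (e.g. ENOTSUP for an offload outside E1000_TX_OFFLOAD_MASK) when
 * rte_eth_tx_prepare() rejects an mbuf.
 */
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_errno.h>

static uint16_t
prepare_and_send(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep != nb_pkts)
		printf("tx_prepare stopped at mbuf %u: %s\n",
		       nb_prep, rte_strerror(rte_errno));

	/* Transmit only the packets that passed preparation. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}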