X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Figc%2Figc_txrx.c;h=4887e922e700132baf01f77f2fd6b2718a43bebe;hb=ede6356582adfb244ba4ced903c00c6f7626e53c;hp=383248423768b21b00c829531c7d6f25cebf4dc7;hpb=5f266d0d8c6decb07fef3d6a6f828bcc75f5fefe;p=dpdk.git

diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index 3832484237..4887e922e7 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -3,8 +3,9 @@
  */
 
 #include
+#include
 #include
-#include
+#include
 #include
 
 #include "igc_logs.h"
@@ -715,14 +716,13 @@ igc_rx_queue_release(struct igc_rx_queue *rxq)
 	rte_free(rxq);
 }
 
-void eth_igc_rx_queue_release(void *rxq)
+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	if (rxq)
-		igc_rx_queue_release(rxq);
+	if (dev->data->rx_queues[qid])
+		igc_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
-uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
-		uint16_t rx_queue_id)
+uint32_t eth_igc_rx_queue_count(void *rx_queue)
 {
 	/**
 	 * Check the DD bit of a rx descriptor of each 4 in a group,
@@ -735,7 +735,7 @@ uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
 	struct igc_rx_queue *rxq;
 	uint16_t desc = 0;
 
-	rxq = dev->data->rx_queues[rx_queue_id];
+	rxq = rx_queue;
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 
 	while (desc < rxq->nb_rx_desc - rxq->rx_tail) {
@@ -756,24 +756,6 @@ uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
 	return desc;
 }
 
-int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	volatile union igc_adv_rx_desc *rxdp;
-	struct igc_rx_queue *rxq = rx_queue;
-	uint32_t desc;
-
-	if (unlikely(!rxq || offset >= rxq->nb_rx_desc))
-		return 0;
-
-	desc = rxq->rx_tail + offset;
-	if (desc >= rxq->nb_rx_desc)
-		desc -= rxq->nb_rx_desc;
-
-	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error &
-			rte_cpu_to_le_32(IGC_RXD_STAT_DD));
-}
-
 int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
 	struct igc_rx_queue *rxq = rx_queue;
@@ -836,7 +818,7 @@ static uint8_t default_rss_key[40] = {
 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 };
 
-static void
+void
 igc_rss_disable(struct rte_eth_dev *dev)
 {
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
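Note: the hunks above change the Rx queue release and queue count callbacks to the per-queue ethdev signatures and drop eth_igc_rx_descriptor_done entirely. At the application level the same information remains reachable through rte_eth_rx_queue_count() and rte_eth_rx_descriptor_status(); the sketch below is illustrative only — the helper name and the port/queue ids are assumptions, not part of this diff.

/*
 * Minimal sketch: polling Rx ring state once the descriptor_done op is gone.
 * Assumes the port and queue are already configured and started.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
poll_rx_ring_state(uint16_t port_id, uint16_t queue_id)	/* hypothetical helper */
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used >= 0)
		printf("port %u queue %u: %d descriptors hold packets\n",
			port_id, queue_id, used);

	/* rte_eth_rx_descriptor_status() replaces the removed *_done() query. */
	if (rte_eth_rx_descriptor_status(port_id, queue_id, 0) ==
			RTE_ETH_RX_DESC_DONE)
		printf("next descriptor is ready to be received\n");
}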
@@ -917,6 +899,135 @@ igc_rss_configure(struct rte_eth_dev *dev)
 	igc_hw_rss_hash_set(hw, &rss_conf);
 }
 
+int
+igc_del_rss_filter(struct rte_eth_dev *dev)
+{
+	struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+
+	if (rss_filter->enable) {
+		/* recover default RSS configuration */
+		igc_rss_configure(dev);
+
+		/* disable RSS logic and clear filter data */
+		igc_rss_disable(dev);
+		memset(rss_filter, 0, sizeof(*rss_filter));
+		return 0;
+	}
+	PMD_DRV_LOG(ERR, "filter not exist!");
+	return -ENOENT;
+}
+
+/* Initiate the filter structure by the structure of rte_flow_action_rss */
+void
+igc_rss_conf_set(struct igc_rss_filter *out,
+		const struct rte_flow_action_rss *rss)
+{
+	out->conf.func = rss->func;
+	out->conf.level = rss->level;
+	out->conf.types = rss->types;
+
+	if (rss->key_len == sizeof(out->key)) {
+		memcpy(out->key, rss->key, rss->key_len);
+		out->conf.key = out->key;
+		out->conf.key_len = rss->key_len;
+	} else {
+		out->conf.key = NULL;
+		out->conf.key_len = 0;
+	}
+
+	if (rss->queue_num <= IGC_RSS_RDT_SIZD) {
+		memcpy(out->queue, rss->queue,
+			sizeof(*out->queue) * rss->queue_num);
+		out->conf.queue = out->queue;
+		out->conf.queue_num = rss->queue_num;
+	} else {
+		out->conf.queue = NULL;
+		out->conf.queue_num = 0;
+	}
+}
+
+int
+igc_add_rss_filter(struct rte_eth_dev *dev, struct igc_rss_filter *rss)
+{
+	struct rte_eth_rss_conf rss_conf = {
+		.rss_key = rss->conf.key_len ?
+			(void *)(uintptr_t)rss->conf.key : NULL,
+		.rss_key_len = rss->conf.key_len,
+		.rss_hf = rss->conf.types,
+	};
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+	uint32_t i, j;
+
+	/* check RSS type is valid */
+	if ((rss_conf.rss_hf & IGC_RSS_OFFLOAD_ALL) == 0) {
+		PMD_DRV_LOG(ERR,
+			"RSS type(0x%" PRIx64 ") error!, only 0x%" PRIx64
+			" been supported", rss_conf.rss_hf,
+			(uint64_t)IGC_RSS_OFFLOAD_ALL);
+		return -EINVAL;
+	}
+
+	/* check queue count is not zero */
+	if (!rss->conf.queue_num) {
+		PMD_DRV_LOG(ERR, "Queue number should not be 0!");
+		return -EINVAL;
+	}
+
+	/* check queue id is valid */
+	for (i = 0; i < rss->conf.queue_num; i++)
+		if (rss->conf.queue[i] >= dev->data->nb_rx_queues) {
+			PMD_DRV_LOG(ERR, "Queue id %u is invalid!",
+					rss->conf.queue[i]);
+			return -EINVAL;
+		}
+
+	/* only support one filter */
+	if (rss_filter->enable) {
+		PMD_DRV_LOG(ERR, "Only support one RSS filter!");
+		return -ENOTSUP;
+	}
+	rss_filter->enable = 1;
+
+	igc_rss_conf_set(rss_filter, &rss->conf);
+
+	/* Fill in redirection table. */
+	for (i = 0, j = 0; i < IGC_RSS_RDT_SIZD; i++, j++) {
+		union igc_rss_reta_reg reta;
+		uint16_t q_idx, reta_idx;
+
+		if (j == rss->conf.queue_num)
+			j = 0;
+		q_idx = rss->conf.queue[j];
+		reta_idx = i % sizeof(reta);
+		reta.bytes[reta_idx] = q_idx;
+		if (reta_idx == sizeof(reta) - 1)
+			IGC_WRITE_REG_LE_VALUE(hw,
+				IGC_RETA(i / sizeof(reta)), reta.dword);
+	}
+
+	if (rss_conf.rss_key == NULL)
+		rss_conf.rss_key = default_rss_key;
+	igc_hw_rss_hash_set(hw, &rss_conf);
+	return 0;
+}
+
+void
+igc_clear_rss_filter(struct rte_eth_dev *dev)
+{
+	struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+
+	if (!rss_filter->enable)
+		return;
+
+	/* recover default RSS configuration */
+	igc_rss_configure(dev);
+
+	/* disable RSS logic and clear filter data */
+	igc_rss_disable(dev);
+	memset(rss_filter, 0, sizeof(*rss_filter));
+}
+
 static int
 igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 {
@@ -951,7 +1062,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 	struct igc_rx_queue *rxq;
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
-	uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	uint32_t max_rx_pktlen;
 	uint32_t rctl;
 	uint32_t rxcsum;
 	uint16_t buf_size;
@@ -969,17 +1080,17 @@ igc_rx_init(struct rte_eth_dev *dev)
 	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
 
 	/* Configure support of jumbo frames, if any. */
-	if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+	if (dev->data->mtu > RTE_ETHER_MTU)
 		rctl |= IGC_RCTL_LPE;
-
-		/*
-		 * Set maximum packet length by default, and might be updated
-		 * together with enabling/disabling dual VLAN.
-		 */
-		IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len);
-	} else {
+	else
 		rctl &= ~IGC_RCTL_LPE;
-	}
+
+	max_rx_pktlen = dev->data->mtu + IGC_ETH_OVERHEAD;
+	/*
+	 * Set maximum packet length by default, and might be updated
+	 * together with enabling/disabling dual VLAN.
+	 */
+	IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pktlen);
 
 	/* Configure and enable each RX queue. */
 	rctl_bsize = 0;
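Note: the RSS filter added above is described by a struct rte_flow_action_rss, so it is presumably reached through the driver's rte_flow support rather than a driver-private call. The sketch below is illustrative only — the empty pattern, queue ids and hash type are assumptions, and whether a given pattern/action combination is accepted is decided by the driver's flow parser, not by this hunk.

/*
 * Minimal sketch: create an rte_flow RSS rule spreading IPv4 traffic over
 * two Rx queues, the kind of request igc_add_rss_filter() would serve.
 */
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static int
create_rss_rule(uint16_t port_id)	/* hypothetical helper */
{
	static uint16_t queues[] = { 0, 1 };
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = ETH_RSS_IPV4,
		.key = NULL,		/* let the PMD fall back to its default key */
		.key_len = 0,
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	if (rte_flow_create(port_id, &attr, pattern, actions, &error) == NULL)
		return -rte_errno;
	return 0;
}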
@@ -1038,7 +1149,7 @@ igc_rx_init(struct rte_eth_dev *dev)
 					IGC_SRRCTL_BSIZEPKT_SHIFT);
 
 			/* It adds dual VLAN length for supporting dual VLAN */
-			if (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)
+			if (max_rx_pktlen > buf_size)
 				dev->data->scattered_rx = 1;
 		} else {
 			/*
@@ -1142,6 +1253,9 @@ igc_rx_init(struct rte_eth_dev *dev)
 		IGC_RCTL_DPF |
 		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
 
+	if (dev->data->dev_conf.lpbk_mode == 1)
+		rctl |= IGC_RCTL_LBM_MAC;
+
 	rctl &= ~(IGC_RCTL_HSEL_MSK | IGC_RCTL_CFIEN | IGC_RCTL_CFI |
 			IGC_RCTL_PSP | IGC_RCTL_PMCF);
 
@@ -1158,20 +1272,24 @@
 	 * This needs to be done after enable.
 	 */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		uint32_t dvmolr;
+
 		rxq = dev->data->rx_queues[i];
 		IGC_WRITE_REG(hw, IGC_RDH(rxq->reg_idx), 0);
-		IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx),
-				rxq->nb_rx_desc - 1);
+		IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
 
-		/* strip queue vlan offload */
-		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-			uint32_t dvmolr;
-			dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->queue_id));
+		dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			dvmolr |= IGC_DVMOLR_STRVLAN;
+		else
+			dvmolr &= ~IGC_DVMOLR_STRVLAN;
 
-			/* If vlan been stripped off, the CRC is meaningless. */
-			dvmolr |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC;
-			IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);
-		}
+		if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+			dvmolr &= ~IGC_DVMOLR_STRCRC;
+		else
+			dvmolr |= IGC_DVMOLR_STRCRC;
+
+		IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);
 	}
 
 	return 0;
@@ -1302,7 +1420,7 @@ eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			return i;
 		}
 
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
 			rte_errno = -ret;
@@ -1762,10 +1880,10 @@ igc_tx_queue_release(struct igc_tx_queue *txq)
 	rte_free(txq);
 }
 
-void eth_igc_tx_queue_release(void *txq)
+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	if (txq)
-		igc_tx_queue_release(txq);
+	if (dev->data->tx_queues[qid])
+		igc_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 static void
@@ -2134,12 +2252,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
 	reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
 	if (on) {
-		/* If vlan been stripped off, the CRC is meaningless. */
-		reg_val |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC;
+		reg_val |= IGC_DVMOLR_STRVLAN;
 		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	} else {
-		reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN |
-				IGC_DVMOLR_STRCRC);
+		reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
 		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
 	}
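Note: the last two hunks decouple CRC stripping from VLAN stripping — DVMOLR.STRCRC now follows the DEV_RX_OFFLOAD_KEEP_CRC offload, while DVMOLR.STRVLAN follows the per-queue VLAN-strip setting. The sketch below is illustrative only; the helper name and port/queue ids are assumptions, not part of this diff.

/*
 * Minimal sketch: keep the Ethernet CRC on received packets while still
 * stripping VLAN tags on Rx queue 0.  Assumes the port exists and is stopped.
 */
#include <rte_ethdev.h>

static int
keep_crc_strip_vlan(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)	/* hypothetical helper */
{
	struct rte_eth_conf conf = {0};
	int ret;

	/* CRC stripping is controlled per port through the KEEP_CRC offload. */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_KEEP_CRC;
	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	if (ret < 0)
		return ret;

	/* VLAN stripping remains a per-queue knob (DVMOLR.STRVLAN). */
	return rte_eth_dev_set_vlan_strip_on_queue(port_id, 0, 1);
}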