diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 56d1dfc2b0..946b39dd1c 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -51,7 +51,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -73,16 +72,11 @@
 #include "e1000/e1000_api.h"
 #include "e1000_ethdev.h"
 
-#define IGB_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_IPV4_TCP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV4_UDP | \
-	ETH_RSS_IPV6_UDP | \
-	ETH_RSS_IPV6_UDP_EX)
+/* Bit Mask to indicate what bits required for building TX context */
+#define IGB_TX_OFFLOAD_MASK ( \
+	PKT_TX_VLAN_PKT | \
+	PKT_TX_IP_CKSUM | \
+	PKT_TX_L4_MASK)
 
 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
@@ -262,7 +256,7 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
 
 	if (ol_flags & PKT_TX_IP_CKSUM) {
 		type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
-		cmp_mask |= TX_MAC_LEN_CMP_MASK;
+		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
 	}
 
 	/* Specify which HW CTX to upload. */
@@ -361,6 +355,13 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct rte_mbuf *tx_pkt;
 	struct rte_mbuf *m_seg;
 	union igb_vlan_macip vlan_macip_lens;
+	union {
+		uint16_t u16;
+		struct {
+			uint16_t l3_len:9;
+			uint16_t l2_len:7;
+		};
+	} l2_l3_len;
 	uint64_t buf_dma_addr;
 	uint32_t olinfo_status;
 	uint32_t cmd_type_len;
@@ -398,9 +399,11 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
 
 		ol_flags = tx_pkt->ol_flags;
+		l2_l3_len.l2_len = tx_pkt->l2_len;
+		l2_l3_len.l3_len = tx_pkt->l3_len;
 		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
-		vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
-		tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
+		vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
+		tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
 
 		/* If a Context Descriptor need be built . */
 		if (tx_ol_req) {
@@ -415,7 +418,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
+			   " tx_first=%u tx_last=%u",
 			   (unsigned) txq->port_id,
 			   (unsigned) txq->queue_id,
 			   (unsigned) pkt_len,
@@ -714,8 +717,8 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x pkt_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x pkt_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -723,7 +726,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -808,7 +811,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -895,8 +898,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x data_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x data_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -904,7 +907,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1061,7 +1064,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1222,17 +1225,15 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 * driver.
 	 */
 	if (tx_conf->tx_free_thresh != 0)
-		RTE_LOG(WARNING, PMD,
-			"The tx_free_thresh parameter is not "
-			"used for the 1G driver.\n");
+		PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
+			     "used for the 1G driver.");
 	if (tx_conf->tx_rs_thresh != 0)
-		RTE_LOG(WARNING, PMD,
-			"The tx_rs_thresh parameter is not "
-			"used for the 1G driver.\n");
+		PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
+			     "used for the 1G driver.");
 	if (tx_conf->tx_thresh.wthresh == 0)
-		RTE_LOG(WARNING, PMD,
-			"To improve 1G driver performance, consider setting "
-			"the TX WTHRESH value to 4, 8, or 16.\n");
+		PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
+			     "consider setting the TX WTHRESH value to 4, 8, "
+			     "or 16.");
 
 	/* Free memory prior to re-allocation if needed */
 	if (dev->data->tx_queues[queue_idx] != NULL) {
@@ -1242,7 +1243,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 
 	/* First allocate the tx queue data structure */
 	txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
-							CACHE_LINE_SIZE);
+							RTE_CACHE_LINE_SIZE);
 	if (txq == NULL)
 		return (-ENOMEM);
 
@@ -1280,12 +1281,12 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate software ring */
 	txq->sw_ring = rte_zmalloc("txq->sw_ring",
 				   sizeof(struct igb_tx_entry) * nb_desc,
-				   CACHE_LINE_SIZE);
+				   RTE_CACHE_LINE_SIZE);
 	if (txq->sw_ring == NULL) {
 		igb_tx_queue_release(txq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	igb_reset_tx_queue(txq, dev);
@@ -1376,7 +1377,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 
 	/* First allocate the RX queue data structure. */
 	rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
-			  CACHE_LINE_SIZE);
+			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return (-ENOMEM);
 	rxq->mb_pool = mp;
@@ -1418,12 +1419,12 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate software ring. */
 	rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
 				   sizeof(struct igb_rx_entry) * nb_desc,
-				   CACHE_LINE_SIZE);
+				   RTE_CACHE_LINE_SIZE);
 	if (rxq->sw_ring == NULL) {
 		igb_rx_queue_release(rxq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1441,7 +1442,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
 		return 0;
 	}
 
@@ -1569,19 +1570,19 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
 	if (rss_hf & ETH_RSS_IPV4)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_IPV4_TCP)
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
 	if (rss_hf & ETH_RSS_IPV6)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
 	if (rss_hf & ETH_RSS_IPV6_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_IPV6_TCP)
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
 	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-	if (rss_hf & ETH_RSS_IPV4_UDP)
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP)
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
 	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
@@ -1651,19 +1652,19 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
 		rss_hf |= ETH_RSS_IPV4;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_IPV4_TCP;
+		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
 		rss_hf |= ETH_RSS_IPV6;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
 		rss_hf |= ETH_RSS_IPV6_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_IPV6_TCP;
+		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
 		rss_hf |= ETH_RSS_IPV6_TCP_EX;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_IPV4_UDP;
+		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_IPV6_UDP;
+		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
 	if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
 		rss_hf |= ETH_RSS_IPV6_UDP_EX;
 	rss_conf->rss_hf = rss_hf;
@@ -1740,7 +1741,7 @@ igb_is_vmdq_supported(const struct rte_eth_dev *dev)
 	case e1000_i210:
 	case e1000_i211:
 	default:
-		PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
+		PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
 		return 0;
 	}
 }
@@ -1753,7 +1754,8 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	uint32_t mrqc, vt_ctl, vmolr, rctl;
 	int i;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
@@ -1780,6 +1782,26 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
 	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
 
+	for (i = 0; i < E1000_VMOLR_SIZE; i++) {
+		vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+		vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
+			E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
+			E1000_VMOLR_MPME);
+
+		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+			vmolr |= E1000_VMOLR_AUPE;
+		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+			vmolr |= E1000_VMOLR_ROMPE;
+		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+			vmolr |= E1000_VMOLR_ROPE;
+		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+			vmolr |= E1000_VMOLR_BAM;
+		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+			vmolr |= E1000_VMOLR_MPME;
+
+		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+	}
+
 	/*
 	 * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
 	 * Both 82576 and 82580 support it
@@ -1842,7 +1864,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				"queue_id=%hu\n", rxq->queue_id);
+				"queue_id=%hu", rxq->queue_id);
 			return (-ENOMEM);
 		}
 		dma_addr =
@@ -1991,6 +2013,9 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					2 * VLAN_TAG_SIZE) > buf_size){
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG,
+					     "forcing scatter mode");
 			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 			dev->data->scattered_rx = 1;
 		}
@@ -2000,6 +2025,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 */
 		if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
 			rctl_bsize = buf_size;
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
@@ -2021,6 +2048,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
@@ -2255,6 +2284,9 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 					2 * VLAN_TAG_SIZE) > buf_size){
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG,
+					     "forcing scatter mode");
 			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 			dev->data->scattered_rx = 1;
 		}
@@ -2264,6 +2296,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		 */
 		if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
 			rctl_bsize = buf_size;
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
@@ -2287,7 +2321,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		 * to avoid Write-Back not triggered sometimes
 		 */
 		rxdctl |= 0x10000;
-		PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
+		PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
 	} else
 		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
 
@@ -2295,6 +2329,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
@@ -2355,7 +2391,7 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
 		 * to avoid Write-Back not triggered sometimes
 		 */
 		txdctl |= 0x10000;
-		PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
+		PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
 	} else
 		txdctl |= ((txq->wthresh & 0x1F) << 16);
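
Note on the l2_l3_len union added to eth_igb_xmit_pkts() above: the mbuf API now carries separate l2_len and l3_len fields, so the driver re-packs them into the single 16-bit value that vlan_macip_lens.f.l2_l3_len expects — 9 bits of L3 header length in the low bits, 7 bits of L2 header length above them, per the bit-field declaration in the hunk. The sketch below is a standalone illustration of that packing, not driver code; it assumes a compiler that allocates bit-fields from the least-significant bit (as GCC does on little-endian x86 — bit-field layout is implementation-defined in C), and the 14/20 header lengths are example values, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the anonymous union declared in eth_igb_xmit_pkts(). */
union l2_l3_len {
	uint16_t u16;
	struct {
		uint16_t l3_len:9;	/* IP header length, bits 0..8  */
		uint16_t l2_len:7;	/* MAC header length, bits 9..15 */
	};
};

int main(void)
{
	union l2_l3_len len = { .u16 = 0 };

	len.l2_len = 14;	/* Ethernet header */
	len.l3_len = 20;	/* IPv4 header without options */

	/* On the assumed layout: (14 << 9) | 20 == 0x1c14 */
	printf("packed l2_l3_len = 0x%04x\n", len.u16);
	return 0;
}

Packing the two lengths this way keeps the value handed to the hardware identical to the old combined l2_l3_len mbuf field, which is presumably why igbe_set_xmit_ctx() and its TX_MACIP_LEN_CMP_MASK comparison carry over unchanged.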