X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_e1000%2Figb_rxtx.c;h=46f6f29d9350cfe26ef7284c1713c6a4a4ccb1cc;hb=594f3c1cea092afc62cd5d4c867968cff595542c;hp=3cb410917470284ca845fb919a9ce3cd21ee0c5c;hpb=5e305acc2ec401688ee6c1ba2765b483ae953e07;p=dpdk.git

diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 3cb4109174..46f6f29d93 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -72,7 +72,7 @@
 #include 
 
 #include "e1000_logs.h"
-#include "igb/e1000_api.h"
+#include "e1000/e1000_api.h"
 #include "e1000_ethdev.h"
 
 static inline struct rte_mbuf *
@@ -88,7 +88,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
 	(uint64_t) ((mb)->buf_physaddr + \
 		(uint64_t) ((char *)((mb)->pkt.data) - \
-			(char *)(mb)->buf_addr))
+		(char *)(mb)->buf_addr))
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
 	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
@@ -139,7 +139,7 @@ struct igb_rx_queue {
 enum igb_advctx_num {
 	IGB_CTX_0    = 0, /**< CTX0    */
 	IGB_CTX_1    = 1, /**< CTX1    */
-	IGB_CTX_NUM  = 2, /**< CTX NUM */
+	IGB_CTX_NUM  = 2, /**< CTX_NUM */
 };
 
 /**
@@ -148,7 +148,7 @@ enum igb_advctx_num {
 struct igb_advctx_info {
 	uint16_t flags;           /**< ol_flags related to context build. */
 	uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
-	uint32_t vlan_macip_lens; /**< vlan, mac.ip length. */
+	union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
 };
 
 /**
@@ -161,16 +161,20 @@ struct igb_tx_queue {
 	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
 	uint32_t               txd_type;      /**< Device-specific TXD type */
 	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
-	uint16_t               tx_tail; /**< Current value of TDT register. */
-	uint16_t               tx_head; /**< Index of first used TX descriptor. */
+	uint16_t               tx_tail;  /**< Current value of TDT register. */
+	uint16_t               tx_head;
+	/**< Index of first used TX descriptor. */
 	uint16_t               queue_id; /**< TX queue index. */
 	uint8_t                port_id;  /**< Device port identifier. */
 	uint8_t                pthresh;  /**< Prefetch threshold register. */
 	uint8_t                hthresh;  /**< Host threshold register. */
 	uint8_t                wthresh;  /**< Write-back threshold register. */
-	uint32_t               ctx_curr; /**< Current used hardware descriptor. */
-	uint32_t               ctx_start;/**< Start context position for transmit queue. */
-	struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; /**< Hardware context history.*/
+	uint32_t               ctx_curr;
+	/**< Current used hardware descriptor. */
+	uint32_t               ctx_start;
+	/**< Start context position for transmit queue. */
+	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
+	/**< Hardware context history.*/
 };
 
 #if 1
@@ -255,7 +259,8 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
 
 	txq->ctx_cache[ctx_curr].flags           = ol_flags;
 	txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
-	txq->ctx_cache[ctx_curr].vlan_macip_lens = vlan_macip_lens & cmp_mask;
+	txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
+		vlan_macip_lens & cmp_mask;
 
 	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
 	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
@@ -273,7 +278,7 @@ what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
 {
 	/* If match with the current context */
 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
 		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
 			return txq->ctx_curr;
 	}
@@ -281,7 +286,7 @@ what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
 	/* If match with the second context */
 	txq->ctx_curr ^= 1;
 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+		(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
 		(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
 			return txq->ctx_curr;
 	}
@@ -310,9 +315,10 @@ tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
 }
 
 uint16_t
-eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	       uint16_t nb_pkts)
 {
+	struct igb_tx_queue *txq;
 	struct igb_tx_entry *sw_ring;
 	struct igb_tx_entry *txe, *txn;
 	volatile union e1000_adv_tx_desc *txr;
@@ -334,6 +340,7 @@ eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
 	uint32_t ctx;
 	uint32_t vlan_macip_lens;
 
+	txq = tx_queue;
 	sw_ring = txq->sw_ring;
 	txr     = txq->tx_ring;
 	tx_id   = txq->tx_tail;
@@ -356,12 +363,13 @@ eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
 		tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
 
 		ol_flags = tx_pkt->ol_flags;
-		vlan_macip_lens = (tx_pkt->pkt.vlan_tci << 16) | (tx_pkt->pkt.l2_len << E1000_ADVTXD_MACLEN_SHIFT) | tx_pkt->pkt.l3_len;
+		vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
 		tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK);
 
 		/* If a Context Descriptor need be built . */
 		if (tx_ol_req) {
-			ctx = what_advctx_update(txq, tx_ol_req,vlan_macip_lens);
+			ctx = what_advctx_update(txq, tx_ol_req,
+				vlan_macip_lens);
 			/* Only allocate context descriptor if required*/
 			new_ctx = (ctx == IGB_CTX_NUM);
 			ctx = txq->ctx_curr;
@@ -604,9 +612,10 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 }
 
 uint16_t
-eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	       uint16_t nb_pkts)
 {
+	struct igb_rx_queue *rxq;
 	volatile union e1000_adv_rx_desc *rx_ring;
 	volatile union e1000_adv_rx_desc *rxdp;
 	struct igb_rx_entry *sw_ring;
@@ -625,6 +634,7 @@ eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 
 	nb_rx = 0;
 	nb_hold = 0;
+	rxq = rx_queue;
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
 	sw_ring = rxq->sw_ring;
@@ -736,7 +746,8 @@ eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+		rxm->pkt.vlan_macip.f.vlan_tci =
+			rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
 		pkt_flags = (pkt_flags |
@@ -779,9 +790,10 @@ eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 }
 
 uint16_t
-eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	       uint16_t nb_pkts)
 {
+	struct igb_rx_queue *rxq;
 	volatile union e1000_adv_rx_desc *rx_ring;
 	volatile union e1000_adv_rx_desc *rxdp;
 	struct igb_rx_entry *sw_ring;
@@ -802,6 +814,7 @@ eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 
 	nb_rx = 0;
 	nb_hold = 0;
+	rxq = rx_queue;
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
 	sw_ring = rxq->sw_ring;
@@ -970,7 +983,8 @@ eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
 		 * set in the pkt_flags field.
 		 */
-		first_seg->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+		first_seg->pkt.vlan_macip.f.vlan_tci =
+			rte_le_to_cpu_16(rxd.wb.upper.vlan);
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
 		pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
@@ -1084,47 +1098,17 @@ igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 static void
 igb_tx_queue_release(struct igb_tx_queue *txq)
 {
-	igb_tx_queue_release_mbufs(txq);
-	rte_free(txq->sw_ring);
-	rte_free(txq);
+	if (txq != NULL) {
+		igb_tx_queue_release_mbufs(txq);
+		rte_free(txq->sw_ring);
+		rte_free(txq);
+	}
 }
 
-int
-igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+void
+eth_igb_tx_queue_release(void *txq)
 {
-	uint16_t i, old_nb_queues = dev->data->nb_tx_queues;
-	struct igb_tx_queue **txq;
-
-	if (dev->data->tx_queues == NULL) {
-		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
-				sizeof(struct igb_tx_queue *) * nb_queues,
-				CACHE_LINE_SIZE);
-		if (dev->data->tx_queues == NULL) {
-			dev->data->nb_tx_queues = 0;
-			return -ENOMEM;
-		}
-	} else {
-		if (nb_queues < old_nb_queues)
-			for (i = nb_queues; i < old_nb_queues; i++)
-				igb_tx_queue_release(dev->data->tx_queues[i]);
-
-		if (nb_queues != old_nb_queues) {
-			txq = rte_realloc(dev->data->tx_queues,
-				sizeof(struct igb_tx_queue *) * nb_queues,
-				CACHE_LINE_SIZE);
-			if (txq == NULL)
-				return -ENOMEM;
-			else
-				dev->data->tx_queues = txq;
-			if (nb_queues > old_nb_queues)
-				memset(&(txq[old_nb_queues]), 0,
-					sizeof(struct igb_tx_queue *) *
-						(nb_queues - old_nb_queues));
-		}
-	}
-	dev->data->nb_tx_queues = nb_queues;
-
-	return 0;
+	igb_tx_queue_release(txq);
 }
 
 static void
@@ -1203,15 +1187,15 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	if (tx_conf->tx_free_thresh != 0)
 		RTE_LOG(WARNING, PMD,
 			"The tx_free_thresh parameter is not "
-			"used for the 1G driver.");
+			"used for the 1G driver.\n");
 	if (tx_conf->tx_rs_thresh != 0)
 		RTE_LOG(WARNING, PMD,
 			"The tx_rs_thresh parameter is not "
-			"used for the 1G driver.");
+			"used for the 1G driver.\n");
 	if (tx_conf->tx_thresh.wthresh == 0)
 		RTE_LOG(WARNING, PMD,
 			"To improve 1G driver performance, consider setting "
-			"the TX WTHRESH value to 4, 8, or 16.");
+			"the TX WTHRESH value to 4, 8, or 16.\n");
 
 	/* Free memory prior to re-allocation if needed */
 	if (dev->data->tx_queues[queue_idx] != NULL)
@@ -1285,47 +1269,17 @@ igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
 static void
 igb_rx_queue_release(struct igb_rx_queue *rxq)
 {
-	igb_rx_queue_release_mbufs(rxq);
-	rte_free(rxq->sw_ring);
-	rte_free(rxq);
+	if (rxq != NULL) {
+		igb_rx_queue_release_mbufs(rxq);
+		rte_free(rxq->sw_ring);
+		rte_free(rxq);
+	}
 }
 
-int
-igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+void
+eth_igb_rx_queue_release(void *rxq)
 {
-	uint16_t i, old_nb_queues = dev->data->nb_rx_queues;
-	struct igb_rx_queue **rxq;
-
-	if (dev->data->rx_queues == NULL) {
-		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
-				sizeof(struct igb_rx_queue *) * nb_queues,
-				CACHE_LINE_SIZE);
-		if (dev->data->rx_queues == NULL) {
-			dev->data->nb_rx_queues = 0;
-			return -ENOMEM;
-		}
-	} else {
-		for (i = nb_queues; i < old_nb_queues; i++) {
-			igb_rx_queue_release(dev->data->rx_queues[i]);
-			dev->data->rx_queues[i] = NULL;
-		}
-		if (nb_queues != old_nb_queues) {
-			rxq = rte_realloc(dev->data->rx_queues,
-				sizeof(struct igb_rx_queue *) * nb_queues,
-				CACHE_LINE_SIZE);
-			if (rxq == NULL)
-				return -ENOMEM;
-			else
-				dev->data->rx_queues = rxq;
-			if (nb_queues > old_nb_queues)
-				memset(&(rxq[old_nb_queues]), 0,
-					sizeof(struct igb_rx_queue *) *
-						(nb_queues - old_nb_queues));
-		}
-	}
-	dev->data->nb_rx_queues = nb_queues;
-
-	return 0;
+	igb_rx_queue_release(rxq);
 }
 
 static void
@@ -1434,14 +1388,18 @@ igb_dev_clear_queues(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		igb_tx_queue_release_mbufs(txq);
-		igb_reset_tx_queue(txq, dev);
+		if (txq != NULL) {
+			igb_tx_queue_release_mbufs(txq);
+			igb_reset_tx_queue(txq, dev);
+		}
 	}
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		igb_rx_queue_release_mbufs(rxq);
-		igb_reset_rx_queue(rxq);
+		if (rxq != NULL) {
+			igb_rx_queue_release_mbufs(rxq);
+			igb_reset_rx_queue(rxq);
+		}
 	}
 }
 
@@ -1639,10 +1597,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 
 		/* Allocate buffers for descriptor rings and set up queue */
 		ret = igb_alloc_rx_queue_mbufs(rxq);
-		if (ret) {
-			igb_dev_clear_queues(dev);
+		if (ret)
 			return ret;
-		}
 
 		/*
 		 * Reset crc_len in case it was changed after queue setup by a
@@ -1683,7 +1639,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 					E1000_SRRCTL_BSIZEPKT_MASK) <<
 					E1000_SRRCTL_BSIZEPKT_SHIFT);
 
-		if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+		if (dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE
+				> buf_size){
 			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 			dev->data->scattered_rx = 1;
 		}
@@ -1763,19 +1720,20 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		/* set STRCRC bit in all queues for Powerville */
 		if (hw->mac.type == e1000_i350) {
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
+				uint32_t dvmolr = E1000_READ_REG(hw,
+					E1000_DVMOLR(i));
 				dvmolr |= E1000_DVMOLR_STRCRC;
 				E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
 			}
 		}
-
 	} else {
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
 		/* clear STRCRC bit in all queues for Powerville */
 		if (hw->mac.type == e1000_i350) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
+				uint32_t dvmolr = E1000_READ_REG(hw,
+					E1000_DVMOLR(i));
 				dvmolr &= ~E1000_DVMOLR_STRCRC;
 				E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
 			}
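
Note on the mbuf change in this patch: the TX path now reads one packed word (tx_pkt->pkt.vlan_macip.data) and the RX path writes only the VLAN tag through pkt.vlan_macip.f.vlan_tci, instead of using three separate vlan_tci/l2_len/l3_len fields. The sketch below is illustrative only and is not copied from rte_mbuf.h; the field widths are an assumption inferred from the removed expression (vlan_tci << 16) | (l2_len << E1000_ADVTXD_MACLEN_SHIFT) | l3_len, taking E1000_ADVTXD_MACLEN_SHIFT as 9 and a little-endian bit-field layout.

/*
 * Illustrative sketch (not the DPDK definition): a packed vlan/l2/l3 word
 * equivalent to the removed shift expression, assuming the 9-bit L3 length
 * and 7-bit L2 length split implied by E1000_ADVTXD_MACLEN_SHIFT == 9.
 */
#include <stdint.h>

union vlan_macip_sketch {
	uint32_t data;               /* whole word, read/written in one access */
	struct {
		uint16_t l3_len:9;   /* L3 (IP) header length, low bits */
		uint16_t l2_len:7;   /* L2 (MAC) header length, above l3_len */
		uint16_t vlan_tci;   /* VLAN tag control information, upper 16 bits */
	} f;
};

/*
 * With such a layout the transmit path fetches all three values in a single
 * 32-bit load, while the receive path can still set the VLAN tag alone
 * through the .f.vlan_tci member, which is what the edits above rely on.
 */
static inline uint32_t
pack_vlan_macip(uint16_t vlan_tci, uint8_t l2_len, uint16_t l3_len)
{
	union vlan_macip_sketch v;

	v.f.l3_len   = l3_len;
	v.f.l2_len   = l2_len;
	v.f.vlan_tci = vlan_tci;
	return v.data;
}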