X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Figb_rxtx.c;h=18aeead0fab3292a1904af67c0fe5dd3040c8b99;hb=23fb3b460fdccf232c74a6115171e035d1f56435;hp=2b8a1c893eb48d05193703c09abc335412661d05;hpb=4c8db5f09a241eed2a78c63aa0c21bdac29cf5fd;p=dpdk.git

diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 2b8a1c893e..18aeead0fa 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -79,22 +79,6 @@
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG)
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return (m);
-}
-
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -331,9 +315,9 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
 	}
 
 	txq->ctx_cache[ctx_curr].flags = ol_flags;
-	txq->ctx_cache[ctx_idx].tx_offload.data =
+	txq->ctx_cache[ctx_curr].tx_offload.data =
 		tx_offload_mask.data & tx_offload.data;
-	txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
+	txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
 
 	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
 	vlan_macip_lens = (uint32_t)tx_offload.data;
@@ -366,7 +350,7 @@ what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
 	}
 
 	/* Mismatch, use the previous context */
-	return (IGB_CTX_NUM);
+	return IGB_CTX_NUM;
 }
 
 static inline uint32_t
@@ -456,7 +440,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
 			/* Only allocate context descriptor if required*/
 			new_ctx = (ctx == IGB_CTX_NUM);
-			ctx = txq->ctx_curr;
+			ctx = txq->ctx_curr + txq->ctx_start;
 			tx_last = (uint16_t) (tx_last + new_ctx);
 		}
 		if (tx_last >= txq->nb_tx_desc)
@@ -518,7 +502,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 */
 		if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
 			if (nb_tx == 0)
-				return (0);
+				return 0;
 			goto end_of_tx;
 		}
 
@@ -596,7 +580,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 * Set up transmit descriptor.
 			 */
 			slen = (uint16_t) m_seg->data_len;
-			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
 			txd->read.buffer_addr =
 					rte_cpu_to_le_64(buf_dma_addr);
 			txd->read.cmd_type_len =
@@ -628,7 +612,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		   (unsigned) tx_id, (unsigned) nb_tx);
 	txq->tx_tail = tx_id;
 
-	return (nb_tx);
+	return nb_tx;
 }
 
 /*********************************************************************
@@ -714,7 +698,7 @@ igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
 }
 
 static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
 {
 	uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?
 				0 : PKT_RX_RSS_HASH;
@@ -724,7 +708,16 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
 		0, 0, 0, 0,
 	};
 
-	pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
+	struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
+
+	/* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
+	if (hw->mac.type == e1000_i210)
+		pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
+	else
+		pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
+#else
+	RTE_SET_USED(rxq);
 #endif
 
 	return pkt_flags;
@@ -835,7 +828,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u", (unsigned) rxq->port_id,
@@ -866,7 +859,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 		rxdp->read.hdr_addr = 0;
 		rxdp->read.pkt_addr = dma_addr;
 
@@ -898,7 +891,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
-		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
 		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
 		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
 		rxm->ol_flags = pkt_flags;
@@ -935,7 +928,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nb_hold = 0;
 	}
 	rxq->nb_rx_hold = nb_hold;
-	return (nb_rx);
+	return nb_rx;
 }
 
 uint16_t
@@ -1018,7 +1011,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1052,7 +1045,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
-		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+		dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 		rxdp->read.pkt_addr = dma;
 		rxdp->read.hdr_addr = 0;
 
@@ -1134,7 +1127,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
-		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
 		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
 		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
 		first_seg->ol_flags = pkt_flags;
@@ -1190,19 +1183,9 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nb_hold = 0;
 	}
 	rxq->nb_rx_hold = nb_hold;
-	return (nb_rx);
+	return nb_rx;
 }
 
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define IGB_ALIGN 128
-
 /*
  * Maximum number of Ring Descriptors.
  *
@@ -1210,31 +1193,6 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
  * desscriptors should meet the following condition:
  * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
  */
-#define IGB_MIN_RING_DESC 32
-#define IGB_MAX_RING_DESC 4096
-
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-		      uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-			dev->driver->pci_drv.name, ring_name,
-				dev->data->port_id, queue_id);
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve_aligned(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN);
-#endif
-}
 
 static void
 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
@@ -1328,10 +1286,11 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of IGB_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
-	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+	if (nb_desc % IGB_TXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return -EINVAL;
 	}
 
@@ -1340,13 +1299,13 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 * driver.
 	 */
 	if (tx_conf->tx_free_thresh != 0)
-		PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
+		PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
 			     "used for the 1G driver.");
 	if (tx_conf->tx_rs_thresh != 0)
-		PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
+		PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
 			     "used for the 1G driver.");
-	if (tx_conf->tx_thresh.wthresh == 0)
-		PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
+	if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
+		PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
 			     "consider setting the TX WTHRESH value to 4, 8, "
 			     "or 16.");
 
@@ -1360,19 +1319,19 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
 							RTE_CACHE_LINE_SIZE);
 	if (txq == NULL)
-		return (-ENOMEM);
+		return -ENOMEM;
 
 	/*
 	 * Allocate TX ring hardware descriptors. A memzone large enough to
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
-	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
-					size, socket_id);
+	size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
+	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
+				      E1000_ALIGN, socket_id);
 	if (tz == NULL) {
 		igb_tx_queue_release(txq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	txq->nb_tx_desc = nb_desc;
@@ -1387,19 +1346,16 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->port_id = dev->data->port_id;
 
 	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
-	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
 	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
-	 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+
+	txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
 	/* Allocate software ring */
 	txq->sw_ring = rte_zmalloc("txq->sw_ring",
 				   sizeof(struct igb_tx_entry) * nb_desc,
 				   RTE_CACHE_LINE_SIZE);
 	if (txq->sw_ring == NULL) {
 		igb_tx_queue_release(txq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
@@ -1408,7 +1364,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
 
-	return (0);
+	return 0;
 }
 
 static void
@@ -1476,11 +1432,12 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of IGB_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
-	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
-		return (-EINVAL);
+	if (nb_desc % IGB_RXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
+		return -EINVAL;
 	}
 
 	/* Free memory prior to re-allocation if needed */
@@ -1493,13 +1450,14 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
-		return (-ENOMEM);
+		return -ENOMEM;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
 	rxq->hthresh = rx_conf->rx_thresh.hthresh;
 	rxq->wthresh = rx_conf->rx_thresh.wthresh;
-	if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
+	if (rxq->wthresh > 0 &&
+	    (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
 		rxq->wthresh = 1;
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
@@ -1515,19 +1473,16 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
-	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
+	size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
+	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
+				      E1000_ALIGN, socket_id);
 	if (rz == NULL) {
 		igb_rx_queue_release(rxq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
-	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
 	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
 	rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
 
 	/* Allocate software ring. */
@@ -1536,7 +1491,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq->sw_ring == NULL) {
 		igb_rx_queue_release(rxq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
@@ -1992,15 +1947,15 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 	/* Initialize software ring entries. */
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile union e1000_adv_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
 				     "queue_id=%hu", rxq->queue_id);
-			return (-ENOMEM);
+			return -ENOMEM;
 		}
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
 		rxd = &rxq->rx_ring[i];
 		rxd->read.hdr_addr = 0;
 		rxd->read.pkt_addr = dma_addr;
@@ -2528,3 +2483,34 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
 	}
 
 }
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct igb_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct igb_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+}
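
For reference, below is a minimal usage sketch (not part of the patch) of how an application could read back the queue settings that the new igb_rxq_info_get()/igb_txq_info_get() callbacks report, going through the generic ethdev queue-info calls that dispatch to them. The port/queue ids are placeholders, and the uint8_t port_id signature assumes the DPDK 2.2-era rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get() API rather than anything this diff itself adds.

/*
 * Usage sketch, assuming the DPDK 2.2-era ethdev API. On an igb port,
 * rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get() end up calling
 * the rxq_info_get/txq_info_get dev_ops, i.e. igb_rxq_info_get() and
 * igb_txq_info_get() from this patch.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
show_queue_info(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	/* Filled from the RX queue's mempool, descriptor count and config. */
	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
		printf("rxq %u: nb_desc=%u drop_en=%u scattered=%u\n",
		       (unsigned) queue_id, (unsigned) rx_qinfo.nb_desc,
		       (unsigned) rx_qinfo.conf.rx_drop_en,
		       (unsigned) rx_qinfo.scattered_rx);

	/* Filled from the TX queue's descriptor count and threshold config. */
	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo) == 0)
		printf("txq %u: nb_desc=%u wthresh=%u\n",
		       (unsigned) queue_id, (unsigned) tx_qinfo.nb_desc,
		       (unsigned) tx_qinfo.conf.tx_thresh.wthresh);
}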