X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=a046bec26128f8207b7cb86d3ce8743371d9e754;hb=2e49ae79ebb9db4864cdb65bd0a2c021c1da872d;hp=af7e22279f8276a71692112f768000c65a627d50;hpb=b50c10cc892ecd2bf84cf3bd046e51ce47c9c5bd;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index af7e22279f..a046bec261 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -130,7 +130,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) /* check DD bit on threshold descriptor */ status = txq->tx_ring[txq->tx_next_dd].wb.status; - if (! (status & IXGBE_ADVTXD_STAT_DD)) + if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))) return 0; /* @@ -175,11 +175,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) pkt_len = (*pkts)->data_len; /* write data to descriptor */ - txdp->read.buffer_addr = buf_dma_addr; + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txdp->read.cmd_type_len = - ((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + txdp->read.olinfo_status = - (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_prefetch0(&(*pkts)->pool); } } @@ -195,11 +198,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) pkt_len = (*pkts)->data_len; /* write data to descriptor */ - txdp->read.buffer_addr = buf_dma_addr; + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); txdp->read.cmd_type_len = - ((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len); txdp->read.olinfo_status = - (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); rte_prefetch0(&(*pkts)->pool); } @@ -511,6 +514,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) uint16_t nb_tx_desc = txq->nb_tx_desc; uint16_t desc_to_clean_to; uint16_t nb_tx_to_clean; + uint32_t status; /* Determine the last descriptor needing to be cleaned */ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); @@ -519,7 +523,8 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) /* Check to make sure the last descriptor to clean is done */ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; - if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD)) + status = txr[desc_to_clean_to].wb.status; + if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) { PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done" @@ -1034,7 +1039,6 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC /* * LOOK_AHEAD defines how many desc statuses to check beyond the * current descriptor. @@ -1062,14 +1066,15 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) int s[LOOK_AHEAD], nb_dd; #endif /* RTE_NEXT_ABI */ int i, j, nb_rx = 0; - + uint32_t status; /* get references to current descriptor and S/W ring entry */ rxdp = &rxq->rx_ring[rxq->rx_tail]; rxep = &rxq->sw_ring[rxq->rx_tail]; + status = rxdp->wb.upper.status_error; /* check to make sure there is at least 1 packet to receive */ - if (! 
(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) + if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) return 0; /* @@ -1081,7 +1086,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) { /* Read desc statuses backwards to avoid race condition */ for (j = LOOK_AHEAD-1; j >= 0; --j) - s[j] = rxdp[j].wb.upper.status_error; + s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error); #ifdef RTE_NEXT_ABI for (j = LOOK_AHEAD - 1; j >= 0; --j) @@ -1099,7 +1104,8 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) /* Translate descriptor info to mbuf format */ for (j = 0; j < nb_dd; ++j) { mb = rxep[j].mbuf; - pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len); + pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) - + rxq->crc_len; mb->data_len = pkt_len; mb->pkt_len = pkt_len; mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan); @@ -1115,7 +1121,8 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]); #else /* RTE_NEXT_ABI */ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags( - rxdp[j].wb.lower.lo_dword.data); + rte_le_to_cpu_32( + rxdp[j].wb.lower.lo_dword.data)); /* reuse status field from scan list */ pkt_flags |= rx_desc_status_to_pkt_flags(s[j]); pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); @@ -1123,12 +1130,14 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) #endif /* RTE_NEXT_ABI */ if (likely(pkt_flags & PKT_RX_RSS_HASH)) - mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss; + mb->hash.rss = rte_le_to_cpu_32( + rxdp[j].wb.lower.hi_dword.rss); else if (pkt_flags & PKT_RX_FDIR) { - mb->hash.fdir.hash = - (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum) - & IXGBE_ATR_HASH_MASK); - mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id; + mb->hash.fdir.hash = rte_le_to_cpu_16( + rxdp[j].wb.lower.hi_dword.csum_ip.csum) & + IXGBE_ATR_HASH_MASK; + mb->hash.fdir.id = rte_le_to_cpu_16( + rxdp[j].wb.lower.hi_dword.csum_ip.ip_id); } } @@ -1305,24 +1314,6 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, return nb_rx; } -#else - -/* Stub to avoid extra ifdefs */ -static uint16_t -ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue, - __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) -{ - return 0; -} - -static inline int -ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq, - __rte_unused bool reset_mbuf) -{ - return -ENOMEM; -} -#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */ - uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1365,7 +1356,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxdp = &rx_ring[rx_id]; staterr = rxdp->wb.upper.status_error; - if (! 
(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) break; rxd = *rxdp; @@ -1483,12 +1474,14 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, #endif /* RTE_NEXT_ABI */ if (likely(pkt_flags & PKT_RX_RSS_HASH)) - rxm->hash.rss = rxd.wb.lower.hi_dword.rss; + rxm->hash.rss = rte_le_to_cpu_32( + rxd.wb.lower.hi_dword.rss); else if (pkt_flags & PKT_RX_FDIR) { - rxm->hash.fdir.hash = - (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum) - & IXGBE_ATR_HASH_MASK); - rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id; + rxm->hash.fdir.hash = rte_le_to_cpu_16( + rxd.wb.lower.hi_dword.csum_ip.csum) & + IXGBE_ATR_HASH_MASK; + rxm->hash.fdir.id = rte_le_to_cpu_16( + rxd.wb.lower.hi_dword.csum_ip.ip_id); } /* * Store the mbuf address into the next entry of the array @@ -1708,7 +1701,6 @@ next_desc: break; } } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC else if (nb_hold > rxq->rx_free_thresh) { uint16_t next_rdt = rxq->rx_free_trigger; @@ -1727,7 +1719,6 @@ next_desc: break; } } -#endif nb_hold++; rxe = &sw_ring[rx_id]; @@ -2001,7 +1992,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) prev = (uint16_t) (txq->nb_tx_desc - 1); for (i = 0; i < txq->nb_tx_desc; i++) { volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; - txd->wb.status = IXGBE_TXD_STAT_DD; + txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD); txe[i].mbuf = NULL; txe[i].last_id = i; txe[prev].next_id = i; @@ -2270,15 +2261,21 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) { unsigned i; +#ifdef RTE_IXGBE_INC_VECTOR + /* SSE Vector driver has a different way of releasing mbufs. */ + if (rxq->rx_using_sse) { + ixgbe_rx_queue_release_mbufs_vec(rxq); + return; + } +#endif + if (rxq->sw_ring != NULL) { for (i = 0; i < rxq->nb_rx_desc; i++) { - if (rxq->sw_ring[i].mbuf != NULL && - rte_mbuf_refcnt_read(rxq->sw_ring[i].mbuf)) { + if (rxq->sw_ring[i].mbuf != NULL) { rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); rxq->sw_ring[i].mbuf = NULL; } } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC if (rxq->rx_nb_avail) { for (i = 0; i < rxq->rx_nb_avail; ++i) { struct rte_mbuf *mb; @@ -2287,7 +2284,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) } rxq->rx_nb_avail = 0; } -#endif } if (rxq->sw_sc_ring) @@ -2324,11 +2320,7 @@ ixgbe_dev_rx_queue_release(void *rxq) * function must be used. */ static inline int __attribute__((cold)) -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) -#else -check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) -#endif { int ret = 0; @@ -2341,7 +2333,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) * Scattered packets are not supported. This should be checked * outside of this function. */ -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " "rxq->rx_free_thresh=%d, " @@ -2370,9 +2361,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) RTE_PMD_IXGBE_RX_MAX_BURST); ret = -EINVAL; } -#else - ret = -EINVAL; -#endif return ret; } @@ -2408,7 +2396,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->rx_ring[i] = zeroed_desc; } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC /* * initialize extra software ring entries. 
Space for these extra * entries is always allocated @@ -2421,11 +2408,15 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->rx_nb_avail = 0; rxq->rx_next_avail = 0; rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); -#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */ rxq->rx_tail = 0; rxq->nb_rx_hold = 0; rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; + +#ifdef RTE_IXGBE_INC_VECTOR + rxq->rxrearm_start = 0; + rxq->rxrearm_nb = 0; +#endif } int __attribute__((cold)) @@ -2607,7 +2598,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxdp = &(rxq->rx_ring[rxq->rx_tail]); while ((desc < rxq->nb_rx_desc) && - (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) { + (rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) { desc += IXGBE_RXQ_SCAN_INTERVAL; rxdp += IXGBE_RXQ_SCAN_INTERVAL; if (rxq->rx_tail + desc >= rxq->nb_rx_desc) @@ -2632,7 +2624,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) desc -= rxq->nb_rx_desc; rxdp = &rxq->rx_ring[desc]; - return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD); + return !!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)); } void __attribute__((cold)) @@ -3926,6 +3919,7 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type) void __attribute__((cold)) ixgbe_set_rx_function(struct rte_eth_dev *dev) { + uint16_t i, rx_using_sse; struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev->data->dev_private; @@ -4007,13 +4001,23 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc; } else { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " - "satisfied, or Scattered Rx is requested, " - "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC " - "is not enabled (port=%d).", + "satisfied, or Scattered Rx is requested " + "(port=%d).", dev->data->port_id); dev->rx_pkt_burst = ixgbe_recv_pkts; } + + /* Propagate information about RX function choice through all queues. */ + + rx_using_sse = + (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_pkts_vec); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + rxq->rx_using_sse = rx_using_sse; + } } /**
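
Note on the recurring change in the hunks above: the hardware lays out its RX/TX descriptors in little-endian, so host-order flag constants are now converted with rte_cpu_to_le_32()/rte_cpu_to_le_64() before being written to or masked against raw descriptor words, and multi-byte fields read back from the ring pass through rte_le_to_cpu_16()/rte_le_to_cpu_32(). On little-endian CPUs these macros compile to nothing, so the change only matters for big-endian builds. The fragment below is a minimal, self-contained sketch of that pattern; it is not part of the patch, and the helper names and the locally re-declared IXGBE_RXDADV_STAT_DD value exist only for illustration.

#include <stdint.h>
#include <rte_byteorder.h>

/* Done bit, duplicated from ixgbe_type.h so the sketch stands alone. */
#define IXGBE_RXDADV_STAT_DD	0x01

/*
 * Illustrative read side: test the DD (descriptor done) bit on a raw
 * little-endian status word.  rte_cpu_to_le_32() is a no-op on
 * little-endian CPUs and a byte swap on big-endian ones, so the mask
 * is correct either way and the volatile field is still read once.
 */
static inline int
rx_desc_done(volatile const uint32_t *status_le)
{
	uint32_t status = *status_le;

	return !!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}

/*
 * Illustrative write side, mirroring tx1()/tx4() above: CPU-order
 * values are converted before being stored into the little-endian
 * descriptor fields.
 */
static inline void
tx_desc_fill(volatile uint64_t *buffer_addr,
	     volatile uint32_t *cmd_type_len,
	     uint64_t dma_addr, uint32_t cmd_flags, uint16_t pkt_len)
{
	*buffer_addr = rte_cpu_to_le_64(dma_addr);
	*cmd_type_len = rte_cpu_to_le_32(cmd_flags | (uint32_t)pkt_len);
}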
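
The other structural change visible above is that the RX teardown path now distinguishes queues driven by the vector (SSE) receive functions: ixgbe_set_rx_function() records the choice per queue in rx_using_sse, and ixgbe_rx_queue_release_mbufs() hands such queues to ixgbe_rx_queue_release_mbufs_vec(). A rough sketch of that flag-then-dispatch shape, using hypothetical names in place of the driver's own types and routines:

#include <stdbool.h>

/* Hypothetical, trimmed-down queue descriptor; the real
 * struct ixgbe_rx_queue carries far more state. */
struct rxq_sketch {
	bool rx_using_sse;	/* set when the RX burst function is chosen */
};

/* Stand-ins for the vector and scalar mbuf release routines. */
void release_mbufs_vec(struct rxq_sketch *rxq);
void release_mbufs_scalar(struct rxq_sketch *rxq);

/*
 * Teardown dispatch: the flag written once at setup time selects the
 * matching release path, the same shape as
 * ixgbe_rx_queue_release_mbufs() in the hunk above.
 */
void
release_mbufs(struct rxq_sketch *rxq)
{
	if (rxq->rx_using_sse) {
		release_mbufs_vec(rxq);
		return;
	}
	release_mbufs_scalar(rxq);
}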