X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=ad66b0971b8d80bd0d5b8a70739d3fd98f957241;hb=1f7b42e42e027997cb58e1e59cb82b8b5261dc53;hp=db2454ca8965b31e4cafdcfe00b9ef8f7f92d4c7;hpb=11b220c6498dc4a35e3d5061ad18cbdd13f9bc38;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index db2454ca89..ad66b0971b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -130,7 +130,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 
 	/* check DD bit on threshold descriptor */
 	status = txq->tx_ring[txq->tx_next_dd].wb.status;
-	if (! (status & IXGBE_ADVTXD_STAT_DD))
+	if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
 		return 0;
 
 	/*
@@ -175,11 +175,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 		pkt_len = (*pkts)->data_len;
 
 		/* write data to descriptor */
-		txdp->read.buffer_addr = buf_dma_addr;
+		txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
 		txdp->read.cmd_type_len =
-			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+			rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
 		txdp->read.olinfo_status =
-			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+			rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
 		rte_prefetch0(&(*pkts)->pool);
 	}
 }
@@ -195,11 +198,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	pkt_len = (*pkts)->data_len;
 
 	/* write data to descriptor */
-	txdp->read.buffer_addr = buf_dma_addr;
+	txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
 	txdp->read.cmd_type_len =
-		((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+		rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
 	txdp->read.olinfo_status =
-		(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 	rte_prefetch0(&(*pkts)->pool);
 }
@@ -511,6 +514,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 	uint16_t nb_tx_desc = txq->nb_tx_desc;
 	uint16_t desc_to_clean_to;
 	uint16_t nb_tx_to_clean;
+	uint32_t status;
 
 	/* Determine the last descriptor needing to be cleaned */
 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
@@ -519,7 +523,8 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 
 	/* Check to make sure the last descriptor to clean is done */
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+	status = txr[desc_to_clean_to].wb.status;
+	if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
 	{
 		PMD_TX_FREE_LOG(DEBUG,
 				"TX descriptor %4u is not done"
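
The recurring pattern in the Tx hunks above is to byte-swap the compile-time constant (IXGBE_ADVTXD_STAT_DD, IXGBE_TXD_STAT_DD) rather than the descriptor field read from memory: the conversion folds away entirely on little-endian CPUs and costs a single constant swap on big-endian ones, instead of a swap per poll. A minimal standalone sketch of the idea, not DPDK code (plain C; htole32() stands in for rte_cpu_to_le_32(), and the bit value is illustrative):

    #include <stdint.h>
    #include <endian.h>   /* htole32(); assumes a glibc/BSD-style endian.h */

    #define ADVTXD_STAT_DD 0x00000001  /* DD bit, defined in CPU byte order */

    /* Poll a little-endian descriptor status word for the DD (done) bit.
     * The constant, not the field, is converted, so the byte swap is
     * resolved at compile time and the hot path stays a plain AND.
     */
    static int tx_desc_done(const volatile uint32_t *status)
    {
        return (*status & htole32(ADVTXD_STAT_DD)) != 0;
    }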
@@ -859,7 +864,6 @@ end_of_tx:
  *  RX functions
  *
  **********************************************************************/
-#ifdef RTE_NEXT_ABI
 #define IXGBE_PACKET_TYPE_IPV4              0X01
 #define IXGBE_PACKET_TYPE_IPV4_TCP          0X11
 #define IXGBE_PACKET_TYPE_IPV4_UDP          0X21
@@ -962,43 +966,6 @@ ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
 	return ip_rss_types_map[pkt_info & 0XF];
 #endif
 }
-#else /* RTE_NEXT_ABI */
-static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
-{
-	uint64_t pkt_flags;
-
-	static const uint64_t ip_pkt_types_map[16] = {
-		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
-		PKT_RX_IPV6_HDR, 0, 0, 0,
-		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
-		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
-	};
-
-	static const uint64_t ip_rss_types_map[16] = {
-		0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
-		0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
-		PKT_RX_RSS_HASH, 0, 0, 0,
-		0, 0, 0, PKT_RX_FDIR,
-	};
-
-#ifdef RTE_LIBRTE_IEEE1588
-	static uint64_t ip_pkt_etqf_map[8] = {
-		0, 0, 0, PKT_RX_IEEE1588_PTP,
-		0, 0, 0, 0,
-	};
-
-	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
-			ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
-			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-#else
-	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
-			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-
-#endif
-	return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
-}
-#endif /* RTE_NEXT_ABI */
 
 static inline uint64_t
 rx_desc_status_to_pkt_flags(uint32_t rx_status)
@@ -1034,7 +1001,6 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
 }
 
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 /*
  * LOOK_AHEAD defines how many desc statuses to check beyond the
  * current descriptor.
@@ -1054,22 +1020,19 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 	struct rte_mbuf *mb;
 	uint16_t pkt_len;
 	uint64_t pkt_flags;
-#ifdef RTE_NEXT_ABI
 	int nb_dd;
 	uint32_t s[LOOK_AHEAD];
 	uint16_t pkt_info[LOOK_AHEAD];
-#else
-	int s[LOOK_AHEAD], nb_dd;
-#endif /* RTE_NEXT_ABI */
 	int i, j, nb_rx = 0;
-
+	uint32_t status;
 
 	/* get references to current descriptor and S/W ring entry */
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
 
+	status = rxdp->wb.upper.status_error;
 	/* check to make sure there is at least 1 packet to receive */
-	if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+	if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
 		return 0;
 
 	/*
@@ -1081,13 +1044,11 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 	{
 		/* Read desc statuses backwards to avoid race condition */
 		for (j = LOOK_AHEAD-1; j >= 0; --j)
-			s[j] = rxdp[j].wb.upper.status_error;
+			s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
 
-#ifdef RTE_NEXT_ABI
 		for (j = LOOK_AHEAD - 1; j >= 0; --j)
 			pkt_info[j] = rxdp[j].wb.lower.lo_dword.
 						hs_rss.pkt_info;
-#endif /* RTE_NEXT_ABI */
 
 		/* Compute how many status bits were set */
 		nb_dd = 0;
@@ -1099,13 +1060,13 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 		/* Translate descriptor info to mbuf format */
 		for (j = 0; j < nb_dd; ++j) {
 			mb = rxep[j].mbuf;
-			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+			pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+				  rxq->crc_len;
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
 			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
 
 			/* convert descriptor fields to rte mbuf flags */
-#ifdef RTE_NEXT_ABI
 			pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
 			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
 			pkt_flags |=
@@ -1113,22 +1074,16 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 				ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
 			mb->ol_flags = pkt_flags;
 			mb->packet_type =
 				ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
-#else /* RTE_NEXT_ABI */
-			pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
-					rxdp[j].wb.lower.lo_dword.data);
-			/* reuse status field from scan list */
-			pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
-			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
-			mb->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
 
 			if (likely(pkt_flags & PKT_RX_RSS_HASH))
-				mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+				mb->hash.rss = rte_le_to_cpu_32(
+					rxdp[j].wb.lower.hi_dword.rss);
 			else if (pkt_flags & PKT_RX_FDIR) {
-				mb->hash.fdir.hash =
-					(uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
-						& IXGBE_ATR_HASH_MASK);
-				mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+				mb->hash.fdir.hash = rte_le_to_cpu_16(
+					rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+					IXGBE_ATR_HASH_MASK;
+				mb->hash.fdir.id = rte_le_to_cpu_16(
+					rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
 			}
 		}
 
@@ -1184,7 +1139,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
 
 		/* populate the descriptors */
 		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
-		rxdp[i].read.hdr_addr = dma_addr;
+		rxdp[i].read.hdr_addr = 0;
 		rxdp[i].read.pkt_addr = dma_addr;
 	}
 
@@ -1305,24 +1260,6 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
-#else
-
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
-			   __rte_unused struct rte_mbuf **rx_pkts,
-			   __rte_unused uint16_t nb_pkts)
-{
-	return 0;
-}
-
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
-		    __rte_unused bool reset_mbuf)
-{
-	return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-
 uint16_t
 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
@@ -1337,11 +1274,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	union ixgbe_adv_rx_desc rxd;
 	uint64_t dma_addr;
 	uint32_t staterr;
-#ifdef RTE_NEXT_ABI
 	uint32_t pkt_info;
-#else
-	uint32_t hlen_type_rss;
-#endif
 	uint16_t pkt_len;
 	uint16_t rx_id;
 	uint16_t nb_rx;
@@ -1365,7 +1298,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		rxdp = &rx_ring[rx_id];
 		staterr = rxdp->wb.upper.status_error;
-		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+		if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
			break;
 
 		rxd = *rxdp;
@@ -1433,7 +1366,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxe->mbuf = nmb;
 		dma_addr =
 			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
-		rxdp->read.hdr_addr = dma_addr;
+		rxdp->read.hdr_addr = 0;
 		rxdp->read.pkt_addr = dma_addr;
 
 		/*
@@ -1459,7 +1392,6 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->data_len = pkt_len;
 		rxm->port = rxq->port_id;
 
-#ifdef RTE_NEXT_ABI
 		pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
 								pkt_info);
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
@@ -1471,24 +1403,16 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
 		rxm->ol_flags = pkt_flags;
 		rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
-#else /* RTE_NEXT_ABI */
-		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
-		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
-
-		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
-		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
-		rxm->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+			rxm->hash.rss = rte_le_to_cpu_32(
+						rxd.wb.lower.hi_dword.rss);
 		else if (pkt_flags & PKT_RX_FDIR) {
-			rxm->hash.fdir.hash =
-				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-					   & IXGBE_ATR_HASH_MASK);
-			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+			rxm->hash.fdir.hash = rte_le_to_cpu_16(
+					rxd.wb.lower.hi_dword.csum_ip.csum) &
+					IXGBE_ATR_HASH_MASK;
+			rxm->hash.fdir.id = rte_le_to_cpu_16(
+					rxd.wb.lower.hi_dword.csum_ip.ip_id);
 		}
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -1554,7 +1478,6 @@ ixgbe_fill_cluster_head_buf(
 	uint8_t port_id,
 	uint32_t staterr)
 {
-#ifdef RTE_NEXT_ABI
 	uint16_t pkt_info;
 	uint64_t pkt_flags;
 
@@ -1570,23 +1493,6 @@ ixgbe_fill_cluster_head_buf(
 	pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
 	head->ol_flags = pkt_flags;
 	head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
-#else /* RTE_NEXT_ABI */
-	uint32_t hlen_type_rss;
-	uint64_t pkt_flags;
-
-	head->port = port_id;
-
-	/*
-	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
-	 * set in the pkt_flags field.
-	 */
-	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
-	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
-	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
-	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
-	head->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
 
 	if (likely(pkt_flags & PKT_RX_RSS_HASH))
 		head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
@@ -1708,7 +1614,6 @@ next_desc:
 				break;
 			}
 		}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		else if (nb_hold > rxq->rx_free_thresh) {
 			uint16_t next_rdt = rxq->rx_free_trigger;
 
@@ -1727,7 +1632,6 @@ next_desc:
 				break;
 			}
 		}
-#endif
 
 		nb_hold++;
 		rxe = &sw_ring[rx_id];
@@ -1762,7 +1666,7 @@ next_desc:
 			rxe->mbuf = nmb;
 
 			rxm->data_off = RTE_PKTMBUF_HEADROOM;
-			rxdp->read.hdr_addr = dma;
+			rxdp->read.hdr_addr = 0;
 			rxdp->read.pkt_addr = dma;
 		} else
 			rxe->mbuf = NULL;
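
The "read desc statuses backwards" context line above is the subtle part of ixgbe_rx_scan_hw_ring: the NIC completes descriptors in ring order, so capturing the DD bits from the highest index down guarantees that a set bit seen at index j cannot be "fresher" than the indices below it; the forward pass can then count contiguous done descriptors as a safe batch size. A simplified standalone sketch of that scan, under the assumption of in-order completion (plain C; RXDADV_STAT_DD is an illustrative stand-in for the ixgbe flag):

    #include <stdint.h>

    #define LOOK_AHEAD     8
    #define RXDADV_STAT_DD 0x01  /* descriptor-done bit, CPU byte order here */

    /* Count how many of the next LOOK_AHEAD descriptors are complete.
     * Statuses are snapshotted high-to-low, then scanned forward until
     * the first descriptor that is still owned by hardware.
     */
    static int count_done(const volatile uint32_t *status_ring)
    {
        uint32_t s[LOOK_AHEAD];
        int j, nb_dd = 0;

        for (j = LOOK_AHEAD - 1; j >= 0; --j)
            s[j] = status_ring[j];

        for (j = 0; j < LOOK_AHEAD && (s[j] & RXDADV_STAT_DD); ++j)
            nb_dd++;

        return nb_dd;
    }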
@@ -1838,6 +1742,25 @@ next_desc:
 		ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
 					    staterr);
 
+		/*
+		 * Deal with the case when HW CRC strip is disabled.
+		 * That can't happen when LRO is enabled, but still could
+		 * happen for scattered RX mode.
+		 */
+		first_seg->pkt_len -= rxq->crc_len;
+		if (unlikely(rxm->data_len <= rxq->crc_len)) {
+			struct rte_mbuf *lp;
+
+			for (lp = first_seg; lp->next != rxm; lp = lp->next)
+				;
+
+			first_seg->nb_segs--;
+			lp->data_len -= rxq->crc_len - rxm->data_len;
+			lp->next = NULL;
+			rte_pktmbuf_free_seg(rxm);
+		} else
+			rxm->data_len -= rxq->crc_len;
+
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_packet_prefetch((char *)first_seg->buf_addr +
 				    first_seg->data_off);
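
The CRC hunk above handles an edge case worth spelling out: with HW CRC stripping disabled, the 4 CRC bytes may sit entirely in the last segment or straddle the last two. If the tail segment holds nothing but (part of) the CRC, it is unlinked and freed, and whatever CRC bytes remain are trimmed from the new tail. A standalone sketch of the same walk over a singly linked segment chain (the struct here is illustrative; field names mirror rte_mbuf):

    #include <stdint.h>
    #include <stdlib.h>

    struct seg {
        struct seg *next;
        uint16_t    data_len;
    };

    /* Trim 'crc_len' bytes of CRC from the tail of a segment chain whose
     * head tracks the total in 'pkt_len' and segment count in 'nb_segs'. */
    static void trim_crc(struct seg *first, struct seg *last,
                         uint32_t *pkt_len, uint16_t *nb_segs,
                         uint16_t crc_len)
    {
        *pkt_len -= crc_len;
        if (last->data_len <= crc_len) {
            struct seg *lp;

            /* find the second-to-last segment */
            for (lp = first; lp->next != last; lp = lp->next)
                ;
            /* CRC straddles two segments: trim what is left of it */
            lp->data_len -= crc_len - last->data_len;
            lp->next = NULL;
            (*nb_segs)--;
            free(last);    /* rte_pktmbuf_free_seg() in the driver */
        } else {
            last->data_len -= crc_len;
        }
    }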
@@ -2001,7 +1924,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 	prev = (uint16_t) (txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
 		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-		txd->wb.status = IXGBE_TXD_STAT_DD;
+		txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
 		txe[i].mbuf = NULL;
 		txe[i].last_id = i;
 		txe[prev].next_id = i;
@@ -2040,23 +1963,23 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
 			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx code path");
+		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
 #ifdef RTE_IXGBE_INC_VECTOR
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
 				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
 					ixgbe_txq_vec_setup(txq) == 0)) {
-			PMD_INIT_LOG(INFO, "Vector tx enabled.");
+			PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
 		} else
 #endif
 		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
 	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx code path");
-		PMD_INIT_LOG(INFO,
+		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+		PMD_INIT_LOG(DEBUG,
 				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
 				(unsigned long)txq->txq_flags,
 				(unsigned long)IXGBE_SIMPLE_FLAGS);
-		PMD_INIT_LOG(INFO,
+		PMD_INIT_LOG(DEBUG,
 				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 				(unsigned long)txq->tx_rs_thresh,
 				(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
@@ -2285,7 +2208,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 				rxq->sw_ring[i].mbuf = NULL;
 			}
 		}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		if (rxq->rx_nb_avail) {
 			for (i = 0; i < rxq->rx_nb_avail; ++i) {
 				struct rte_mbuf *mb;
@@ -2294,7 +2216,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 			}
 			rxq->rx_nb_avail = 0;
 		}
-#endif
 	}
 
 	if (rxq->sw_sc_ring)
@@ -2331,11 +2252,7 @@ ixgbe_dev_rx_queue_release(void *rxq)
  *           function must be used.
  */
 static inline int __attribute__((cold))
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
 {
 	int ret = 0;
 
@@ -2348,7 +2265,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 	 * Scattered packets are not supported.  This should be checked
 	 * outside of this function.
 	 */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
 			     "rxq->rx_free_thresh=%d, "
@@ -2377,9 +2293,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 			     RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = -EINVAL;
 	}
-#else
-	ret = -EINVAL;
-#endif
 
 	return ret;
 }
@@ -2415,7 +2328,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 		rxq->rx_ring[i] = zeroed_desc;
 	}
 
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	/*
 	 * initialize extra software ring entries. Space for these extra
 	 * entries is always allocated
@@ -2428,11 +2340,15 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 	rxq->rx_nb_avail = 0;
 	rxq->rx_next_avail = 0;
 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
 	rxq->rx_tail = 0;
 	rxq->nb_rx_hold = 0;
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+	rxq->rxrearm_start = 0;
+	rxq->rxrearm_nb = 0;
+#endif
 }
 
 int __attribute__((cold))
@@ -2614,7 +2530,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 	while ((desc < rxq->nb_rx_desc) &&
-		(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+		(rxdp->wb.upper.status_error &
+			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
 		desc += IXGBE_RXQ_SCAN_INTERVAL;
 		rxdp += IXGBE_RXQ_SCAN_INTERVAL;
 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
@@ -2639,7 +2556,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 		desc -= rxq->nb_rx_desc;
 
 	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+	return !!(rxdp->wb.upper.status_error &
+			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
 }
 
 void __attribute__((cold))
@@ -2729,11 +2647,13 @@ ixgbe_rss_disable(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw;
 	uint32_t mrqc;
+	uint32_t mrqc_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	mrqc &= ~IXGBE_MRQC_RSSEN;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
 
 static void
@@ -2744,6 +2664,11 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	uint32_t rss_key;
 	uint64_t rss_hf;
 	uint16_t i;
+	uint32_t mrqc_reg;
+	uint32_t rssrk_reg;
+
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
 
 	hash_key = rss_conf->rss_key;
 	if (hash_key != NULL) {
@@ -2753,7 +2678,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 			rss_key |= hash_key[(i * 4) + 1] << 8;
 			rss_key |= hash_key[(i * 4) + 2] << 16;
 			rss_key |= hash_key[(i * 4) + 3] << 24;
-			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+			IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
 		}
 	}
 
@@ -2778,7 +2703,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
 	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
 
 int
@@ -2788,9 +2713,17 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	struct ixgbe_hw *hw;
 	uint32_t mrqc;
 	uint64_t rss_hf;
+	uint32_t mrqc_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!ixgbe_rss_update_sp(hw->mac.type)) {
+		PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+			"NIC.");
+		return -ENOTSUP;
+	}
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+
 	/*
 	 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
 	 *     "RSS enabling cannot be done dynamically while it must be
@@ -2801,7 +2734,7 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	 * disabled at initialization time.
 	 */
 	rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
 		if (rss_hf != 0) /* Enable RSS */
 			return -(EINVAL);
@@ -2824,13 +2757,17 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint32_t rss_key;
 	uint64_t rss_hf;
 	uint16_t i;
+	uint32_t mrqc_reg;
+	uint32_t rssrk_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
 	hash_key = rss_conf->rss_key;
 	if (hash_key != NULL) {
 		/* Return RSS hash key */
 		for (i = 0; i < 10; i++) {
-			rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+			rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
 			hash_key[(i * 4)] = rss_key & 0x000000FF;
 			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
 			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
@@ -2839,7 +2776,7 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 
 	/* Get RSS functions configured in MRQC register */
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
 		rss_conf->rss_hf = 0;
 		return 0;
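
The RSSRK hunks above pack the 40-byte RSS key into ten 32-bit registers, four bytes per register with the lowest-indexed byte in the least significant position; ixgbe_dev_rss_hash_conf_get() unpacks it with the mirror-image shifts. A minimal round-trip sketch of that packing (plain C; write_reg()/read_reg() are stand-ins for the IXGBE register-array accessors):

    #include <stdint.h>

    static uint32_t regs[10];                 /* stand-in for RSSRK[0..9] */
    static void write_reg(int i, uint32_t v) { regs[i] = v; }
    static uint32_t read_reg(int i)          { return regs[i]; }

    /* Pack a 40-byte RSS key into ten registers, low byte first. */
    static void rss_key_set(const uint8_t key[40])
    {
        for (int i = 0; i < 10; i++)
            write_reg(i, (uint32_t)key[4 * i] |
                         (uint32_t)key[4 * i + 1] << 8 |
                         (uint32_t)key[4 * i + 2] << 16 |
                         (uint32_t)key[4 * i + 3] << 24);
    }

    /* Mirror image of rss_key_set(), as in the conf_get hunk above. */
    static void rss_key_get(uint8_t key[40])
    {
        for (int i = 0; i < 10; i++) {
            uint32_t v = read_reg(i);
            key[4 * i]     = v & 0xFF;
            key[4 * i + 1] = (v >> 8) & 0xFF;
            key[4 * i + 2] = (v >> 16) & 0xFF;
            key[4 * i + 3] = (v >> 24) & 0xFF;
        }
    }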
@@ -2875,22 +2812,28 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
 	uint32_t reta;
 	uint16_t i;
 	uint16_t j;
+	uint16_t sp_reta_size;
+	uint32_t reta_reg;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
 	/*
 	 * Fill in redirection table
 	 * The byte-swap is needed because NIC registers are in
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < 128; i++, j++) {
+	for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
 		if (j == dev->data->nb_rx_queues)
 			j = 0;
 		reta = (reta << 8) | j;
 		if ((i & 3) == 3)
-			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+			IXGBE_WRITE_REG(hw, reta_reg,
 					rte_bswap32(reta));
 	}
 
@@ -2910,6 +2853,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
 
 #define NUM_VFTA_REGISTERS 128
 #define NIC_RX_BUFFER_SIZE 0x200
+#define X550_RX_BUFFER_SIZE 0x180
 
 static void
 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
@@ -2938,7 +2882,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	 * RXPBSIZE
 	 * split rx buffer up into sections, each for 1 traffic class
 	 */
-	pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
+		break;
+	default:
+		pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+		break;
+	}
 	for (i = 0 ; i < nb_tcs; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -2976,7 +2928,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	 * mapping is done with 3 bits per priority,
 	 * so shift by i*3 each time
 	 */
-		queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+		queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
 
 	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
 
@@ -3111,7 +3063,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_rx_conf->dcb_queue[i];
+		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3139,7 +3091,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_tx_conf->dcb_queue[i];
+		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3161,7 +3113,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = rx_conf->dcb_queue[i];
+		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3182,7 +3134,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = tx_conf->dcb_queue[i];
+		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3217,9 +3169,13 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
 			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
 				IXGBE_MRQC_VMDQRT4TCEN;
 		else {
+			/* no matter whether the mode is DCB or DCB_RSS, just
+			 * set the MRQE to RTRSSxTCEN. RSS is controlled
+			 * by the RSS_FIELD bits
+			 */
 			IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
 			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-				IXGBE_MRQC_RT4TCEN;
+				IXGBE_MRQC_RTRSS4TCEN;
 		}
 	}
 	if (dcb_config->num_tcs.pg_tcs == 8) {
@@ -3229,7 +3185,7 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
 		else {
 			IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
 			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-				IXGBE_MRQC_RT8TCEN;
+				IXGBE_MRQC_RTRSS8TCEN;
 		}
 	}
 
@@ -3312,7 +3268,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 {
 	int     ret = 0;
 	uint8_t i,pfc_en,nb_tcs;
-	uint16_t pbsize;
+	uint16_t pbsize, rx_buffer_size;
 	uint8_t config_dcb_rx = 0;
 	uint8_t config_dcb_tx = 0;
 	uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
@@ -3334,16 +3290,17 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		 *get dcb and VT rx configuration parameters
 		 *from rte_eth_conf
 		 */
-		ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+		ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
 		/*Configure general VMDQ and DCB RX parameters*/
 		ixgbe_vmdq_dcb_configure(dev);
 	}
 	break;
 	case ETH_MQ_RX_DCB:
+	case ETH_MQ_RX_DCB_RSS:
 		dcb_config->vt_mode = false;
 		config_dcb_rx = DCB_RX_CONFIG;
 		/* Get dcb TX configuration parameters from rte_eth_conf */
-		ixgbe_dcb_rx_config(dev,dcb_config);
+		ixgbe_dcb_rx_config(dev, dcb_config);
 		/*Configure general DCB RX parameters*/
 		ixgbe_dcb_rx_hw_config(hw, dcb_config);
 		break;
@@ -3365,7 +3322,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		dcb_config->vt_mode = false;
 		config_dcb_tx = DCB_TX_CONFIG;
 		/*get DCB TX configuration parameters from rte_eth_conf*/
-		ixgbe_dcb_tx_config(dev,dcb_config);
+		ixgbe_dcb_tx_config(dev, dcb_config);
 		/*Configure general DCB TX parameters*/
 		ixgbe_dcb_tx_hw_config(hw, dcb_config);
 		break;
@@ -3403,9 +3360,19 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		}
 	}
 
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		rx_buffer_size = X550_RX_BUFFER_SIZE;
+		break;
+	default:
+		rx_buffer_size = NIC_RX_BUFFER_SIZE;
+		break;
+	}
+
 	if(config_dcb_rx) {
 		/* Set RX buffer size */
-		pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
 		for (i = 0 ; i < nb_tcs; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
@@ -3461,7 +3428,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 
 	/* Check if the PFC is supported */
 	if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
-		pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/*
 			* If the TC count is 8,and the default high_water is 48,
@@ -3496,14 +3463,15 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 
 	/* check support mq_mode for DCB */
 	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
 		return;
 
 	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
 		return;
 
 	/** Configure DCB hardware **/
-	ixgbe_dcb_hw_configure(dev,dcb_cfg);
+	ixgbe_dcb_hw_configure(dev, dcb_cfg);
 
 	return;
 }
@@ -3661,7 +3629,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
 		dma_addr =
 			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
 		rxd = &rxq->rx_ring[i];
-		rxd->read.hdr_addr = dma_addr;
+		rxd->read.hdr_addr = 0;
 		rxd->read.pkt_addr = dma_addr;
 		rxe[i].mbuf = mbuf;
 	}
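
The RETA loop above is easy to misread: each 32-bit RETA register holds four one-byte queue indices, so the loop accumulates entries by shifting the working value left 8 bits and writes once every fourth iteration, byte-swapped because the registers are little-endian. A standalone sketch of the same packing (plain C; write_reta() stands in for the IXGBE_WRITE_REG call and __builtin_bswap32, a GCC/Clang builtin, for rte_bswap32):

    #include <stdint.h>
    #include <stdio.h>

    static void write_reta(int reg, uint32_t val)
    {
        printf("RETA[%d] <- 0x%08x\n", reg, val);
    }

    /* Distribute 'nb_queues' round-robin over a 'reta_size'-entry table,
     * packing four 8-bit entries per 32-bit register. */
    static void reta_fill(uint16_t reta_size, uint16_t nb_queues)
    {
        uint32_t reta = 0;
        uint16_t i, j;

        for (i = 0, j = 0; i < reta_size; i++, j++) {
            if (j == nb_queues)
                j = 0;
            reta = (reta << 8) | j;
            if ((i & 3) == 3)    /* four entries accumulated */
                write_reta(i >> 2, __builtin_bswap32(reta));
        }
    }

    int main(void)
    {
        reta_fill(128, 4);    /* 82599-style 128-entry table, 4 Rx queues */
        return 0;
    }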
@@ -3745,21 +3713,25 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 		 * any DCB/RSS w/o VMDq multi-queue setting
 		 */
 		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-			ixgbe_rss_configure(dev);
-			break;
+		case ETH_MQ_RX_RSS:
+		case ETH_MQ_RX_DCB_RSS:
+		case ETH_MQ_RX_VMDQ_RSS:
+			ixgbe_rss_configure(dev);
+			break;
 
-		case ETH_MQ_RX_VMDQ_DCB:
-			ixgbe_vmdq_dcb_configure(dev);
-			break;
+		case ETH_MQ_RX_VMDQ_DCB:
+			ixgbe_vmdq_dcb_configure(dev);
+			break;
 
-		case ETH_MQ_RX_VMDQ_ONLY:
-			ixgbe_vmdq_rx_hw_configure(dev);
-			break;
+		case ETH_MQ_RX_VMDQ_ONLY:
+			ixgbe_vmdq_rx_hw_configure(dev);
+			break;
 
-		case ETH_MQ_RX_NONE:
-			/* if mq_mode is none, disable rss mode.*/
-		default: ixgbe_rss_disable(dev);
+		case ETH_MQ_RX_NONE:
+		default:
+			/* if mq_mode is none, disable rss mode.*/
+			ixgbe_rss_disable(dev);
+			break;
 		}
 	} else {
 		/*
@@ -3960,11 +3932,11 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 */
 	if (dev->data->lro) {
 		if (adapter->rx_bulk_alloc_allowed) {
-			PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
+			PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
					   "allocation version");
 			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
 		} else {
-			PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
+			PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
					   "allocation version");
			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
 		}
@@ -3980,7 +3952,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 
 			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
 		} else if (adapter->rx_bulk_alloc_allowed) {
-			PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
+			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
					   "allocation callback (port=%d).",
				     dev->data->port_id);
 			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
@@ -4002,8 +3974,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 *    - Single buffer allocation (the simplest one)
 	 */
 	} else if (adapter->rx_vec_allowed) {
-		PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
-				   "burst size no less than 32.");
+		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+				    "burst size no less than %d (port=%d).",
+			     RTE_IXGBE_DESCS_PER_LOOP,
+			     dev->data->port_id);
 
 		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
 	} else if (adapter->rx_bulk_alloc_allowed) {
@@ -4015,9 +3989,8 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
-				    "satisfied, or Scattered Rx is requested, "
-				    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
-				    "is not enabled (port=%d).",
+				    "satisfied, or Scattered Rx is requested "
+				    "(port=%d).",
			     dev->data->port_id);
 
 		dev->rx_pkt_burst = ixgbe_recv_pkts;
@@ -4171,7 +4144,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	dev->data->lro = 1;
 
-	PMD_INIT_LOG(INFO, "enabling LRO mode");
+	PMD_INIT_LOG(DEBUG, "enabling LRO mode");
 
 	return 0;
 }
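
Several hunks above replace hard-coded IXGBE_MRQC/IXGBE_RSSRK(0)/IXGBE_RETA(n) register names with lookups through ixgbe_mrqc_reg_get(), ixgbe_rssrk_reg_get() and ixgbe_reta_reg_get(), because X550-class MACs expose these RSS registers at different offsets (and with a larger redirection table) than 82599/X540. Those helpers are defined outside this file; the sketch below only illustrates the dispatch-by-MAC-type pattern they follow, with deliberately fake offsets rather than the real register map:

    #include <stdint.h>

    enum mac_type { MAC_82599, MAC_X540, MAC_X550, MAC_X550EM_X };

    /* Offsets below are placeholders, not the real ixgbe register map. */
    #define MRQC_OFF_LEGACY 0x1000
    #define MRQC_OFF_X550   0x2000

    /* Select the MRQC offset by MAC type, in the spirit of
     * ixgbe_mrqc_reg_get(); callers then use one code path for
     * reads and writes regardless of the underlying silicon. */
    static uint32_t mrqc_reg_get(enum mac_type mac)
    {
        switch (mac) {
        case MAC_X550:
        case MAC_X550EM_X:
            return MRQC_OFF_X550;
        default:
            return MRQC_OFF_LEGACY;
        }
    }

Centralizing the lookup keeps the RSS configure/disable/get paths identical for every MAC generation, which is why the patch threads mrqc_reg/rssrk_reg/reta_reg variables through them instead of branching at each register access.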