X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=8d5576be685b5e1f3c3bc8d7df100486d885c8a4;hb=d909af8f72ca3f8ab4fe1942abfb4f53e15ff8bc;hp=af7e22279f8276a71692112f768000c65a627d50;hpb=b50c10cc892ecd2bf84cf3bd046e51ce47c9c5bd;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index af7e22279f..8d5576be68 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -94,7 +94,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp) m = __rte_mbuf_raw_alloc(mp); __rte_mbuf_sanity_check_raw(m, 0); - return (m); + return m; } @@ -130,7 +130,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) /* check DD bit on threshold descriptor */ status = txq->tx_ring[txq->tx_next_dd].wb.status; - if (! (status & IXGBE_ADVTXD_STAT_DD)) + if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))) return 0; /* @@ -171,15 +171,18 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) int i; for (i = 0; i < 4; ++i, ++txdp, ++pkts) { - buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts); + buf_dma_addr = rte_mbuf_data_dma_addr(*pkts); pkt_len = (*pkts)->data_len; /* write data to descriptor */ - txdp->read.buffer_addr = buf_dma_addr; + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txdp->read.cmd_type_len = - ((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + txdp->read.olinfo_status = - (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_prefetch0(&(*pkts)->pool); } } @@ -191,15 +194,15 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) uint64_t buf_dma_addr; uint32_t pkt_len; - buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts); + buf_dma_addr = rte_mbuf_data_dma_addr(*pkts); pkt_len = (*pkts)->data_len; /* write data to descriptor */ - txdp->read.buffer_addr = buf_dma_addr; + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); txdp->read.cmd_type_len = - ((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len); txdp->read.olinfo_status = - (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); rte_prefetch0(&(*pkts)->pool); } @@ -412,7 +415,6 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; - tx_offload_mask.l4_len |= ~0; break; case PKT_TX_SCTP_CKSUM: type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP | @@ -466,7 +468,7 @@ what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags, } /* Mismatch, use the previous context */ - return (IXGBE_CTX_NUM); + return IXGBE_CTX_NUM; } static inline uint32_t @@ -511,6 +513,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) uint16_t nb_tx_desc = txq->nb_tx_desc; uint16_t desc_to_clean_to; uint16_t nb_tx_to_clean; + uint32_t status; /* Determine the last descriptor needing to be cleaned */ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); @@ -519,7 +522,8 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) /* Check to make sure the last descriptor to clean is done */ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; - if (! 
(txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD)) + status = txr[desc_to_clean_to].wb.status; + if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) { PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done" @@ -557,7 +561,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); /* No Error */ - return (0); + return 0; } uint16_t @@ -568,7 +572,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, struct ixgbe_tx_entry *sw_ring; struct ixgbe_tx_entry *txe, *txn; volatile union ixgbe_adv_tx_desc *txr; - volatile union ixgbe_adv_tx_desc *txd; + volatile union ixgbe_adv_tx_desc *txd, *txp; struct rte_mbuf *tx_pkt; struct rte_mbuf *m_seg; uint64_t buf_dma_addr; @@ -591,6 +595,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, txr = txq->tx_ring; tx_id = txq->tx_tail; txe = &sw_ring[tx_id]; + txp = NULL; /* Determine if the descriptor ring needs to be cleaned. */ if (txq->nb_tx_free < txq->tx_free_thresh) @@ -634,6 +639,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, */ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx); + if (txp != NULL && + nb_used + txq->nb_tx_used >= txq->tx_rs_thresh) + /* set RS on the previous packet in the burst */ + txp->read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_TXD_CMD_RS); + /* * The number of descriptors that must be allocated for a * packet is the number of segments of that packet, plus 1 @@ -672,7 +683,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, if (ixgbe_xmit_cleanup(txq) != 0) { /* Could not clean any descriptors */ if (nb_tx == 0) - return (0); + return 0; goto end_of_tx; } @@ -701,7 +712,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * descriptors */ if (nb_tx == 0) - return (0); + return 0; goto end_of_tx; } } @@ -805,7 +816,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * Set up Transmit Data Descriptor. 
*/ slen = m_seg->data_len; - buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg); + buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); txd->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); txd->read.cmd_type_len = @@ -836,10 +847,18 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* Update txq RS bit counters */ txq->nb_tx_used = 0; - } + txp = NULL; + } else + txp = txd; + txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len); } + end_of_tx: + /* set RS on last packet in the burst */ + if (txp != NULL) + txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS); + rte_wmb(); /* @@ -851,7 +870,7 @@ end_of_tx: IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); txq->tx_tail = tx_id; - return (nb_tx); + return nb_tx; } /********************************************************************* @@ -859,7 +878,6 @@ end_of_tx: * RX functions * **********************************************************************/ -#ifdef RTE_NEXT_ABI #define IXGBE_PACKET_TYPE_IPV4 0X01 #define IXGBE_PACKET_TYPE_IPV4_TCP 0X11 #define IXGBE_PACKET_TYPE_IPV4_UDP 0X21 @@ -962,43 +980,6 @@ ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info) return ip_rss_types_map[pkt_info & 0XF]; #endif } -#else /* RTE_NEXT_ABI */ -static inline uint64_t -rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs) -{ - uint64_t pkt_flags; - - static const uint64_t ip_pkt_types_map[16] = { - 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT, - PKT_RX_IPV6_HDR, 0, 0, 0, - PKT_RX_IPV6_HDR_EXT, 0, 0, 0, - PKT_RX_IPV6_HDR_EXT, 0, 0, 0, - }; - - static const uint64_t ip_rss_types_map[16] = { - 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, - 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, - PKT_RX_RSS_HASH, 0, 0, 0, - 0, 0, 0, PKT_RX_FDIR, - }; - -#ifdef RTE_LIBRTE_IEEE1588 - static uint64_t ip_pkt_etqf_map[8] = { - 0, 0, 0, PKT_RX_IEEE1588_PTP, - 0, 0, 0, 0, - }; - - pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? - ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] : - ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]; -#else - pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 : - ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]; - -#endif - return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]; -} -#endif /* RTE_NEXT_ABI */ static inline uint64_t rx_desc_status_to_pkt_flags(uint32_t rx_status) @@ -1022,6 +1003,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status) static inline uint64_t rx_desc_error_to_pkt_flags(uint32_t rx_status) { + uint64_t pkt_flags; + /* * Bit 31: IPE, IPv4 checksum error * Bit 30: L4I, L4I integrity error @@ -1030,11 +1013,17 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD }; - return error_to_pkt_flags_map[(rx_status >> + pkt_flags = error_to_pkt_flags_map[(rx_status >> IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; + + if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) && + (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) { + pkt_flags |= PKT_RX_EIP_CKSUM_BAD; + } + + return pkt_flags; } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC /* * LOOK_AHEAD defines how many desc statuses to check beyond the * current descriptor. 
@@ -1054,22 +1043,19 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) struct rte_mbuf *mb; uint16_t pkt_len; uint64_t pkt_flags; -#ifdef RTE_NEXT_ABI int nb_dd; uint32_t s[LOOK_AHEAD]; uint16_t pkt_info[LOOK_AHEAD]; -#else - int s[LOOK_AHEAD], nb_dd; -#endif /* RTE_NEXT_ABI */ int i, j, nb_rx = 0; - + uint32_t status; /* get references to current descriptor and S/W ring entry */ rxdp = &rxq->rx_ring[rxq->rx_tail]; rxep = &rxq->sw_ring[rxq->rx_tail]; + status = rxdp->wb.upper.status_error; /* check to make sure there is at least 1 packet to receive */ - if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) + if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) return 0; /* @@ -1081,13 +1067,11 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) { /* Read desc statuses backwards to avoid race condition */ for (j = LOOK_AHEAD-1; j >= 0; --j) - s[j] = rxdp[j].wb.upper.status_error; + s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error); -#ifdef RTE_NEXT_ABI for (j = LOOK_AHEAD - 1; j >= 0; --j) pkt_info[j] = rxdp[j].wb.lower.lo_dword. hs_rss.pkt_info; -#endif /* RTE_NEXT_ABI */ /* Compute how many status bits were set */ nb_dd = 0; @@ -1099,13 +1083,13 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) /* Translate descriptor info to mbuf format */ for (j = 0; j < nb_dd; ++j) { mb = rxep[j].mbuf; - pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len); + pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) - + rxq->crc_len; mb->data_len = pkt_len; mb->pkt_len = pkt_len; mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan); /* convert descriptor fields to rte mbuf flags */ -#ifdef RTE_NEXT_ABI pkt_flags = rx_desc_status_to_pkt_flags(s[j]); pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); pkt_flags |= @@ -1113,22 +1097,16 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) mb->ol_flags = pkt_flags; mb->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]); -#else /* RTE_NEXT_ABI */ - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags( - rxdp[j].wb.lower.lo_dword.data); - /* reuse status field from scan list */ - pkt_flags |= rx_desc_status_to_pkt_flags(s[j]); - pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); - mb->ol_flags = pkt_flags; -#endif /* RTE_NEXT_ABI */ if (likely(pkt_flags & PKT_RX_RSS_HASH)) - mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss; + mb->hash.rss = rte_le_to_cpu_32( + rxdp[j].wb.lower.hi_dword.rss); else if (pkt_flags & PKT_RX_FDIR) { - mb->hash.fdir.hash = - (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum) - & IXGBE_ATR_HASH_MASK); - mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id; + mb->hash.fdir.hash = rte_le_to_cpu_16( + rxdp[j].wb.lower.hi_dword.csum_ip.csum) & + IXGBE_ATR_HASH_MASK; + mb->hash.fdir.id = rte_le_to_cpu_16( + rxdp[j].wb.lower.hi_dword.csum_ip.ip_id); } } @@ -1167,7 +1145,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf) diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, rxq->rx_free_thresh); if (unlikely(diag != 0)) - return (-ENOMEM); + return -ENOMEM; rxdp = &rxq->rx_ring[alloc_idx]; for (i = 0; i < rxq->rx_free_thresh; ++i) { @@ -1183,8 +1161,8 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf) mb->data_off = RTE_PKTMBUF_HEADROOM; /* populate the descriptors */ - dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb)); - rxdp[i].read.hdr_addr = dma_addr; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb)); + rxdp[i].read.hdr_addr = 0; rxdp[i].read.pkt_addr = dma_addr; } @@ -1305,24 +1283,6 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf 
**rx_pkts, return nb_rx; } -#else - -/* Stub to avoid extra ifdefs */ -static uint16_t -ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue, - __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) -{ - return 0; -} - -static inline int -ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq, - __rte_unused bool reset_mbuf) -{ - return -ENOMEM; -} -#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */ - uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1337,11 +1297,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, union ixgbe_adv_rx_desc rxd; uint64_t dma_addr; uint32_t staterr; -#ifdef RTE_NEXT_ABI uint32_t pkt_info; -#else - uint32_t hlen_type_rss; -#endif uint16_t pkt_len; uint16_t rx_id; uint16_t nb_rx; @@ -1365,7 +1321,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxdp = &rx_ring[rx_id]; staterr = rxdp->wb.upper.status_error; - if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) break; rxd = *rxdp; @@ -1432,8 +1388,8 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm = rxe->mbuf; rxe->mbuf = nmb; dma_addr = - rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); - rxdp->read.hdr_addr = dma_addr; + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rxdp->read.hdr_addr = 0; rxdp->read.pkt_addr = dma_addr; /* @@ -1459,7 +1415,6 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->data_len = pkt_len; rxm->port = rxq->port_id; -#ifdef RTE_NEXT_ABI pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss. pkt_info); /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ @@ -1471,24 +1426,16 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info); rxm->ol_flags = pkt_flags; rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info); -#else /* RTE_NEXT_ABI */ - hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); - /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ - rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); - - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); - pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); - pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); - rxm->ol_flags = pkt_flags; -#endif /* RTE_NEXT_ABI */ if (likely(pkt_flags & PKT_RX_RSS_HASH)) - rxm->hash.rss = rxd.wb.lower.hi_dword.rss; + rxm->hash.rss = rte_le_to_cpu_32( + rxd.wb.lower.hi_dword.rss); else if (pkt_flags & PKT_RX_FDIR) { - rxm->hash.fdir.hash = - (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum) - & IXGBE_ATR_HASH_MASK); - rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id; + rxm->hash.fdir.hash = rte_le_to_cpu_16( + rxd.wb.lower.hi_dword.csum_ip.csum) & + IXGBE_ATR_HASH_MASK; + rxm->hash.fdir.id = rte_le_to_cpu_16( + rxd.wb.lower.hi_dword.csum_ip.ip_id); } /* * Store the mbuf address into the next entry of the array @@ -1520,7 +1467,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_hold = 0; } rxq->nb_rx_hold = nb_hold; - return (nb_rx); + return nb_rx; } /** @@ -1554,7 +1501,6 @@ ixgbe_fill_cluster_head_buf( uint8_t port_id, uint32_t staterr) { -#ifdef RTE_NEXT_ABI uint16_t pkt_info; uint64_t pkt_flags; @@ -1570,23 +1516,6 @@ ixgbe_fill_cluster_head_buf( pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info); head->ol_flags = pkt_flags; head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info); -#else /* RTE_NEXT_ABI */ - uint32_t hlen_type_rss; - uint64_t pkt_flags; - - head->port = 
port_id; - - /* - * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is - * set in the pkt_flags field. - */ - head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); - hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); - pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); - pkt_flags |= rx_desc_status_to_pkt_flags(staterr); - pkt_flags |= rx_desc_error_to_pkt_flags(staterr); - head->ol_flags = pkt_flags; -#endif /* RTE_NEXT_ABI */ if (likely(pkt_flags & PKT_RX_RSS_HASH)) head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss); @@ -1708,7 +1637,6 @@ next_desc: break; } } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC else if (nb_hold > rxq->rx_free_thresh) { uint16_t next_rdt = rxq->rx_free_trigger; @@ -1727,7 +1655,6 @@ next_desc: break; } } -#endif nb_hold++; rxe = &sw_ring[rx_id]; @@ -1754,7 +1681,7 @@ next_desc: if (!bulk_alloc) { __le64 dma = - rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); /* * Update RX descriptor with the physical address of the * new data buffer of the new allocated mbuf. @@ -1762,7 +1689,7 @@ next_desc: rxe->mbuf = nmb; rxm->data_off = RTE_PKTMBUF_HEADROOM; - rxdp->read.hdr_addr = dma; + rxdp->read.hdr_addr = 0; rxdp->read.pkt_addr = dma; } else rxe->mbuf = NULL; @@ -1838,6 +1765,25 @@ next_desc: ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id, staterr); + /* + * Deal with the case, when HW CRC srip is disabled. + * That can't happen when LRO is enabled, but still could + * happen for scattered RX mode. + */ + first_seg->pkt_len -= rxq->crc_len; + if (unlikely(rxm->data_len <= rxq->crc_len)) { + struct rte_mbuf *lp; + + for (lp = first_seg; lp->next != rxm; lp = lp->next) + ; + + first_seg->nb_segs--; + lp->data_len -= rxq->crc_len - rxm->data_len; + lp->next = NULL; + rte_pktmbuf_free_seg(rxm); + } else + rxm->data_len -= rxq->crc_len; + /* Prefetch data of first segment, if configured to do so. */ rte_packet_prefetch((char *)first_seg->buf_addr + first_seg->data_off); @@ -1897,54 +1843,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, * **********************************************************************/ -/* - * Rings setup and release. - * - * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be - * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will - * also optimize cache line size effect. H/W supports up to cache line size 128. - */ -#define IXGBE_ALIGN 128 - -/* - * Maximum number of Ring Descriptors. - * - * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring - * descriptors should meet the following condition: - * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0 - */ -#define IXGBE_MIN_RING_DESC 32 -#define IXGBE_MAX_RING_DESC 4096 - -/* - * Create memzone for HW rings. malloc can't be used as the physical address is - * needed. If the memzone is already created, then this function returns a ptr - * to the old one. 
- */ -static const struct rte_memzone * __attribute__((cold)) -ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, - uint16_t queue_id, uint32_t ring_size, int socket_id) -{ - char z_name[RTE_MEMZONE_NAMESIZE]; - const struct rte_memzone *mz; - - snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - dev->driver->pci_drv.name, ring_name, - dev->data->port_id, queue_id); - - mz = rte_memzone_lookup(z_name); - if (mz) - return mz; - -#ifdef RTE_LIBRTE_XEN_DOM0 - return rte_memzone_reserve_bounded(z_name, ring_size, - socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M); -#else - return rte_memzone_reserve_aligned(z_name, ring_size, - socket_id, 0, IXGBE_ALIGN); -#endif -} - static void __attribute__((cold)) ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) { @@ -2001,7 +1899,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) prev = (uint16_t) (txq->nb_tx_desc - 1); for (i = 0; i < txq->nb_tx_desc; i++) { volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; - txd->wb.status = IXGBE_TXD_STAT_DD; + txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD); txe[i].mbuf = NULL; txe[i].last_id = i; txe[prev].next_id = i; @@ -2040,23 +1938,23 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) /* Use a simple Tx queue (no offloads, no multi segs) if possible */ if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { - PMD_INIT_LOG(INFO, "Using simple tx code path"); + PMD_INIT_LOG(DEBUG, "Using simple tx code path"); #ifdef RTE_IXGBE_INC_VECTOR if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && (rte_eal_process_type() != RTE_PROC_PRIMARY || ixgbe_txq_vec_setup(txq) == 0)) { - PMD_INIT_LOG(INFO, "Vector tx enabled."); + PMD_INIT_LOG(DEBUG, "Vector tx enabled."); dev->tx_pkt_burst = ixgbe_xmit_pkts_vec; } else #endif dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; } else { - PMD_INIT_LOG(INFO, "Using full-featured tx code path"); - PMD_INIT_LOG(INFO, + PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); + PMD_INIT_LOG(DEBUG, " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]", (unsigned long)txq->txq_flags, (unsigned long)IXGBE_SIMPLE_FLAGS); - PMD_INIT_LOG(INFO, + PMD_INIT_LOG(DEBUG, " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]", (unsigned long)txq->tx_rs_thresh, (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST); @@ -2084,9 +1982,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * It must not exceed hardware maximum, and must be multiple * of IXGBE_ALIGN. */ - if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 || - (nb_desc > IXGBE_MAX_RING_DESC) || - (nb_desc < IXGBE_MIN_RING_DESC)) { + if (nb_desc % IXGBE_TXD_ALIGN != 0 || + (nb_desc > IXGBE_MAX_RING_DESC) || + (nb_desc < IXGBE_MIN_RING_DESC)) { return -EINVAL; } @@ -2116,9 +2014,16 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); if (tx_rs_thresh >= (nb_desc - 2)) { PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number " - "of TX descriptors minus 2. (tx_rs_thresh=%u " - "port=%d queue=%d)", (unsigned int)tx_rs_thresh, - (int)dev->data->port_id, (int)queue_idx); + "of TX descriptors minus 2. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. 
" + "(tx_rs_thresh=%u port=%d queue=%d)", + DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); return -(EINVAL); } if (tx_free_thresh >= (nb_desc - 3)) { @@ -2172,19 +2077,19 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue), RTE_CACHE_LINE_SIZE, socket_id); if (txq == NULL) - return (-ENOMEM); + return -ENOMEM; /* * Allocate TX ring hardware descriptors. A memzone large enough to * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. */ - tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC, - socket_id); + IXGBE_ALIGN, socket_id); if (tz == NULL) { ixgbe_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } txq->nb_tx_desc = nb_desc; @@ -2211,11 +2116,8 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx)); else txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx)); -#ifndef RTE_LIBRTE_XEN_DOM0 - txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr; -#else + txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); -#endif txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr; /* Allocate software ring */ @@ -2224,7 +2126,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE, socket_id); if (txq->sw_ring == NULL) { ixgbe_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); @@ -2237,7 +2139,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, dev->data->tx_queues[queue_idx] = txq; - return (0); + return 0; } /** @@ -2270,15 +2172,21 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) { unsigned i; +#ifdef RTE_IXGBE_INC_VECTOR + /* SSE Vector driver has a different way of releasing mbufs. */ + if (rxq->rx_using_sse) { + ixgbe_rx_queue_release_mbufs_vec(rxq); + return; + } +#endif + if (rxq->sw_ring != NULL) { for (i = 0; i < rxq->nb_rx_desc; i++) { - if (rxq->sw_ring[i].mbuf != NULL && - rte_mbuf_refcnt_read(rxq->sw_ring[i].mbuf)) { + if (rxq->sw_ring[i].mbuf != NULL) { rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); rxq->sw_ring[i].mbuf = NULL; } } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC if (rxq->rx_nb_avail) { for (i = 0; i < rxq->rx_nb_avail; ++i) { struct rte_mbuf *mb; @@ -2287,7 +2195,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) } rxq->rx_nb_avail = 0; } -#endif } if (rxq->sw_sc_ring) @@ -2324,11 +2231,7 @@ ixgbe_dev_rx_queue_release(void *rxq) * function must be used. */ static inline int __attribute__((cold)) -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) -#else -check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) -#endif { int ret = 0; @@ -2341,7 +2244,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) * Scattered packets are not supported. This should be checked * outside of this function. 
*/ -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " "rxq->rx_free_thresh=%d, " @@ -2370,9 +2272,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) RTE_PMD_IXGBE_RX_MAX_BURST); ret = -EINVAL; } -#else - ret = -EINVAL; -#endif return ret; } @@ -2408,7 +2307,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->rx_ring[i] = zeroed_desc; } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC /* * initialize extra software ring entries. Space for these extra * entries is always allocated @@ -2421,11 +2319,15 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->rx_nb_avail = 0; rxq->rx_next_avail = 0; rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); -#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */ rxq->rx_tail = 0; rxq->nb_rx_hold = 0; rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; + +#ifdef RTE_IXGBE_INC_VECTOR + rxq->rxrearm_start = 0; + rxq->rxrearm_nb = 0; +#endif } int __attribute__((cold)) @@ -2451,10 +2353,10 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, * It must not exceed hardware maximum, and must be multiple * of IXGBE_ALIGN. */ - if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 || - (nb_desc > IXGBE_MAX_RING_DESC) || - (nb_desc < IXGBE_MIN_RING_DESC)) { - return (-EINVAL); + if (nb_desc % IXGBE_RXD_ALIGN != 0 || + (nb_desc > IXGBE_MAX_RING_DESC) || + (nb_desc < IXGBE_MIN_RING_DESC)) { + return -EINVAL; } /* Free memory prior to re-allocation if needed... */ @@ -2467,7 +2369,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue), RTE_CACHE_LINE_SIZE, socket_id); if (rxq == NULL) - return (-ENOMEM); + return -ENOMEM; rxq->mb_pool = mp; rxq->nb_rx_desc = nb_desc; rxq->rx_free_thresh = rx_conf->rx_free_thresh; @@ -2485,11 +2387,11 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. 
*/ - rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, - RX_RING_SZ, socket_id); + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, + RX_RING_SZ, IXGBE_ALIGN, socket_id); if (rz == NULL) { ixgbe_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } /* @@ -2515,11 +2417,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->rdh_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx)); } -#ifndef RTE_LIBRTE_XEN_DOM0 - rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr; -#else + rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); -#endif rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr; /* @@ -2549,7 +2448,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE, socket_id); if (!rxq->sw_ring) { ixgbe_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } /* @@ -2566,7 +2465,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE, socket_id); if (!rxq->sw_sc_ring) { ixgbe_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p " @@ -2607,7 +2506,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxdp = &(rxq->rx_ring[rxq->rx_tail]); while ((desc < rxq->nb_rx_desc) && - (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) { + (rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) { desc += IXGBE_RXQ_SCAN_INTERVAL; rxdp += IXGBE_RXQ_SCAN_INTERVAL; if (rxq->rx_tail + desc >= rxq->nb_rx_desc) @@ -2632,7 +2532,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) desc -= rxq->nb_rx_desc; rxdp = &rxq->rx_ring[desc]; - return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD); + return !!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)); } void __attribute__((cold)) @@ -2722,11 +2623,13 @@ ixgbe_rss_disable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; uint32_t mrqc; + uint32_t mrqc_reg; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + mrqc = IXGBE_READ_REG(hw, mrqc_reg); mrqc &= ~IXGBE_MRQC_RSSEN; - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + IXGBE_WRITE_REG(hw, mrqc_reg, mrqc); } static void @@ -2737,6 +2640,11 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf) uint32_t rss_key; uint64_t rss_hf; uint16_t i; + uint32_t mrqc_reg; + uint32_t rssrk_reg; + + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0); hash_key = rss_conf->rss_key; if (hash_key != NULL) { @@ -2746,7 +2654,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf) rss_key |= hash_key[(i * 4) + 1] << 8; rss_key |= hash_key[(i * 4) + 2] << 16; rss_key |= hash_key[(i * 4) + 3] << 24; - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key); + IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key); } } @@ -2771,7 +2679,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; if (rss_hf & ETH_RSS_IPV6_UDP_EX) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + IXGBE_WRITE_REG(hw, mrqc_reg, mrqc); } int @@ -2781,9 +2689,17 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev, struct ixgbe_hw *hw; uint32_t mrqc; uint64_t rss_hf; + uint32_t mrqc_reg; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!ixgbe_rss_update_sp(hw->mac.type)) { + PMD_DRV_LOG(ERR, "RSS hash update is not supported on this " + "NIC."); 
+ return -ENOTSUP; + } + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + /* * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS): * "RSS enabling cannot be done dynamically while it must be @@ -2794,7 +2710,7 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev, * disabled at initialization time. */ rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL; - mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + mrqc = IXGBE_READ_REG(hw, mrqc_reg); if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */ if (rss_hf != 0) /* Enable RSS */ return -(EINVAL); @@ -2817,13 +2733,17 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, uint32_t rss_key; uint64_t rss_hf; uint16_t i; + uint32_t mrqc_reg; + uint32_t rssrk_reg; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0); hash_key = rss_conf->rss_key; if (hash_key != NULL) { /* Return RSS hash key */ for (i = 0; i < 10; i++) { - rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i); + rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i); hash_key[(i * 4)] = rss_key & 0x000000FF; hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF; hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF; @@ -2832,7 +2752,7 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, } /* Get RSS functions configured in MRQC register */ - mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + mrqc = IXGBE_READ_REG(hw, mrqc_reg); if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */ rss_conf->rss_hf = 0; return 0; @@ -2868,22 +2788,28 @@ ixgbe_rss_configure(struct rte_eth_dev *dev) uint32_t reta; uint16_t i; uint16_t j; + uint16_t sp_reta_size; + uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + /* * Fill in redirection table * The byte-swap is needed because NIC registers are in * little-endian order. 
*/ reta = 0; - for (i = 0, j = 0; i < 128; i++, j++) { + for (i = 0, j = 0; i < sp_reta_size; i++, j++) { + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + if (j == dev->data->nb_rx_queues) j = 0; reta = (reta << 8) | j; if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), + IXGBE_WRITE_REG(hw, reta_reg, rte_bswap32(reta)); } @@ -2903,6 +2829,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev) #define NUM_VFTA_REGISTERS 128 #define NIC_RX_BUFFER_SIZE 0x200 +#define X550_RX_BUFFER_SIZE 0x180 static void ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) @@ -2931,7 +2858,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) * RXPBSIZE * split rx buffer up into sections, each for 1 traffic class */ - pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs); + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs); + break; + default: + pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs); + break; + } for (i = 0 ; i < nb_tcs; i++) { uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); @@ -2969,7 +2904,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) * mapping is done with 3 bits per priority, * so shift by i*3 each time */ - queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3)); + queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3)); IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping); @@ -3104,7 +3039,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, } /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { - j = vmdq_rx_conf->dcb_queue[i]; + j = vmdq_rx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (uint8_t)(1 << j); @@ -3132,7 +3067,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { - j = vmdq_tx_conf->dcb_queue[i]; + j = vmdq_tx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (uint8_t)(1 << j); @@ -3154,7 +3089,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev, /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { - j = rx_conf->dcb_queue[i]; + j = rx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (uint8_t)(1 << j); @@ -3175,7 +3110,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev, /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { - j = tx_conf->dcb_queue[i]; + j = tx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (uint8_t)(1 << j); @@ -3210,9 +3145,13 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_VMDQRT4TCEN; else { + /* no matter the mode is DCB or DCB_RSS, just + * set the MRQE to RSSXTCEN. 
RSS is controlled + * by RSS_FIELD + */ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | - IXGBE_MRQC_RT4TCEN; + IXGBE_MRQC_RTRSS4TCEN; } } if (dcb_config->num_tcs.pg_tcs == 8) { @@ -3222,7 +3161,7 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, else { IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | - IXGBE_MRQC_RT8TCEN; + IXGBE_MRQC_RTRSS8TCEN; } } @@ -3305,7 +3244,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, { int ret = 0; uint8_t i,pfc_en,nb_tcs; - uint16_t pbsize; + uint16_t pbsize, rx_buffer_size; uint8_t config_dcb_rx = 0; uint8_t config_dcb_tx = 0; uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; @@ -3327,16 +3266,17 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, *get dcb and VT rx configuration parameters *from rte_eth_conf */ - ixgbe_vmdq_dcb_rx_config(dev,dcb_config); + ixgbe_vmdq_dcb_rx_config(dev, dcb_config); /*Configure general VMDQ and DCB RX parameters*/ ixgbe_vmdq_dcb_configure(dev); } break; case ETH_MQ_RX_DCB: + case ETH_MQ_RX_DCB_RSS: dcb_config->vt_mode = false; config_dcb_rx = DCB_RX_CONFIG; /* Get dcb TX configuration parameters from rte_eth_conf */ - ixgbe_dcb_rx_config(dev,dcb_config); + ixgbe_dcb_rx_config(dev, dcb_config); /*Configure general DCB RX parameters*/ ixgbe_dcb_rx_hw_config(hw, dcb_config); break; @@ -3358,7 +3298,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, dcb_config->vt_mode = false; config_dcb_tx = DCB_TX_CONFIG; /*get DCB TX configuration parameters from rte_eth_conf*/ - ixgbe_dcb_tx_config(dev,dcb_config); + ixgbe_dcb_tx_config(dev, dcb_config); /*Configure general DCB TX parameters*/ ixgbe_dcb_tx_hw_config(hw, dcb_config); break; @@ -3396,9 +3336,19 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, } } + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + rx_buffer_size = X550_RX_BUFFER_SIZE; + break; + default: + rx_buffer_size = NIC_RX_BUFFER_SIZE; + break; + } + if(config_dcb_rx) { /* Set RX buffer size */ - pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs); + pbsize = (uint16_t)(rx_buffer_size / nb_tcs); uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT; for (i = 0 ; i < nb_tcs; i++) { IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); @@ -3454,7 +3404,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Check if the PFC is supported */ if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { - pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs); + pbsize = (uint16_t)(rx_buffer_size / nb_tcs); for (i = 0; i < nb_tcs; i++) { /* * If the TC count is 8,and the default high_water is 48, @@ -3489,14 +3439,15 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev) /* check support mq_mode for DCB */ if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) && - (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB)) + (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) && + (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)) return; if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES) return; /** Configure DCB hardware **/ - ixgbe_dcb_hw_configure(dev,dcb_cfg); + ixgbe_dcb_hw_configure(dev, dcb_cfg); return; } @@ -3642,7 +3593,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) if (mbuf == NULL) { PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u", (unsigned) rxq->queue_id); - return (-ENOMEM); + return -ENOMEM; } rte_mbuf_refcnt_set(mbuf, 1); @@ -3652,9 +3603,9 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) mbuf->port = rxq->port_id; dma_addr = - rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf)); + 
rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); rxd = &rxq->rx_ring[i]; - rxd->read.hdr_addr = dma_addr; + rxd->read.hdr_addr = 0; rxd->read.pkt_addr = dma_addr; rxe[i].mbuf = mbuf; } @@ -3738,21 +3689,25 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev) * any DCB/RSS w/o VMDq multi-queue setting */ switch (dev->data->dev_conf.rxmode.mq_mode) { - case ETH_MQ_RX_RSS: - ixgbe_rss_configure(dev); - break; + case ETH_MQ_RX_RSS: + case ETH_MQ_RX_DCB_RSS: + case ETH_MQ_RX_VMDQ_RSS: + ixgbe_rss_configure(dev); + break; - case ETH_MQ_RX_VMDQ_DCB: - ixgbe_vmdq_dcb_configure(dev); - break; + case ETH_MQ_RX_VMDQ_DCB: + ixgbe_vmdq_dcb_configure(dev); + break; - case ETH_MQ_RX_VMDQ_ONLY: - ixgbe_vmdq_rx_hw_configure(dev); - break; + case ETH_MQ_RX_VMDQ_ONLY: + ixgbe_vmdq_rx_hw_configure(dev); + break; - case ETH_MQ_RX_NONE: - /* if mq_mode is none, disable rss mode.*/ - default: ixgbe_rss_disable(dev); + case ETH_MQ_RX_NONE: + default: + /* if mq_mode is none, disable rss mode.*/ + ixgbe_rss_disable(dev); + break; } } else { /* @@ -3926,6 +3881,7 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type) void __attribute__((cold)) ixgbe_set_rx_function(struct rte_eth_dev *dev) { + uint16_t i, rx_using_sse; struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev->data->dev_private; @@ -3952,11 +3908,11 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) */ if (dev->data->lro) { if (adapter->rx_bulk_alloc_allowed) { - PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk " + PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk " "allocation version"); dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc; } else { - PMD_INIT_LOG(INFO, "LRO is requested. Using a single " + PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single " "allocation version"); dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc; } @@ -3972,7 +3928,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec; } else if (adapter->rx_bulk_alloc_allowed) { - PMD_INIT_LOG(INFO, "Using a Scattered with bulk " + PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk " "allocation callback (port=%d).", dev->data->port_id); dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc; @@ -3994,8 +3950,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) * - Single buffer allocation (the simplest one) */ } else if (adapter->rx_vec_allowed) { - PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX " - "burst size no less than 32."); + PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX " + "burst size no less than %d (port=%d).", + RTE_IXGBE_DESCS_PER_LOOP, + dev->data->port_id); dev->rx_pkt_burst = ixgbe_recv_pkts_vec; } else if (adapter->rx_bulk_alloc_allowed) { @@ -4007,13 +3965,23 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc; } else { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " - "satisfied, or Scattered Rx is requested, " - "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC " - "is not enabled (port=%d).", + "satisfied, or Scattered Rx is requested " + "(port=%d).", dev->data->port_id); dev->rx_pkt_burst = ixgbe_recv_pkts; } + + /* Propagate information about RX function choice through all queues. 
*/ + + rx_using_sse = + (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_pkts_vec); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + rxq->rx_using_sse = rx_using_sse; + } } /** @@ -4152,7 +4120,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) dev->data->lro = 1; - PMD_INIT_LOG(INFO, "enabling LRO mode"); + PMD_INIT_LOG(DEBUG, "enabling LRO mode"); return 0; } @@ -4541,6 +4509,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) rte_wmb(); IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; } else return -1; @@ -4584,6 +4553,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) ixgbe_rx_queue_release_mbufs(rxq); ixgbe_reset_rx_queue(adapter, rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; } else return -1; @@ -4626,6 +4596,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) rte_wmb(); IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; } else return -1; @@ -4686,12 +4657,50 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) txq->ops->release_mbufs(txq); txq->ops->reset(txq); } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; } else return -1; return 0; } +void +ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct ixgbe_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; +} + +void +ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct ixgbe_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + /* * [VF] Initializes Receive Unit. */
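
The recurring pattern in the hunks above is to leave NIC descriptor words in little-endian byte order and convert only the constant mask (for example rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD) or rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)), so the per-descriptor DD-bit check costs nothing on little-endian CPUs while still being correct on big-endian ones. Below is a minimal, stand-alone sketch of that pattern, not the driver's code: cpu_to_le32() merely stands in for DPDK's rte_cpu_to_le_32(), and fake_desc is a hypothetical, simplified descriptor write-back word.

#include <stdint.h>
#include <stdio.h>

#define DD_BIT 0x00000001u              /* "descriptor done" flag, CPU byte order */

/* Stand-in for rte_cpu_to_le_32(): identity on LE hosts, byte swap on BE. */
static inline uint32_t cpu_to_le32(uint32_t x)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
        return __builtin_bswap32(x);
#else
        return x;
#endif
}

/* Hypothetical, simplified descriptor: the NIC writes this word little-endian. */
struct fake_desc {
        volatile uint32_t status;
};

/* Return non-zero once hardware has written the descriptor back. */
static int desc_done(const struct fake_desc *d)
{
        uint32_t status = d->status;    /* one volatile read, raw LE value */

        /* swap the constant (folded at compile time), not the live word */
        return (status & cpu_to_le32(DD_BIT)) != 0;
}

int main(void)
{
        struct fake_desc d = { .status = cpu_to_le32(DD_BIT) };

        printf("descriptor done: %d\n", desc_done(&d));
        return 0;
}

Caching the volatile status word in a local before masking, as the patch does in ixgbe_tx_free_bufs(), ixgbe_xmit_cleanup() and ixgbe_rx_scan_hw_ring(), avoids repeated volatile reads, and swapping the constant instead of the descriptor field lets the conversion fold away entirely on little-endian targets.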