diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b1db57fe9f..6a62d67c78 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -130,7 +130,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 
 	/* check DD bit on threshold descriptor */
 	status = txq->tx_ring[txq->tx_next_dd].wb.status;
-	if (! (status & IXGBE_ADVTXD_STAT_DD))
+	if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
 		return 0;
 
 	/*
@@ -175,11 +175,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 		pkt_len = (*pkts)->data_len;
 
 		/* write data to descriptor */
-		txdp->read.buffer_addr = buf_dma_addr;
+		txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
 		txdp->read.cmd_type_len =
-			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+			rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
 		txdp->read.olinfo_status =
-			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+			rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
 		rte_prefetch0(&(*pkts)->pool);
 	}
 }
@@ -195,11 +198,11 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	pkt_len = (*pkts)->data_len;
 
 	/* write data to descriptor */
-	txdp->read.buffer_addr = buf_dma_addr;
+	txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
 	txdp->read.cmd_type_len =
-		((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+		rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
 	txdp->read.olinfo_status =
-		(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 	rte_prefetch0(&(*pkts)->pool);
 }
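
The tx4()/tx1() hunks above all apply one idiom: descriptor words belong to the NIC in little-endian byte order, so every CPU-side value is converted with rte_cpu_to_le_32()/rte_cpu_to_le_64() on the way in (and rte_le_to_cpu_*() on the way out, as in the RX hunks further down). A minimal sketch of that idiom, written against a hypothetical two-field descriptor rather than the real ixgbe layout:

	#include <stdint.h>
	#include <rte_byteorder.h>

	/* Hypothetical descriptor, for illustration only. */
	struct demo_tx_desc {
		uint64_t buffer_addr;	/* stored little-endian, as the NIC reads it */
		uint32_t cmd_type_len;
	};

	static inline void
	demo_desc_write(volatile struct demo_tx_desc *d, uint64_t pa, uint32_t cmd)
	{
		/* convert once per store: a no-op on LE CPUs, a byte swap on BE */
		d->buffer_addr = rte_cpu_to_le_64(pa);
		d->cmd_type_len = rte_cpu_to_le_32(cmd);
	}

On little-endian CPUs these conversions compile to nothing, so the portable form costs nothing on the common platforms.
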
@@ -511,6 +514,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 	uint16_t nb_tx_desc = txq->nb_tx_desc;
 	uint16_t desc_to_clean_to;
 	uint16_t nb_tx_to_clean;
+	uint32_t status;
 
 	/* Determine the last descriptor needing to be cleaned */
 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
@@ -519,7 +523,8 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 
 	/* Check to make sure the last descriptor to clean is done */
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+	status = txr[desc_to_clean_to].wb.status;
+	if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
 	{
 		PMD_TX_FREE_LOG(DEBUG,
 				"TX descriptor %4u is not done"
@@ -859,40 +864,107 @@ end_of_tx:
  *  RX functions
  *
  **********************************************************************/
-static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+#define IXGBE_PACKET_TYPE_IPV4              0X01
+#define IXGBE_PACKET_TYPE_IPV4_TCP          0X11
+#define IXGBE_PACKET_TYPE_IPV4_UDP          0X21
+#define IXGBE_PACKET_TYPE_IPV4_SCTP         0X41
+#define IXGBE_PACKET_TYPE_IPV4_EXT          0X03
+#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP     0X43
+#define IXGBE_PACKET_TYPE_IPV6              0X04
+#define IXGBE_PACKET_TYPE_IPV6_TCP          0X14
+#define IXGBE_PACKET_TYPE_IPV6_UDP          0X24
+#define IXGBE_PACKET_TYPE_IPV6_EXT          0X0C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP      0X1C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP      0X2C
+#define IXGBE_PACKET_TYPE_IPV4_IPV6         0X05
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP     0X15
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP     0X25
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IXGBE_PACKET_TYPE_MAX               0X80
+#define IXGBE_PACKET_TYPE_MASK              0X7F
+#define IXGBE_PACKET_TYPE_SHIFT             0X04
+static inline uint32_t
+ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
 {
-	uint64_t pkt_flags;
-
-	static const uint64_t ip_pkt_types_map[16] = {
-		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
-		PKT_RX_IPV6_HDR, 0, 0, 0,
-		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
-		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+	static const uint32_t
+		ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+		[IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4,
+		[IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4_EXT,
+		[IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6,
+		[IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+			RTE_PTYPE_INNER_L3_IPV6,
+		[IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT,
+		[IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+			RTE_PTYPE_INNER_L3_IPV6_EXT,
+		[IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+		[IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+		[IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+			RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+		[IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+		[IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+			RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+		[IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+		[IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+		[IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+			RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+		[IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+		[IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+			RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+		[IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+		[IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
 	};
+	if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
+		return RTE_PTYPE_UNKNOWN;
+
+	pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
+				IXGBE_PACKET_TYPE_MASK;
+
+	return ptype_table[pkt_info];
+}
 
-	static const uint64_t ip_rss_types_map[16] = {
+static inline uint64_t
+ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
+{
+	static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
 		0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
 		0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
 		PKT_RX_RSS_HASH, 0, 0, 0,
 		0, 0, 0,  PKT_RX_FDIR,
 	};
-
 #ifdef RTE_LIBRTE_IEEE1588
 	static uint64_t ip_pkt_etqf_map[8] = {
 		0, 0, 0, PKT_RX_IEEE1588_PTP,
 		0, 0, 0, 0,
 	};
 
-	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
-			ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
-			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+	if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
+		return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
+				ip_rss_types_map[pkt_info & 0XF];
+	else
+		return ip_rss_types_map[pkt_info & 0XF];
 #else
-	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
-			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-
+	return ip_rss_types_map[pkt_info & 0XF];
 #endif
-	return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
 }
 
 static inline uint64_t
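
With the table in place, translating a descriptor is a shift, a mask, and one index; ETQF-matched packets short-circuit to RTE_PTYPE_UNKNOWN. A worked example of what a consumer of mb->packet_type now sees (the pkt_info value here is hypothetical):

	/* A non-ETQF descriptor reporting IPv4+TCP carries 0x11 in its type
	 * bits, i.e. pkt_info = 0x11 << IXGBE_PACKET_TYPE_SHIFT = 0x0110. */
	uint32_t ptype = ixgbe_rxd_pkt_info_to_pkt_type(0x0110);

	/* ptype == RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	 * so applications can branch on the L4 type without parsing headers: */
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
		handle_tcp_packet();	/* hypothetical handler */
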
@@ -929,7 +1001,6 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
 }
 
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 /*
  * LOOK_AHEAD defines how many desc statuses to check beyond the
  * current descriptor.
@@ -949,16 +1020,19 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 	struct rte_mbuf *mb;
 	uint16_t pkt_len;
 	uint64_t pkt_flags;
-	int s[LOOK_AHEAD], nb_dd;
+	int nb_dd;
+	uint32_t s[LOOK_AHEAD];
+	uint16_t pkt_info[LOOK_AHEAD];
 	int i, j, nb_rx = 0;
-
+	uint32_t status;
 
 	/* get references to current descriptor and S/W ring entry */
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
 
+	status = rxdp->wb.upper.status_error;
 	/* check to make sure there is at least 1 packet to receive */
-	if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+	if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
 		return 0;
 
 	/*
@@ -970,7 +1044,11 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 	{
 		/* Read desc statuses backwards to avoid race condition */
 		for (j = LOOK_AHEAD-1; j >= 0; --j)
-			s[j] = rxdp[j].wb.upper.status_error;
+			s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
+
+		for (j = LOOK_AHEAD - 1; j >= 0; --j)
+			pkt_info[j] = rxdp[j].wb.lower.lo_dword.
+						hs_rss.pkt_info;
 
 		/* Compute how many status bits were set */
 		nb_dd = 0;
@@ -982,26 +1060,30 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 		/* Translate descriptor info to mbuf format */
 		for (j = 0; j < nb_dd; ++j) {
 			mb = rxep[j].mbuf;
-			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+			pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+				  rxq->crc_len;
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
 			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
 
 			/* convert descriptor fields to rte mbuf flags */
-			pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
-					rxdp[j].wb.lower.lo_dword.data);
-			/* reuse status field from scan list */
-			pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
+			pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
 			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+			pkt_flags |=
+				ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
 			mb->ol_flags = pkt_flags;
+			mb->packet_type =
+				ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
 
 			if (likely(pkt_flags & PKT_RX_RSS_HASH))
-				mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+				mb->hash.rss = rte_le_to_cpu_32(
+					rxdp[j].wb.lower.hi_dword.rss);
 			else if (pkt_flags & PKT_RX_FDIR) {
-				mb->hash.fdir.hash =
-					(uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
-						& IXGBE_ATR_HASH_MASK);
-				mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+				mb->hash.fdir.hash = rte_le_to_cpu_16(
+					rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+					IXGBE_ATR_HASH_MASK;
+				mb->hash.fdir.id = rte_le_to_cpu_16(
+					rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
 			}
 		}
 
@@ -1057,7 +1139,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
 
 		/* populate the descriptors */
 		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
-		rxdp[i].read.hdr_addr = dma_addr;
+		rxdp[i].read.hdr_addr = 0;
 		rxdp[i].read.pkt_addr = dma_addr;
 	}
 
@@ -1178,24 +1260,6 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
-#else
-
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
-			   __rte_unused struct rte_mbuf **rx_pkts,
-			   __rte_unused uint16_t nb_pkts)
-{
-	return 0;
-}
-
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
-		    __rte_unused bool reset_mbuf)
-{
-	return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-
 uint16_t
 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
@@ -1210,7 +1274,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	union ixgbe_adv_rx_desc rxd;
 	uint64_t dma_addr;
 	uint32_t staterr;
-	uint32_t hlen_type_rss;
+	uint32_t pkt_info;
 	uint16_t pkt_len;
 	uint16_t rx_id;
 	uint16_t nb_rx;
@@ -1234,7 +1298,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		rxdp = &rx_ring[rx_id];
 		staterr = rxdp->wb.upper.status_error;
-		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+		if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
 			break;
 
 		rxd = *rxdp;
@@ -1302,7 +1366,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxe->mbuf = nmb;
 		dma_addr =
 			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
-		rxdp->read.hdr_addr = dma_addr;
+		rxdp->read.hdr_addr = 0;
 		rxdp->read.pkt_addr = dma_addr;
 
 		/*
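
Several hunks in this patch stop writing the buffer address into rxdp->read.hdr_addr and store 0 instead. Header split is not used by this PMD, and in the descriptor union the write-back status word — the one carrying the DD bit that the RX loops poll — overlays hdr_addr, so zeroing it on re-arm leaves no stale address bits where status will later be read. A minimal sketch of the re-arm step under that reading, with dma_addr already converted to little-endian as in the hunks above:

	static inline void
	demo_rxd_rearm(volatile union ixgbe_adv_rx_desc *rxdp, uint64_t dma_addr)
	{
		rxdp->read.hdr_addr = 0;	/* no header split; keeps status bits clear */
		rxdp->read.pkt_addr = dma_addr;	/* the only address the NIC needs */
	}
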
@@ -1328,22 +1392,27 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->data_len = pkt_len;
 		rxm->port = rxq->port_id;
 
-		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+		pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
+								pkt_info);
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
-		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+		pkt_flags = rx_desc_status_to_pkt_flags(staterr);
 		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+		pkt_flags = pkt_flags |
+			ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
 		rxm->ol_flags = pkt_flags;
+		rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+			rxm->hash.rss = rte_le_to_cpu_32(
+						rxd.wb.lower.hi_dword.rss);
 		else if (pkt_flags & PKT_RX_FDIR) {
-			rxm->hash.fdir.hash =
-				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-					   & IXGBE_ATR_HASH_MASK);
-			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+			rxm->hash.fdir.hash = rte_le_to_cpu_16(
+					rxd.wb.lower.hi_dword.csum_ip.csum) &
+					IXGBE_ATR_HASH_MASK;
+			rxm->hash.fdir.id = rte_le_to_cpu_16(
+					rxd.wb.lower.hi_dword.csum_ip.ip_id);
 		}
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -1409,21 +1478,21 @@ ixgbe_fill_cluster_head_buf(
 	uint8_t port_id,
 	uint32_t staterr)
 {
-	uint32_t hlen_type_rss;
+	uint16_t pkt_info;
 	uint64_t pkt_flags;
 
 	head->port = port_id;
 
-	/*
-	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+	/* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
 	 * set in the pkt_flags field.
 	 */
 	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
-	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
-	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
+	pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
+	pkt_flags = rx_desc_status_to_pkt_flags(staterr);
 	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+	pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
 	head->ol_flags = pkt_flags;
+	head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
 
 	if (likely(pkt_flags & PKT_RX_RSS_HASH))
 		head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
@@ -1544,7 +1613,8 @@ next_desc:
 					rx_mbuf_alloc_failed++;
 				break;
 			}
-		} else if (nb_hold > rxq->rx_free_thresh) {
+		}
+		else if (nb_hold > rxq->rx_free_thresh) {
 			uint16_t next_rdt = rxq->rx_free_trigger;
 
 			if (!ixgbe_rx_alloc_bufs(rxq, false)) {
@@ -1596,7 +1666,7 @@ next_desc:
 			rxe->mbuf = nmb;
 			rxm->data_off = RTE_PKTMBUF_HEADROOM;
-			rxdp->read.hdr_addr = 0;
+			rxdp->read.hdr_addr = 0;
 			rxdp->read.pkt_addr = dma;
 		} else
 			rxe->mbuf = NULL;
@@ -1672,6 +1742,25 @@ next_desc:
 	ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
 				    staterr);
 
+	/*
+	 * Deal with the case, when HW CRC strip is disabled.
+	 * That can't happen when LRO is enabled, but still could
+	 * happen for scattered RX mode.
+	 */
+	first_seg->pkt_len -= rxq->crc_len;
+	if (unlikely(rxm->data_len <= rxq->crc_len)) {
+		struct rte_mbuf *lp;
+
+		for (lp = first_seg; lp->next != rxm; lp = lp->next)
+			;
+
+		first_seg->nb_segs--;
+		lp->data_len -= rxq->crc_len - rxm->data_len;
+		lp->next = NULL;
+		rte_pktmbuf_free_seg(rxm);
+	} else
+		rxm->data_len -= rxq->crc_len;
+
 	/* Prefetch data of first segment, if configured to do so. */
 	rte_packet_prefetch((char *)first_seg->buf_addr +
 		first_seg->data_off);
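
The new CRC block handles receives with hardware CRC stripping disabled, where the 4-byte CRC must not be counted in the mbuf lengths and may sit partly or wholly in the final segment. A worked example with hypothetical sizes and crc_len = 4:

	/* A 1500-byte frame plus CRC arrives as two segments of 1502 B and 2 B.
	 * rxm is the last segment and rxm->data_len (2) <= crc_len (4), so:
	 *   first_seg->pkt_len = 1504 - 4       = 1500  (CRC off the total)
	 *   lp->data_len       = 1502 - (4 - 2) = 1500  (CRC tail cut from lp)
	 * rxm is unlinked and freed, and nb_segs drops from 2 to 1. */
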
@@ -1835,7 +1924,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 	prev = (uint16_t) (txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
 		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-		txd->wb.status = IXGBE_TXD_STAT_DD;
+		txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
 		txe[i].mbuf = NULL;
 		txe[i].last_id = i;
 		txe[prev].next_id = i;
@@ -1874,23 +1963,23 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
 			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx code path");
+		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
 #ifdef RTE_IXGBE_INC_VECTOR
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
 				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
 					ixgbe_txq_vec_setup(txq) == 0)) {
-			PMD_INIT_LOG(INFO, "Vector tx enabled.");
+			PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
 		} else
 #endif
 		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
 	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx code path");
-		PMD_INIT_LOG(INFO,
+		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+		PMD_INIT_LOG(DEBUG,
 				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
 				(unsigned long)txq->txq_flags,
 				(unsigned long)IXGBE_SIMPLE_FLAGS);
-		PMD_INIT_LOG(INFO,
+		PMD_INIT_LOG(DEBUG,
 				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 				(unsigned long)txq->tx_rs_thresh,
 				(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
@@ -2104,15 +2193,21 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 {
 	unsigned i;
 
+#ifdef RTE_IXGBE_INC_VECTOR
+	/* SSE Vector driver has a different way of releasing mbufs. */
+	if (rxq->rx_using_sse) {
+		ixgbe_rx_queue_release_mbufs_vec(rxq);
+		return;
+	}
+#endif
+
 	if (rxq->sw_ring != NULL) {
 		for (i = 0; i < rxq->nb_rx_desc; i++) {
-			if (rxq->sw_ring[i].mbuf != NULL &&
-					rte_mbuf_refcnt_read(rxq->sw_ring[i].mbuf)) {
+			if (rxq->sw_ring[i].mbuf != NULL) {
 				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
 				rxq->sw_ring[i].mbuf = NULL;
 			}
 		}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		if (rxq->rx_nb_avail) {
 			for (i = 0; i < rxq->rx_nb_avail; ++i) {
 				struct rte_mbuf *mb;
@@ -2121,7 +2216,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 			}
 			rxq->rx_nb_avail = 0;
 		}
-#endif
 	}
 
 	if (rxq->sw_sc_ring)
@@ -2158,11 +2252,7 @@ ixgbe_dev_rx_queue_release(void *rxq)
  * function must be used.
  */
 static inline int __attribute__((cold))
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
 {
 	int ret = 0;
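
check_rx_burst_bulk_alloc_preconditions() is now compiled unconditionally; whether the bulk-allocation burst function is used is decided at runtime from the queue configuration alone. A hypothetical queue setup that passes the checks below (port_id and mp are assumed to come from the caller); the key constraints are that rx_free_thresh be at least RTE_PMD_IXGBE_RX_MAX_BURST (32), smaller than the ring, and divide the ring size evenly:

	#include <rte_ethdev.h>

	static int
	setup_bulk_alloc_friendly_rxq(uint8_t port_id, struct rte_mempool *mp)
	{
		struct rte_eth_rxconf rxconf = {
			.rx_free_thresh = 32,	/* 32 >= 32 and 512 % 32 == 0 */
		};

		return rte_eth_rx_queue_setup(port_id, 0, 512,
					      rte_eth_dev_socket_id(port_id),
					      &rxconf, mp);
	}
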
@@ -2175,7 +2265,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 	 * Scattered packets are not supported.  This should be checked
 	 * outside of this function.
 	 */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
@@ -2204,9 +2293,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
			     RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = -EINVAL;
 	}
-#else
-	ret = -EINVAL;
-#endif
 
 	return ret;
 }
@@ -2242,7 +2328,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 		rxq->rx_ring[i] = zeroed_desc;
 	}
 
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	/*
 	 * initialize extra software ring entries. Space for these extra
 	 * entries is always allocated
@@ -2255,11 +2340,15 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 	rxq->rx_nb_avail = 0;
 	rxq->rx_next_avail = 0;
 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
 	rxq->rx_tail = 0;
 	rxq->nb_rx_hold = 0;
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+	rxq->rxrearm_start = 0;
+	rxq->rxrearm_nb = 0;
+#endif
 }
 
 int __attribute__((cold))
@@ -2441,7 +2530,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 	while ((desc < rxq->nb_rx_desc) &&
-		(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+		(rxdp->wb.upper.status_error &
+			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
 		desc += IXGBE_RXQ_SCAN_INTERVAL;
 		rxdp += IXGBE_RXQ_SCAN_INTERVAL;
 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
@@ -2466,7 +2556,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 		desc -= rxq->nb_rx_desc;
 
 	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+	return !!(rxdp->wb.upper.status_error &
+			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
 }
 
 void __attribute__((cold))
@@ -2495,6 +2586,26 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 	}
 }
 
+void
+ixgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+	unsigned i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+		dev->data->rx_queues[i] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+}
+
 /*********************************************************************
  *
  *  Device RX/TX init functions
@@ -2536,11 +2647,13 @@ ixgbe_rss_disable(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw;
 	uint32_t mrqc;
+	uint32_t mrqc_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	mrqc &= ~IXGBE_MRQC_RSSEN;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
 
 static void
@@ -2551,6 +2664,11 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 	uint32_t rss_key;
 	uint64_t rss_hf;
 	uint16_t i;
+	uint32_t mrqc_reg;
+	uint32_t rssrk_reg;
+
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
 
 	hash_key = rss_conf->rss_key;
 	if (hash_key != NULL) {
@@ -2560,7 +2678,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 			rss_key |= hash_key[(i * 4) + 1] << 8;
 			rss_key |= hash_key[(i * 4) + 2] << 16;
 			rss_key |= hash_key[(i * 4) + 3] << 24;
-			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+			IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
 		}
 	}
 
@@ -2585,7 +2703,7 @@ ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
 	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
 }
 
 int
@@ -2595,9 +2713,17 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	struct ixgbe_hw *hw;
 	uint32_t mrqc;
 	uint64_t rss_hf;
+	uint32_t mrqc_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!ixgbe_rss_update_sp(hw->mac.type)) {
+		PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+			"NIC.");
+		return -ENOTSUP;
+	}
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+
 	/*
 	 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
 	 *     "RSS enabling cannot be done dynamically while it must be
@@ -2608,7 +2734,7 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 	 * disabled at initialization time.
 	 */
 	rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
 		if (rss_hf != 0) /* Enable RSS */
 			return -(EINVAL);
@@ -2631,13 +2757,17 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint32_t rss_key;
 	uint64_t rss_hf;
 	uint16_t i;
+	uint32_t mrqc_reg;
+	uint32_t rssrk_reg;
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+	rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
 	hash_key = rss_conf->rss_key;
 	if (hash_key != NULL) {
 		/* Return RSS hash key */
 		for (i = 0; i < 10; i++) {
-			rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+			rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
 			hash_key[(i * 4)] = rss_key & 0x000000FF;
 			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
 			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
@@ -2646,7 +2776,7 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 
 	/* Get RSS functions configured in MRQC register */
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc = IXGBE_READ_REG(hw, mrqc_reg);
 	if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
 		rss_conf->rss_hf = 0;
 		return 0;
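
The RSSRK loop above packs the 40-byte RSS key into ten 32-bit register words, least-significant byte first, and the read path in ixgbe_dev_rss_hash_conf_get() unpacks with the mirror-image shifts, so a key round-trips unchanged. A worked example with made-up key bytes:

	/* hash_key[0..3] = 0x6d, 0x5a, 0x56, 0xda  ->  word i == 0:
	 *   rss_key = 0x6d | (0x5a << 8) | (0x56 << 16) | (0xda << 24)
	 *           = 0xda565a6d
	 * Read back: & 0xff -> 0x6d, (>> 8) & 0xff -> 0x5a, and so on. */
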
@@ -2682,22 +2812,28 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
 	uint32_t reta;
 	uint16_t i;
 	uint16_t j;
+	uint16_t sp_reta_size;
+	uint32_t reta_reg;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
 	/*
 	 * Fill in redirection table
 	 * The byte-swap is needed because NIC registers are in
 	 * little-endian order.
 	 */
 	reta = 0;
-	for (i = 0, j = 0; i < 128; i++, j++) {
+	for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
 		if (j == dev->data->nb_rx_queues)
 			j = 0;
 		reta = (reta << 8) | j;
 		if ((i & 3) == 3)
-			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+			IXGBE_WRITE_REG(hw, reta_reg,
 					rte_bswap32(reta));
 	}
 
@@ -2717,6 +2853,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
 
 #define NUM_VFTA_REGISTERS 128
 #define NIC_RX_BUFFER_SIZE 0x200
+#define X550_RX_BUFFER_SIZE 0x180
 
 static void
 ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
@@ -2745,7 +2882,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	 * RXPBSIZE
 	 * split rx buffer up into sections, each for 1 traffic class
 	 */
-	pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
+		break;
+	default:
+		pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+		break;
+	}
 	for (i = 0 ; i < nb_tcs; i++) {
 		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
@@ -2783,7 +2928,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 	 * mapping is done with 3 bits per priority,
 	 * so shift by i*3 each time
 	 */
-		queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+		queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
 
 	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
 
@@ -2918,7 +3063,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_rx_conf->dcb_queue[i];
+		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -2946,7 +3091,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_tx_conf->dcb_queue[i];
+		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -2968,7 +3113,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = rx_conf->dcb_queue[i];
+		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -2989,7 +3134,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = tx_conf->dcb_queue[i];
+		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3119,7 +3264,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 {
 	int     ret = 0;
 	uint8_t i,pfc_en,nb_tcs;
-	uint16_t pbsize;
+	uint16_t pbsize, rx_buffer_size;
 	uint8_t config_dcb_rx = 0;
 	uint8_t config_dcb_tx = 0;
 	uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
@@ -3210,9 +3355,19 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		}
 	}
 
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		rx_buffer_size = X550_RX_BUFFER_SIZE;
+		break;
+	default:
+		rx_buffer_size = NIC_RX_BUFFER_SIZE;
+		break;
+	}
+
 	if(config_dcb_rx) {
 		/* Set RX buffer size */
-		pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
 		for (i = 0 ; i < nb_tcs; i++) {
 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
 		}
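
Both DCB paths now pick the packet-buffer size by MAC type before dividing it among traffic classes; 0x200 and 0x180 look like KB counts (512 KB on the older parts, 384 KB on X550/X550EM_x). The resulting per-TC arithmetic:

	/* With nb_tcs = 4:
	 *   default MACs:   pbsize = 0x200 / 4 = 0x80  (512 KB -> 128 KB per TC)
	 *   X550/X550EM_x:  pbsize = 0x180 / 4 = 0x60  (384 KB ->  96 KB per TC)
	 * Each value is shifted by IXGBE_RXPBSIZE_SHIFT before being written
	 * to RXPBSIZE(i). */
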
@@ -3268,7 +3423,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 	/* Check if the PFC is supported */
 	if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
-		pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
 		for (i = 0; i < nb_tcs; i++) {
 			/*
 			 * If the TC count is 8,and the default high_water is 48,
@@ -3468,7 +3623,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
 		dma_addr =
 			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
 		rxd = &rxq->rx_ring[i];
-		rxd->read.hdr_addr = dma_addr;
+		rxd->read.hdr_addr = 0;
 		rxd->read.pkt_addr = dma_addr;
 		rxe[i].mbuf = mbuf;
 	}
@@ -3740,6 +3895,7 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
 void __attribute__((cold))
 ixgbe_set_rx_function(struct rte_eth_dev *dev)
 {
+	uint16_t i, rx_using_sse;
 	struct ixgbe_adapter *adapter =
 		(struct ixgbe_adapter *)dev->data->dev_private;
 
@@ -3766,11 +3922,11 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 */
 	if (dev->data->lro) {
 		if (adapter->rx_bulk_alloc_allowed) {
-			PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
+			PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
					   "allocation version");
 			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
 		} else {
-			PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
+			PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
					   "allocation version");
 			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
 		}
@@ -3786,7 +3942,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 
 			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
 		} else if (adapter->rx_bulk_alloc_allowed) {
-			PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
+			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
					   "allocation callback (port=%d).",
				     dev->data->port_id);
 			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
@@ -3808,8 +3964,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 *    - Single buffer allocation (the simplest one)
 	 */
 	} else if (adapter->rx_vec_allowed) {
-		PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
-				   "burst size no less than 32.");
+		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+				    "burst size no less than %d (port=%d).",
+			     RTE_IXGBE_DESCS_PER_LOOP,
+			     dev->data->port_id);
 
 		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
 	} else if (adapter->rx_bulk_alloc_allowed) {
@@ -3821,13 +3979,23 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
-				    "satisfied, or Scattered Rx is requested, "
-				    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
-				    "is not enabled (port=%d).",
+				    "satisfied, or Scattered Rx is requested "
+				    "(port=%d).",
			     dev->data->port_id);
 
 		dev->rx_pkt_burst = ixgbe_recv_pkts;
 	}
+
+	/* Propagate information about RX function choice through all queues. */
+
+	rx_using_sse =
+		(dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+		dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+		rxq->rx_using_sse = rx_using_sse;
+	}
 }
 
 /**
@@ -3966,7 +4134,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
 	dev->data->lro = 1;
 
-	PMD_INIT_LOG(INFO, "enabling LRO mode");
+	PMD_INIT_LOG(DEBUG, "enabling LRO mode");
 
 	return 0;
 }
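
The vector-RX log message now derives its advertised minimum burst size from RTE_IXGBE_DESCS_PER_LOOP rather than hard-coding 32, since the SSE path consumes descriptors that many at a time. A polling sketch under those assumptions (port_id and queue_id are assumed to be configured elsewhere; a burst of 32 comfortably clears the minimum):

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	static void
	demo_poll_once(uint8_t port_id, uint16_t queue_id)
	{
		struct rte_mbuf *pkts[32];	/* >= RTE_IXGBE_DESCS_PER_LOOP */
		uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

		while (nb)
			rte_pktmbuf_free(pkts[--nb]);	/* placeholder processing */
	}
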