X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=a0c88473a2035d1188ffafd00ac9f0f2ae213ddf;hb=48e967695ba78e6ca8f7b6c754251c240b0ee87f;hp=4f9ab22c5773b214e8f4c3bd62c07584a6348bd7;hpb=abf7275bbaa2918a387e1f28f2c352053279c879;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index 4f9ab22c57..a0c88473a2 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -375,10 +375,15 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, /* check if TCP segmentation required for this packet */ if (ol_flags & PKT_TX_TCP_SEG) { - /* implies IP cksum and TCP cksum */ - type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 | - IXGBE_ADVTXD_TUCMD_L4T_TCP | - IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + /* implies IP cksum in IPv4 */ + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 | + IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + else + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 | + IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; @@ -588,9 +593,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, txe = &sw_ring[tx_id]; /* Determine if the descriptor ring needs to be cleaned. */ - if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) { + if (txq->nb_tx_free < txq->tx_free_thresh) ixgbe_xmit_cleanup(txq); - } rte_prefetch0(&txe->mbuf->pool); @@ -855,6 +859,110 @@ end_of_tx: * RX functions * **********************************************************************/ +#ifdef RTE_NEXT_ABI +#define IXGBE_PACKET_TYPE_IPV4 0X01 +#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11 +#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21 +#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41 +#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43 +#define IXGBE_PACKET_TYPE_IPV6 0X04 +#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14 +#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C +#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C +#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C +#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D +#define IXGBE_PACKET_TYPE_MAX 0X80 +#define IXGBE_PACKET_TYPE_MASK 0X7F +#define IXGBE_PACKET_TYPE_SHIFT 0X04 +static inline uint32_t +ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info) +{ + static const uint32_t + ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4, + [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | 
RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + }; + if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) + return RTE_PTYPE_UNKNOWN; + + pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & + IXGBE_PACKET_TYPE_MASK; + + return ptype_table[pkt_info]; +} + +static inline uint64_t +ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info) +{ + static uint64_t ip_rss_types_map[16] __rte_cache_aligned = { + 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, 0, 0, + 0, 0, 0, PKT_RX_FDIR, + }; +#ifdef RTE_LIBRTE_IEEE1588 + static uint64_t ip_pkt_etqf_map[8] = { + 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, 0, + }; + + if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) + return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] | + ip_rss_types_map[pkt_info & 0XF]; + else + return ip_rss_types_map[pkt_info & 0XF]; +#else + return ip_rss_types_map[pkt_info & 0XF]; +#endif +} +#else /* RTE_NEXT_ABI */ static inline uint64_t rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs) { @@ -890,6 +998,7 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs) #endif return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]; } +#endif /* RTE_NEXT_ABI */ static inline uint64_t rx_desc_status_to_pkt_flags(uint32_t rx_status) @@ -925,7 +1034,6 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC /* * LOOK_AHEAD defines how many desc statuses to check beyond the * current descriptor. @@ -945,7 +1053,13 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) struct rte_mbuf *mb; uint16_t pkt_len; uint64_t pkt_flags; +#ifdef RTE_NEXT_ABI + int nb_dd; + uint32_t s[LOOK_AHEAD]; + uint16_t pkt_info[LOOK_AHEAD]; +#else int s[LOOK_AHEAD], nb_dd; +#endif /* RTE_NEXT_ABI */ int i, j, nb_rx = 0; @@ -968,6 +1082,12 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) for (j = LOOK_AHEAD-1; j >= 0; --j) s[j] = rxdp[j].wb.upper.status_error; +#ifdef RTE_NEXT_ABI + for (j = LOOK_AHEAD - 1; j >= 0; --j) + pkt_info[j] = rxdp[j].wb.lower.lo_dword. 
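+					/* hs_rss overlays the same lower dword that the
+					 * non-NEXT_ABI path reads as hlen_type_rss. */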
+ hs_rss.pkt_info; +#endif /* RTE_NEXT_ABI */ + /* Compute how many status bits were set */ nb_dd = 0; for (j = 0; j < LOOK_AHEAD; ++j) @@ -981,16 +1101,25 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len); mb->data_len = pkt_len; mb->pkt_len = pkt_len; - mb->vlan_tci = rxdp[j].wb.upper.vlan; mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan); /* convert descriptor fields to rte mbuf flags */ +#ifdef RTE_NEXT_ABI + pkt_flags = rx_desc_status_to_pkt_flags(s[j]); + pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); + pkt_flags |= + ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]); + mb->ol_flags = pkt_flags; + mb->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]); +#else /* RTE_NEXT_ABI */ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags( rxdp[j].wb.lower.lo_dword.data); /* reuse status field from scan list */ pkt_flags |= rx_desc_status_to_pkt_flags(s[j]); pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); mb->ol_flags = pkt_flags; +#endif /* RTE_NEXT_ABI */ if (likely(pkt_flags & PKT_RX_RSS_HASH)) mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss; @@ -1175,24 +1304,6 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, return nb_rx; } -#else - -/* Stub to avoid extra ifdefs */ -static uint16_t -ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue, - __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) -{ - return 0; -} - -static inline int -ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq, - __rte_unused bool reset_mbuf) -{ - return -ENOMEM; -} -#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */ - uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1207,7 +1318,11 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, union ixgbe_adv_rx_desc rxd; uint64_t dma_addr; uint32_t staterr; +#ifdef RTE_NEXT_ABI + uint32_t pkt_info; +#else uint32_t hlen_type_rss; +#endif uint16_t pkt_len; uint16_t rx_id; uint16_t nb_rx; @@ -1325,6 +1440,19 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->data_len = pkt_len; rxm->port = rxq->port_id; +#ifdef RTE_NEXT_ABI + pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss. + pkt_info); + /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + + pkt_flags = rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + pkt_flags = pkt_flags | + ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info); + rxm->ol_flags = pkt_flags; + rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info); +#else /* RTE_NEXT_ABI */ hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); @@ -1333,6 +1461,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); rxm->ol_flags = pkt_flags; +#endif /* RTE_NEXT_ABI */ if (likely(pkt_flags & PKT_RX_RSS_HASH)) rxm->hash.rss = rxd.wb.lower.hi_dword.rss; @@ -1406,6 +1535,23 @@ ixgbe_fill_cluster_head_buf( uint8_t port_id, uint32_t staterr) { +#ifdef RTE_NEXT_ABI + uint16_t pkt_info; + uint64_t pkt_flags; + + head->port = port_id; + + /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is + * set in the pkt_flags field. 
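+	 * The descriptor stores the tag in little-endian order, hence
+	 * the rte_le_to_cpu_16() conversion below.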
+ */ + head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); + pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info); + pkt_flags = rx_desc_status_to_pkt_flags(staterr); + pkt_flags |= rx_desc_error_to_pkt_flags(staterr); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info); + head->ol_flags = pkt_flags; + head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info); +#else /* RTE_NEXT_ABI */ uint32_t hlen_type_rss; uint64_t pkt_flags; @@ -1421,6 +1567,7 @@ ixgbe_fill_cluster_head_buf( pkt_flags |= rx_desc_status_to_pkt_flags(staterr); pkt_flags |= rx_desc_error_to_pkt_flags(staterr); head->ol_flags = pkt_flags; +#endif /* RTE_NEXT_ABI */ if (likely(pkt_flags & PKT_RX_RSS_HASH)) head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss); @@ -1541,7 +1688,8 @@ next_desc: rx_mbuf_alloc_failed++; break; } - } else if (nb_hold > rxq->rx_free_thresh) { + } + else if (nb_hold > rxq->rx_free_thresh) { uint16_t next_rdt = rxq->rx_free_trigger; if (!ixgbe_rx_alloc_bufs(rxq, false)) { @@ -1752,7 +1900,7 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, * needed. If the memzone is already created, then this function returns a ptr * to the old one. */ -static const struct rte_memzone * +static const struct rte_memzone * __attribute__((cold)) ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, uint16_t queue_id, uint32_t ring_size, int socket_id) { @@ -1776,7 +1924,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, #endif } -static void +static void __attribute__((cold)) ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) { unsigned i; @@ -1791,7 +1939,7 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) } } -static void +static void __attribute__((cold)) ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) { if (txq != NULL && @@ -1799,7 +1947,7 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) rte_free(txq->sw_ring); } -static void +static void __attribute__((cold)) ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) { if (txq != NULL && txq->ops != NULL) { @@ -1809,14 +1957,14 @@ ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) } } -void +void __attribute__((cold)) ixgbe_dev_tx_queue_release(void *txq) { ixgbe_tx_queue_release(txq); } /* (Re)set dynamic ixgbe_tx_queue fields to defaults */ -static void +static void __attribute__((cold)) ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) { static const union ixgbe_adv_tx_desc zeroed_desc = {{0}}; @@ -1865,7 +2013,7 @@ static const struct ixgbe_txq_ops def_txq_ops = { * the queue parameters. Used in tx_queue_setup by primary process and then * in dev_init by secondary process when attaching to an existing ethdev. 
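 * Burst-function pointers are per-process addresses, so the secondary
 * cannot reuse the selection made by the primary.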
*/ -void +void __attribute__((cold)) ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ @@ -1895,7 +2043,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) } } -int +int __attribute__((cold)) ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -2083,7 +2231,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * * @m scattered cluster head */ -static void +static void __attribute__((cold)) ixgbe_free_sc_cluster(struct rte_mbuf *m) { uint8_t i, nb_segs = m->nb_segs; @@ -2096,11 +2244,19 @@ ixgbe_free_sc_cluster(struct rte_mbuf *m) } } -static void +static void __attribute__((cold)) ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) { unsigned i; +#ifdef RTE_IXGBE_INC_VECTOR + /* SSE Vector driver has a different way of releasing mbufs. */ + if (rxq->rx_using_sse) { + ixgbe_rx_queue_release_mbufs_vec(rxq); + return; + } +#endif + if (rxq->sw_ring != NULL) { for (i = 0; i < rxq->nb_rx_desc; i++) { if (rxq->sw_ring[i].mbuf != NULL) { @@ -2108,7 +2264,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) rxq->sw_ring[i].mbuf = NULL; } } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC if (rxq->rx_nb_avail) { for (i = 0; i < rxq->rx_nb_avail; ++i) { struct rte_mbuf *mb; @@ -2117,7 +2272,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) } rxq->rx_nb_avail = 0; } -#endif } if (rxq->sw_sc_ring) @@ -2128,7 +2282,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) } } -static void +static void __attribute__((cold)) ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) { if (rxq != NULL) { @@ -2139,7 +2293,7 @@ ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) } } -void +void __attribute__((cold)) ixgbe_dev_rx_queue_release(void *rxq) { ixgbe_rx_queue_release(rxq); @@ -2153,12 +2307,8 @@ ixgbe_dev_rx_queue_release(void *rxq) * -EINVAL: the preconditions are NOT satisfied and the default Rx burst * function must be used. */ -static inline int -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC +static inline int __attribute__((cold)) check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) -#else -check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) -#endif { int ret = 0; @@ -2171,7 +2321,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) * Scattered packets are not supported. This should be checked * outside of this function. */ -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " "rxq->rx_free_thresh=%d, " @@ -2200,15 +2349,12 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq) RTE_PMD_IXGBE_RX_MAX_BURST); ret = -EINVAL; } -#else - ret = -EINVAL; -#endif return ret; } /* Reset dynamic ixgbe_rx_queue fields back to defaults */ -static void +static void __attribute__((cold)) ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) { static const union ixgbe_adv_rx_desc zeroed_desc = {{0}}; @@ -2238,7 +2384,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->rx_ring[i] = zeroed_desc; } -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC /* * initialize extra software ring entries. 
Space for these extra * entries is always allocated @@ -2251,14 +2396,18 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) rxq->rx_nb_avail = 0; rxq->rx_next_avail = 0; rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); -#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */ rxq->rx_tail = 0; rxq->nb_rx_hold = 0; rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; + +#ifdef RTE_IXGBE_INC_VECTOR + rxq->rxrearm_start = 0; + rxq->rxrearm_nb = 0; +#endif } -int +int __attribute__((cold)) ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -2465,7 +2614,7 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD); } -void +void __attribute__((cold)) ixgbe_dev_clear_queues(struct rte_eth_dev *dev) { unsigned i; @@ -2491,6 +2640,26 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) } } +void +ixgbe_dev_free_queues(struct rte_eth_dev *dev) +{ + unsigned i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + /********************************************************************* * * Device RX/TX init functions @@ -3438,7 +3607,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) return; } -static int +static int __attribute__((cold)) ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) { struct ixgbe_rx_entry *rxe = rxq->sw_ring; @@ -3733,8 +3902,10 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type) } } -void ixgbe_set_rx_function(struct rte_eth_dev *dev) +void __attribute__((cold)) +ixgbe_set_rx_function(struct rte_eth_dev *dev) { + uint16_t i, rx_using_sse; struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev->data->dev_private; @@ -3816,13 +3987,23 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc; } else { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " - "satisfied, or Scattered Rx is requested, " - "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC " - "is not enabled (port=%d).", + "satisfied, or Scattered Rx is requested " + "(port=%d).", dev->data->port_id); dev->rx_pkt_burst = ixgbe_recv_pkts; } + + /* Propagate information about RX function choice through all queues. */ + + rx_using_sse = + (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_pkts_vec); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + rxq->rx_using_sse = rx_using_sse; + } } /** @@ -3969,7 +4150,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* * Initializes Receive Unit. */ -int +int __attribute__((cold)) ixgbe_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4151,7 +4332,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) /* * Initializes Transmit Unit. */ -void +void __attribute__((cold)) ixgbe_dev_tx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4219,7 +4400,7 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev) /* * Set up link for 82599 loopback mode Tx->Rx. 
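 * Frames transmitted by the MAC are looped straight back into the
 * receive path, so no external link partner is required.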
*/ -static inline void +static inline void __attribute__((cold)) ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) { PMD_INIT_FUNC_TRACE(); @@ -4247,7 +4428,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) /* * Start Transmit and Receive Units. */ -int +int __attribute__((cold)) ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4314,7 +4495,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) /* * Start Receive Units for specified queue. */ -int +int __attribute__((cold)) ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; @@ -4359,7 +4540,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* * Stop Receive Units for specified queue. */ -int +int __attribute__((cold)) ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; @@ -4403,7 +4584,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* * Start Transmit Units for specified queue. */ -int +int __attribute__((cold)) ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ixgbe_hw *hw; @@ -4444,7 +4625,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) /* * Stop Transmit Units for specified queue. */ -int +int __attribute__((cold)) ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ixgbe_hw *hw; @@ -4504,7 +4685,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) /* * [VF] Initializes Receive Unit. */ -int +int __attribute__((cold)) ixgbevf_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4549,7 +4730,6 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); /* Setup RX queues */ - dev->rx_pkt_burst = ixgbe_recv_pkts; for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; @@ -4615,14 +4795,6 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->data->scattered_rx = 1; -#ifdef RTE_IXGBE_INC_VECTOR - if (rte_is_power_of_2(rxq->nb_rx_desc)) - dev->rx_pkt_burst = - ixgbe_recv_scattered_pkts_vec; - else -#endif - dev->rx_pkt_burst = - ixgbe_recv_pkts_lro_single_alloc; } } @@ -4640,13 +4812,15 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) IXGBE_PSRTYPE_RQPL_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + ixgbe_set_rx_function(dev); + return 0; } /* * [VF] Initializes Transmit Unit. */ -void +void __attribute__((cold)) ixgbevf_dev_tx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; @@ -4687,7 +4861,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev) /* * [VF] Start Transmit and Receive Units. */ -void +void __attribute__((cold)) ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw;
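
Note on the RTE_NEXT_ABI receive path added above: the packet type is now
derived from the descriptor's pkt_info field instead of hlen_type_rss,
reduced to a table index with IXGBE_PACKET_TYPE_SHIFT and
IXGBE_PACKET_TYPE_MASK. Below is a minimal standalone sketch of that index
arithmetic, with the shift/mask values copied from the defines above and
the table cut down to two stand-in entries; this is an illustration, not
part of the patch:

#include <stdint.h>
#include <stdio.h>

#define PACKET_TYPE_SHIFT 0x04	/* mirrors IXGBE_PACKET_TYPE_SHIFT */
#define PACKET_TYPE_MASK  0x7F	/* mirrors IXGBE_PACKET_TYPE_MASK */

/* Stand-in for ptype_table[]; the real entries OR together RTE_PTYPE_* bits. */
static const uint32_t toy_table[0x80] = {
	[0x01] = 1,	/* index of IXGBE_PACKET_TYPE_IPV4     */
	[0x11] = 2,	/* index of IXGBE_PACKET_TYPE_IPV4_TCP */
};

int main(void)
{
	/* pkt_info as read from rxd.wb.lower.lo_dword.hs_rss.pkt_info */
	uint16_t pkt_info = 0x0110;
	uint8_t idx = (pkt_info >> PACKET_TYPE_SHIFT) & PACKET_TYPE_MASK;

	printf("index 0x%02x -> table entry %u\n", (unsigned)idx,
	       (unsigned)toy_table[idx]);
	return 0;
}

With pkt_info = 0x0110 the index is 0x11, i.e. IXGBE_PACKET_TYPE_IPV4_TCP,
which the real table maps to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_TCP.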