X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_rxtx.c;h=81af81441eeaa064b1f9e3fd6fd5dd780f5263ae;hb=fc1134c79283f8a3c954c32020390a22efef95e6;hp=c0d1a4bea31ac2833bcac2af2d3481b3b2b07b4a;hpb=e73e3547ce54d7ae48dff82d87efac0b7a30692a;p=dpdk.git

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index c0d1a4bea3..81af81441e 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -70,7 +70,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
 
-	rx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
+	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
 	rx_ctx.qlen = rxq->nb_rx_desc;
 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
@@ -442,7 +442,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	txq_elem.num_txqs = 1;
 	txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
 
-	tx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;
+	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
 	tx_ctx.qlen = txq->nb_tx_desc;
 	tx_ctx.pf_num = hw->pf_id;
 	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
@@ -663,8 +663,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	/* Zero all the descriptors in the ring. */
 	memset(rz->addr, 0, ring_size);
 
-	rxq->rx_ring_phys_addr = rz->phys_addr;
-	rxq->rx_ring = (union ice_rx_desc *)rz->addr;
+	rxq->rx_ring_dma = rz->iova;
+	rxq->rx_ring = rz->addr;
 
 #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
@@ -881,8 +881,8 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
-	txq->tx_ring_phys_addr = tz->phys_addr;
-	txq->tx_ring = (struct ice_tx_desc *)tz->addr;
+	txq->tx_ring_dma = tz->iova;
+	txq->tx_ring = tz->addr;
 
 	/* Allocate software ring */
 	txq->sw_ring =
@@ -1741,15 +1741,72 @@ ice_recv_pkts(void *rx_queue,
 	return nb_rx;
 }
 
+static inline void
+ice_parse_tunneling_params(uint64_t ol_flags,
+			    union ice_tx_offload tx_offload,
+			    uint32_t *cd_tunneling)
+{
+	/* EIPT: External (outer) IP header type */
+	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
+	else if (ol_flags & PKT_TX_OUTER_IPV4)
+		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
+	else if (ol_flags & PKT_TX_OUTER_IPV6)
+		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
+
+	/* EIPLEN: External (outer) IP header length, in DWords */
+	*cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
+		ICE_TXD_CTX_QW0_EIPLEN_S;
+
+	/* L4TUNT: L4 Tunneling Type */
+	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+	case PKT_TX_TUNNEL_IPIP:
+		/* for non UDP / GRE tunneling, set to 00b */
+		break;
+	case PKT_TX_TUNNEL_VXLAN:
+	case PKT_TX_TUNNEL_GENEVE:
+		*cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
+		break;
+	case PKT_TX_TUNNEL_GRE:
+		*cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
+		break;
+	default:
+		PMD_TX_LOG(ERR, "Tunnel type not supported");
+		return;
+	}
+
+	/* L4TUNLEN: L4 Tunneling Length, in Words
+	 *
+	 * We depend on the app to set rte_mbuf.l2_len correctly.
+	 * For IP in GRE it should be set to the length of the GRE
+	 * header;
+	 * For MAC in GRE or MAC in UDP it should be set to the length
+	 * of the GRE or UDP headers plus the inner MAC up to and
+	 * including its last Ethertype.
+	 * If MPLS labels exist, it should include them as well.
+	 */
+	*cd_tunneling |= (tx_offload.l2_len >> 1) <<
+		ICE_TXD_CTX_QW0_NATLEN_S;
+
+	if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
+	    (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+	    (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
+		*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
+}
+
 static inline void
 ice_txd_enable_checksum(uint64_t ol_flags,
 			uint32_t *td_cmd,
 			uint32_t *td_offset,
 			union ice_tx_offload tx_offload)
 {
-	/* L2 length must be set. */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      ICE_TX_DESC_LEN_MACLEN_S;
+	/* Set MACLEN */
+	if (ol_flags & PKT_TX_TUNNEL_MASK)
+		*td_offset |= (tx_offload.outer_l2_len >> 1)
+			<< ICE_TX_DESC_LEN_MACLEN_S;
+	else
+		*td_offset |= (tx_offload.l2_len >> 1)
+			<< ICE_TX_DESC_LEN_MACLEN_S;
 
 	/* Enable L3 checksum offloads */
 	if (ol_flags & PKT_TX_IP_CKSUM) {
@@ -1863,7 +1920,10 @@ ice_build_ctob(uint32_t td_cmd,
 static inline uint16_t
 ice_calc_context_desc(uint64_t flags)
 {
-	static uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_QINQ;
+	static uint64_t mask = PKT_TX_TCP_SEG |
+		PKT_TX_QINQ |
+		PKT_TX_OUTER_IP_CKSUM |
+		PKT_TX_TUNNEL_MASK;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -1880,15 +1940,9 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
 		return ctx_desc;
 	}
 
-	/**
-	 * in case of non tunneling packet, the outer_l2_len and
-	 * outer_l3_len must be 0.
-	 */
-	hdr_len = tx_offload.outer_l2_len +
-		  tx_offload.outer_l3_len +
-		  tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
+	hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+		   tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
 
 	cd_cmd = ICE_TX_CTX_DESC_TSO;
 	cd_tso_len = mbuf->pkt_len - hdr_len;
@@ -1909,6 +1963,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct ice_tx_entry *txe, *txn;
 	struct rte_mbuf *tx_pkt;
 	struct rte_mbuf *m_seg;
+	uint32_t cd_tunneling_params;
 	uint16_t tx_id;
 	uint16_t nb_tx;
 	uint16_t nb_used;
@@ -1979,6 +2034,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			td_tag = tx_pkt->vlan_tci;
 		}
 
+		/* Fill in tunneling parameters if necessary */
+		cd_tunneling_params = 0;
+		if (ol_flags & PKT_TX_TUNNEL_MASK)
+			ice_parse_tunneling_params(ol_flags, tx_offload,
+						   &cd_tunneling_params);
+
 		/* Enable checksum offloading */
 		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
 			ice_txd_enable_checksum(ol_flags, &td_cmd,
@@ -2004,6 +2065,9 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
 
+			ctx_txd->tunneling_params =
+				rte_cpu_to_le_32(cd_tunneling_params);
+
 			/* TX context descriptor based double VLAN insert */
 			if (ol_flags & PKT_TX_QINQ) {
 				cd_l2tag2 = tx_pkt->vlan_tci_outer;
@@ -2268,35 +2332,46 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 	int i;
 	bool use_avx2 = false;
 
-	if (!ice_rx_vec_dev_check(dev)) {
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			(void)ice_rxq_vec_setup(rxq);
-		}
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
+			ad->rx_vec_allowed = true;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				if (rxq && ice_rxq_vec_setup(rxq)) {
+					ad->rx_vec_allowed = false;
+					break;
+				}
+			}
 
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
-			use_avx2 = true;
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+				use_avx2 = true;
+		} else {
+			ad->rx_vec_allowed = false;
+		}
+	}
+
+	if (ad->rx_vec_allowed) {
 		if (dev->data->scattered_rx) {
 			PMD_DRV_LOG(DEBUG,
-				    "Using %sVector Scattered Rx (port %d).",
-				    use_avx2 ? "avx2 " : "",
-				    dev->data->port_id);
+					"Using %sVector Scattered Rx (port %d).",
+					use_avx2 ? "avx2 " : "",
+					dev->data->port_id);
 			dev->rx_pkt_burst = use_avx2 ?
-					    ice_recv_scattered_pkts_vec_avx2 :
-					    ice_recv_scattered_pkts_vec;
+					ice_recv_scattered_pkts_vec_avx2 :
+					ice_recv_scattered_pkts_vec;
 		} else {
 			PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
-				    use_avx2 ? "avx2 " : "",
-				    dev->data->port_id);
+					use_avx2 ? "avx2 " : "",
+					dev->data->port_id);
 			dev->rx_pkt_burst = use_avx2 ?
-					    ice_recv_pkts_vec_avx2 :
-					    ice_recv_pkts_vec;
+					ice_recv_pkts_vec_avx2 :
+					ice_recv_pkts_vec;
 		}
-		return;
 	}
+
 #endif
 
 	if (dev->data->scattered_rx) {
@@ -2370,20 +2445,20 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			/**
 			 * MSS outside the range are considered malicious
 			 */
-			rte_errno = -EINVAL;
+			rte_errno = EINVAL;
 			return i;
 		}
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
 		ret = rte_net_intel_cksum_prepare(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
@@ -2400,16 +2475,27 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 	int i;
 	bool use_avx2 = false;
 
-	if (!ice_tx_vec_dev_check(dev)) {
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
-			txq = dev->data->tx_queues[i];
-			(void)ice_txq_vec_setup(txq);
-		}
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (!ice_tx_vec_dev_check(dev)) {
+			ad->tx_vec_allowed = true;
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				txq = dev->data->tx_queues[i];
+				if (txq && ice_txq_vec_setup(txq)) {
+					ad->tx_vec_allowed = false;
+					break;
+				}
+			}
 
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
-			use_avx2 = true;
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+				use_avx2 = true;
+
+		} else {
+			ad->tx_vec_allowed = false;
+		}
+	}
+	if (ad->tx_vec_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
 			    use_avx2 ? "avx2 " : "",
 			    dev->data->port_id);
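
The tunneling Tx path added above depends on the application filling the rte_mbuf offload fields as described in the ice_parse_tunneling_params() comment. The sketch below is illustration only and is not part of this patch; setup_vxlan_tx_offload() is a hypothetical helper, and the header lengths assume plain Ethernet/IPv4/UDP/VXLAN headers with no options, VLAN tags or MPLS labels. It shows one way a caller could populate those fields for a VXLAN-encapsulated IPv4/TCP packet.

#include <rte_mbuf.h>

static void
setup_vxlan_tx_offload(struct rte_mbuf *m, uint16_t tso_segsz)
{
	m->outer_l2_len = 14;		/* outer Ethernet header */
	m->outer_l3_len = 20;		/* outer IPv4 header, no options */
	/*
	 * For MAC-in-UDP tunnels, l2_len covers the outer UDP and VXLAN
	 * headers plus the inner Ethernet header, as the comment in
	 * ice_parse_tunneling_params() requires.
	 */
	m->l2_len = 8 + 8 + 14;
	m->l3_len = 20;			/* inner IPv4 header, no options */
	m->l4_len = 20;			/* inner TCP header, no options */

	m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM;

	if (tso_segsz != 0) {
		/*
		 * Tunneled TSO: PKT_TX_TCP_SEG implies the inner TCP
		 * checksum, and tso_segsz carries the MSS.
		 */
		m->ol_flags |= PKT_TX_TCP_SEG;
		m->tso_segsz = tso_segsz;
	} else {
		/* Inner TCP checksum offload only. */
		m->ol_flags |= PKT_TX_TCP_CKSUM;
	}
}

With these flags set, ice_calc_context_desc() requests a context descriptor, ice_parse_tunneling_params() selects ICE_TXD_CTX_UDP_TUNNELING, and ice_txd_enable_checksum() uses outer_l2_len for MACLEN, which are the new branches introduced in this diff.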