X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=6cfbb582e21e652cab1f21001acc8c649ee8599c;hb=bd885ab120e2335f978a28ee0aa4303017390e15;hp=29d385c0620dc526b5dbe17855d571776871db78;hpb=bc4c8309b76faa5157e4f0a456c4160ed21efece;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 29d385c062..6cfbb582e2 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -34,6 +34,7 @@
 #include <rte_mbuf.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
+#include <rte_security_driver.h>
 #include <rte_prefetch.h>
 #include <rte_udp.h>
 #include <rte_tcp.h>
@@ -42,6 +43,7 @@
 #include <rte_errno.h>
 #include <rte_ip.h>
 #include <rte_net.h>
+#include <rte_vect.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -452,7 +454,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		seqnum_seed |= tx_offload.l2_len
 			       << IXGBE_ADVTXD_TUNNEL_LEN;
 	}
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	if (ol_flags & PKT_TX_SEC_OFFLOAD) {
 		union ixgbe_crypto_tx_desc_md *md =
 				(union ixgbe_crypto_tx_desc_md *)mdata;
@@ -645,7 +647,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
 	union ixgbe_tx_offload tx_offload;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	uint8_t use_ipsec;
 #endif
 
@@ -675,7 +677,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
 #endif
 
@@ -689,11 +691,11 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
 			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
 			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 			if (use_ipsec) {
 				union ixgbe_crypto_tx_desc_md *ipsec_mdata =
 					(union ixgbe_crypto_tx_desc_md *)
-						&tx_pkt->udata64;
+						rte_security_dynfield(tx_pkt);
 				tx_offload.sa_idx = ipsec_mdata->sa_idx;
 				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
 			}
@@ -858,7 +860,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 
 			ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-				tx_offload, &tx_pkt->udata64);
+				tx_offload,
+				rte_security_dynfield(tx_pkt));
 
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
@@ -876,7 +879,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 		if (use_ipsec)
 			olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
 #endif
@@ -1460,7 +1463,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
 	}
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	if (rx_status & IXGBE_RXD_STAT_SECP) {
 		pkt_flags |= PKT_RX_SEC_OFFLOAD;
 		if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
@@ -2400,11 +2403,12 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
 {
 	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
 	if (txq->offloads == 0 &&
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 			!(txq->using_ipsec) &&
 #endif
 			txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) {
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
 				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
 					txq->sw_ring_v != NULL)) {
 			return ixgbe_tx_done_cleanup_vec(txq, free_cnt);
@@ -2496,13 +2500,14 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if ((txq->offloads == 0) &&
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 			!(txq->using_ipsec) &&
 #endif
 			(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
 		dev->tx_pkt_prepare = NULL;
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
 				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
 					ixgbe_txq_vec_setup(txq) == 0)) {
 			PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
@@ -2555,7 +2560,7 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	    hw->mac.type == ixgbe_mac_X550EM_a)
 		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
 		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
 #endif
@@ -2723,7 +2728,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
 			DEV_TX_OFFLOAD_SECURITY);
 #endif
@@ -3006,7 +3011,7 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
 	    hw->mac.type == ixgbe_mac_X550EM_a)
 		offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	if (dev->security_ctx)
 		offloads |= DEV_RX_OFFLOAD_SECURITY;
 #endif
@@ -4744,7 +4749,8 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 * conditions to be met and Rx Bulk Allocation should be allowed.
 	 */
 	if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
-	    !adapter->rx_bulk_alloc_allowed) {
+	    !adapter->rx_bulk_alloc_allowed ||
+			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
 		PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
 				    "preconditions",
 			     dev->data->port_id);
@@ -4835,7 +4841,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 			struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 			rxq->rx_using_sse = rx_using_sse;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 			rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
 					DEV_RX_OFFLOAD_SECURITY);
 #endif
@@ -5347,7 +5353,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 			ixgbe_setup_loopback_link_x540_x550(hw, true);
 	}
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
 			DEV_RX_OFFLOAD_SECURITY) ||
 			(dev->data->dev_conf.txmode.offloads &
@@ -5916,7 +5922,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
-/* Stubs needed for linkage when CONFIG_RTE_ARCH_PPC_64 is set */
+/* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */
 #if defined(RTE_ARCH_PPC_64)
 int
 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
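
For reference, a minimal standalone sketch (not part of the patch) of the two datapath helpers this diff switches to: rte_security_dynfield(), which replaces the removed mbuf udata64 field for carrying inline-crypto metadata, and rte_vect_get_max_simd_bitwidth(), which the Rx/Tx setup paths now consult before picking the vector code. It assumes DPDK 20.11 headers; the example_* names are invented for illustration, rte_security_dynfield() is a driver-side (internal) helper, and the dynamic field must already have been registered via rte_security_dynfield_register() before it is dereferenced.

#include <stdbool.h>
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_security_driver.h>	/* rte_security_dynfield() */
#include <rte_vect.h>			/* rte_vect_get_max_simd_bitwidth() */

/* Select the vector datapath only when EAL allows at least 128-bit SIMD. */
static bool
example_vector_path_allowed(void)
{
	return rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128;
}

/*
 * Read the per-packet security metadata from the mbuf dynamic field.
 * ixgbe casts this 64-bit value to union ixgbe_crypto_tx_desc_md; it is
 * returned raw here to keep the sketch driver-agnostic.
 */
static uint64_t
example_read_sec_metadata(struct rte_mbuf *m)
{
	return *rte_security_dynfield(m);
}

In ixgbe itself the SIMD-bitwidth check appears in ixgbe_set_tx_function(), ixgbe_dev_tx_done_cleanup() and ixgbe_set_rx_function(), while the dynamic-field access replaces &tx_pkt->udata64 in the ixgbe_xmit_pkts() hot path shown above.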