diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 0038dfbb4b..38a014a92d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -93,6 +93,7 @@
 		PKT_TX_TCP_SEG |		 \
 		PKT_TX_MACSEC |			 \
 		PKT_TX_OUTER_IP_CKSUM |		 \
+		PKT_TX_SEC_OFFLOAD |		 \
 		IXGBE_TX_IEEE1588_TMST)
 
 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -395,7 +396,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 static inline void
 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+		union ixgbe_crypto_tx_desc_md *mdata)
 {
 	uint32_t type_tucmd_mlhl;
 	uint32_t mss_l4len_idx = 0;
@@ -479,6 +481,17 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		seqnum_seed |= tx_offload.l2_len <<
 				IXGBE_ADVTXD_TUNNEL_LEN;
 	}
+	if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+		seqnum_seed |=
+			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
+		type_tucmd_mlhl |= mdata->enc ?
+				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+				IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+		type_tucmd_mlhl |=
+			(mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+		tx_offload_mask.sa_idx |= ~0;
+		tx_offload_mask.sec_pad_len |= ~0;
+	}
 
 	txq->ctx_cache[ctx_idx].flags = ol_flags;
 	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
@@ -657,6 +670,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
 	union ixgbe_tx_offload tx_offload;
+	uint8_t use_ipsec;
 
 	tx_offload.data[0] = 0;
 	tx_offload.data[1] = 0;
@@ -684,6 +698,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
+		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/* If hardware offload required */
 		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
@@ -695,6 +710,13 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
 			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
 			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+			if (use_ipsec) {
+				union ixgbe_crypto_tx_desc_md *ipsec_mdata =
+					(union ixgbe_crypto_tx_desc_md *)
+						&tx_pkt->udata64;
+				tx_offload.sa_idx = ipsec_mdata->sa_idx;
+				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+			}
 
 			/* If new context need be built or reuse the exist ctx. */
 			ctx = what_advctx_update(txq, tx_ol_req,
@@ -855,7 +877,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				}
 
 				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-					tx_offload);
+					tx_offload,
+					(union ixgbe_crypto_tx_desc_md *)
+					&tx_pkt->udata64);
 
 				txe->last_id = tx_last;
 				tx_id = txe->next_id;
@@ -873,6 +897,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		if (use_ipsec)
+			olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
 
 		m_seg = tx_pkt;
 		do {
@@ -1447,6 +1473,12 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
 	}
 
+	if (rx_status & IXGBE_RXD_STAT_SECP) {
+		pkt_flags |= PKT_RX_SEC_OFFLOAD;
+		if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
+			pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+	}
+
 	return pkt_flags;
 }
 
@@ -2364,8 +2396,10 @@ void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
-			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+			(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) &&
+			!(dev->data->dev_conf.txmode.offloads
+				& DEV_TX_OFFLOAD_SECURITY)) {
 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
 		dev->tx_pkt_prepare = NULL;
 #ifdef RTE_IXGBE_INC_VECTOR
@@ -2535,6 +2569,8 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->txq_flags = tx_conf->txq_flags;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+			DEV_TX_OFFLOAD_SECURITY);
 
 	/*
 	 * Modification to set VFTDT for virtual function if vf is detected
@@ -4519,6 +4555,8 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 			struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 			rxq->rx_using_sse = rx_using_sse;
+			rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+					DEV_RX_OFFLOAD_SECURITY);
 		}
 	}
 
@@ -5006,6 +5044,19 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
 		ixgbe_setup_loopback_link_82599(hw);
 
+	if ((dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_SECURITY) ||
+			(dev->data->dev_conf.txmode.offloads &
+			DEV_TX_OFFLOAD_SECURITY)) {
+		ret = ixgbe_crypto_enable_ipsec(dev);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR,
+				    "ixgbe_crypto_enable_ipsec fails with %d.",
+				    ret);
+			return ret;
+		}
+	}
+
 	return 0;
 }
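
Usage sketch (illustrative, not part of the patch): the hunks above gate everything on DEV_RX_OFFLOAD_SECURITY / DEV_TX_OFFLOAD_SECURITY being set in the port configuration, and on per-mbuf flags (PKT_TX_SEC_OFFLOAD on transmit, PKT_RX_SEC_OFFLOAD and PKT_RX_SEC_OFFLOAD_FAILED on receive). Below is a minimal application-side view of those knobs, assuming a DPDK 17.11-era API; queue and security-session setup is elided and the function names are hypothetical. On Tx, the per-packet crypto metadata the PMD reads from mbuf->udata64 would normally be filled in via rte_security_set_pkt_metadata() on an inline-crypto rte_security session.

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	/* Request inline IPsec on both paths; with these bits set,
	 * ixgbe_dev_rxtx_start() calls ixgbe_crypto_enable_ipsec() and
	 * the simple/vector Tx code paths are not selected. */
	static int
	port_configure_inline_ipsec(uint16_t port_id, struct rte_eth_conf *conf)
	{
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
		conf->txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
		return rte_eth_dev_configure(port_id, 1, 1, conf);
	}

	/* Mark a packet for inline crypto; ixgbe_xmit_pkts() then emits a
	 * context descriptor carrying the SA index and ESP pad length. */
	static void
	mark_for_inline_crypto(struct rte_mbuf *m)
	{
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	}

	/* On Rx, rx_desc_error_to_pkt_flags() translates the descriptor's
	 * SECP status bit into mbuf flags; reject packets whose ESP
	 * verification failed. */
	static int
	rx_ipsec_ok(const struct rte_mbuf *m)
	{
		if (!(m->ol_flags & PKT_RX_SEC_OFFLOAD))
			return 1;	/* not security-processed */
		return !(m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED);
	}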