X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ftxgbe%2Ftxgbe_ipsec.c;h=9f4eee4081da8445b34b7a7c18302a2cffc7cceb;hb=9f1b1fbbdd483836f30004ce5db3b99080ebf718;hp=ebf23493eca1d5e0a2f6a0db1eade32f8a55b228;hpb=87d8a2a4a884900ed0f5bc6669c72cdd074f5231;p=dpdk.git

diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index ebf23493ec..9f4eee4081 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -2,7 +2,7 @@
  * Copyright(c) 2015-2020
  */
 
-#include 
+#include 
 #include 
 #include 
 
@@ -16,6 +16,55 @@
 	 (a).ipv6[2] == (b).ipv6[2] && \
 	 (a).ipv6[3] == (b).ipv6[3])
 
+static void
+txgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+	int i = 0;
+
+	/* clear Rx IP table*/
+	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+		uint16_t index = i << 3;
+		uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
+				TXGBE_IPSRXIDX_TB_IP | index;
+		wr32(hw, TXGBE_IPSRXADDR(0), 0);
+		wr32(hw, TXGBE_IPSRXADDR(1), 0);
+		wr32(hw, TXGBE_IPSRXADDR(2), 0);
+		wr32(hw, TXGBE_IPSRXADDR(3), 0);
+		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+	}
+
+	/* clear Rx SPI and Rx/Tx SA tables*/
+	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+		uint32_t index = i << 3;
+		uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
+				TXGBE_IPSRXIDX_TB_SPI | index;
+		wr32(hw, TXGBE_IPSRXSPI, 0);
+		wr32(hw, TXGBE_IPSRXADDRIDX, 0);
+		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+		reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_KEY | index;
+		wr32(hw, TXGBE_IPSRXKEY(0), 0);
+		wr32(hw, TXGBE_IPSRXKEY(1), 0);
+		wr32(hw, TXGBE_IPSRXKEY(2), 0);
+		wr32(hw, TXGBE_IPSRXKEY(3), 0);
+		wr32(hw, TXGBE_IPSRXSALT, 0);
+		wr32(hw, TXGBE_IPSRXMODE, 0);
+		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+		reg_val = TXGBE_IPSTXIDX_WRITE | index;
+		wr32(hw, TXGBE_IPSTXKEY(0), 0);
+		wr32(hw, TXGBE_IPSTXKEY(1), 0);
+		wr32(hw, TXGBE_IPSTXKEY(2), 0);
+		wr32(hw, TXGBE_IPSTXKEY(3), 0);
+		wr32(hw, TXGBE_IPSTXSALT, 0);
+		wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+	}
+
+	memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
+	memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
+	memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
+}
+
 static int
 txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
 {
@@ -549,6 +598,93 @@ txgbe_crypto_capabilities_get(void *device __rte_unused)
 	return txgbe_security_capabilities;
 }
 
+int
+txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t reg;
+	uint64_t rx_offloads;
+	uint64_t tx_offloads;
+
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+	/* sanity checks */
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+		return -1;
+	}
+	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+		return -1;
+	}
+
+	/* Set TXGBE_SECTXBUFAF to 0x14 as required in the datasheet*/
+	wr32(hw, TXGBE_SECTXBUFAF, 0x14);
+
+	/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+	 * hang will occur with heavy traffic.
+	 */
+	reg = rd32(hw, TXGBE_SECTXIFG);
+	reg = (reg & ~TXGBE_SECTXIFG_MIN_MASK) | TXGBE_SECTXIFG_MIN(0x3);
+	wr32(hw, TXGBE_SECTXIFG, reg);
+
+	reg = rd32(hw, TXGBE_SECRXCTL);
+	reg |= TXGBE_SECRXCTL_CRCSTRIP;
+	wr32(hw, TXGBE_SECRXCTL, reg);
+
+	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+		wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
+		reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
+		if (reg != 0) {
+			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+			return -1;
+		}
+	}
+	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+		wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
+		reg = rd32(hw, TXGBE_SECTXCTL);
+		if (reg != TXGBE_SECTXCTL_STFWD) {
+			PMD_DRV_LOG(ERR, "Error enabling Tx Crypto");
+			return -1;
+		}
+	}
+
+	txgbe_crypto_clear_ipsec_tables(dev);
+
+	return 0;
+}
+
+int
+txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+				      const void *ip_spec,
+				      uint8_t is_ipv6)
+{
+	struct txgbe_crypto_session *ic_session =
+			get_sec_session_private_data(sess);
+
+	if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		if (is_ipv6) {
+			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+			ic_session->src_ip.type = IPv6;
+			ic_session->dst_ip.type = IPv6;
+			rte_memcpy(ic_session->src_ip.ipv6,
+				   ipv6->hdr.src_addr, 16);
+			rte_memcpy(ic_session->dst_ip.ipv6,
+				   ipv6->hdr.dst_addr, 16);
+		} else {
+			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+			ic_session->src_ip.type = IPv4;
+			ic_session->dst_ip.type = IPv4;
+			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+		}
+		return txgbe_crypto_add_sa(ic_session);
+	}
+
+	return 0;
+}
+
 static struct rte_security_ops txgbe_security_ops = {
 	.session_create = txgbe_crypto_create_session,
 	.session_get_size = txgbe_crypto_session_get_size,
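
Note: txgbe_crypto_enable_ipsec() above is driven entirely by the offload flags the application passes at configure time. A minimal sketch of that application side follows; the port id, queue counts, helper name and omitted error handling are illustrative assumptions, not part of this patch.

/* Illustrative only: request inline IPsec offload so the txgbe PMD runs
 * txgbe_crypto_enable_ipsec() with DEV_RX_OFFLOAD_SECURITY and
 * DEV_TX_OFFLOAD_SECURITY set. LRO and KEEP_CRC stay disabled, matching
 * the sanity checks in the patch.
 */
#include <rte_ethdev.h>

static int
configure_port_with_inline_ipsec(uint16_t port_id)
{
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.offloads = DEV_RX_OFFLOAD_SECURITY,
		},
		.txmode = {
			.offloads = DEV_TX_OFFLOAD_SECURITY,
		},
	};

	/* one Rx and one Tx queue, chosen only for the example */
	return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}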