1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
5 #include <rte_ethdev_pci.h>
6 #include <rte_security_driver.h>
7 #include <rte_cryptodev.h>
9 #include "base/txgbe.h"
10 #include "txgbe_ethdev.h"
11 #include "txgbe_ipsec.h"
/*
 * CMP_IP - compare two IP-address table entries word for word.
 *
 * Both operands must expose a 4 x 32-bit ipv6[] array; the macro
 * evaluates to non-zero when all four words are equal.  Each
 * argument is expanded four times, so do not pass expressions
 * with side effects.
 */
#define CMP_IP(a, b) (\
	(a).ipv6[0] == (b).ipv6[0] && \
	(a).ipv6[1] == (b).ipv6[1] && \
	(a).ipv6[2] == (b).ipv6[2] && \
	(a).ipv6[3] == (b).ipv6[3])
/*
 * txgbe_crypto_add_sa - program an IPsec SA into the NIC's hardware tables.
 *
 * Inbound (AUTHENTICATED_DECRYPTION) sessions: find or allocate an entry in
 * the shared Rx IP table, allocate a free Rx SA slot, then write the IP,
 * SPI and key/salt/mode hardware tables through the indexed TXGBE_IPSRXIDX
 * register interface.  Outbound sessions: allocate a free Tx SA slot and
 * program the Tx key/salt registers through TXGBE_IPSTXIDX.
 *
 * NOTE(review): several lines (declarations of i/ip_index/sa_index/reg_val/
 * key, the error-return statements, the malloc NULL checks, free(key) and
 * the final return) are elided from this excerpt; the comments below cover
 * only the visible code.
 */
20 txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
22 struct rte_eth_dev *dev = ic_session->dev;
23 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
24 struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
/* Rx (inbound/decrypt) path. */
28 if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
/* Reuse an existing Rx IP table entry whose address matches dst_ip. */
32 /* Find a match in the IP table*/
33 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
34 if (CMP_IP(priv->rx_ip_tbl[i].ip,
35 ic_session->dst_ip)) {
/* No match: grab the first entry no SA references (ref_count == 0). */
40 /* If no match, find a free entry in the IP table*/
42 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
43 if (priv->rx_ip_tbl[i].ref_count == 0) {
50 /* Fail if no match and no free entries*/
53 "No free entry left in the Rx IP table\n");
57 /* Find a free entry in the SA table*/
58 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
59 if (priv->rx_sa_tbl[i].used == 0) {
64 /* Fail if no free entries*/
67 "No free entry left in the Rx SA table\n");
/*
 * Fill the software shadow of the IP entry.  All four words are
 * copied; for IPv4 the hardware write below only uses word 3.
 */
71 priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
72 ic_session->dst_ip.ipv6[0];
73 priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
74 ic_session->dst_ip.ipv6[1];
75 priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
76 ic_session->dst_ip.ipv6[2];
77 priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
78 ic_session->dst_ip.ipv6[3];
/* ref_count tracks how many Rx SAs share this IP entry. */
79 priv->rx_ip_tbl[ip_index].ref_count++;
/* Fill the software shadow of the SA entry and its mode bits. */
81 priv->rx_sa_tbl[sa_index].spi = ic_session->spi;
82 priv->rx_sa_tbl[sa_index].ip_index = ip_index;
83 priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
84 if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION)
85 priv->rx_sa_tbl[sa_index].mode |=
86 (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
87 if (ic_session->dst_ip.type == IPv6) {
88 priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
89 priv->rx_ip_tbl[ip_index].ip.type = IPv6;
90 } else if (ic_session->dst_ip.type == IPv4) {
91 priv->rx_ip_tbl[ip_index].ip.type = IPv4;
93 priv->rx_sa_tbl[sa_index].used = 1;
/*
 * Program the hardware: stage values in the IPSRX* data registers,
 * then commit with an indexed write to TXGBE_IPSRXIDX (wr32w polls
 * the WRITE bit with a 1000 us timeout).
 */
95 /* write IP table entry*/
96 reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
97 TXGBE_IPSRXIDX_TB_IP | (ip_index << 3);
/* IPv4: words 0-2 are zeroed, the address goes in word 3 only. */
98 if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
99 wr32(hw, TXGBE_IPSRXADDR(0), 0);
100 wr32(hw, TXGBE_IPSRXADDR(1), 0);
101 wr32(hw, TXGBE_IPSRXADDR(2), 0);
102 wr32(hw, TXGBE_IPSRXADDR(3),
103 priv->rx_ip_tbl[ip_index].ip.ipv4);
105 wr32(hw, TXGBE_IPSRXADDR(0),
106 priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
107 wr32(hw, TXGBE_IPSRXADDR(1),
108 priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
109 wr32(hw, TXGBE_IPSRXADDR(2),
110 priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
111 wr32(hw, TXGBE_IPSRXADDR(3),
112 priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
114 wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
/* SPI table entry links the SPI to the IP-table index above. */
116 /* write SPI table entry*/
117 reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
118 TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
119 wr32(hw, TXGBE_IPSRXSPI,
120 priv->rx_sa_tbl[sa_index].spi);
121 wr32(hw, TXGBE_IPSRXADDRIDX,
122 priv->rx_sa_tbl[sa_index].ip_index);
123 wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
/*
 * Key table entry.  The key is copied to a scratch buffer, then the
 * four 32-bit words are written high-to-low (key[12] into KEY(0)),
 * each byte-swapped to big-endian as the hardware expects.
 * NOTE(review): the malloc NULL check and the matching free(key) are
 * in elided lines — verify they exist.
 */
125 /* write Key table entry*/
126 key = malloc(ic_session->key_len);
130 memcpy(key, ic_session->key, ic_session->key_len);
132 reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
133 TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
134 wr32(hw, TXGBE_IPSRXKEY(0),
135 rte_cpu_to_be_32(*(uint32_t *)&key[12]));
136 wr32(hw, TXGBE_IPSRXKEY(1),
137 rte_cpu_to_be_32(*(uint32_t *)&key[8]));
138 wr32(hw, TXGBE_IPSRXKEY(2),
139 rte_cpu_to_be_32(*(uint32_t *)&key[4]));
140 wr32(hw, TXGBE_IPSRXKEY(3),
141 rte_cpu_to_be_32(*(uint32_t *)&key[0]));
142 wr32(hw, TXGBE_IPSRXSALT,
143 rte_cpu_to_be_32(ic_session->salt));
144 wr32(hw, TXGBE_IPSRXMODE,
145 priv->rx_sa_tbl[sa_index].mode);
146 wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
/* Tx (outbound/encrypt) path. */
149 } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
153 /* Find a free entry in the SA table*/
154 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
155 if (priv->tx_sa_tbl[i].used == 0) {
160 /* Fail if no free entries*/
163 "No free entry left in the Tx SA table\n");
/* Tx SPI is stored pre-swapped to big-endian, unlike the Rx table. */
167 priv->tx_sa_tbl[sa_index].spi =
168 rte_cpu_to_be_32(ic_session->spi);
/*
 * NOTE(review): 'i' is used here while 'sa_index' is used on the
 * surrounding lines; they appear to hold the same value after the
 * (elided) free-slot break, but using sa_index would be consistent
 * and safer — confirm against the full source.
 */
169 priv->tx_sa_tbl[i].used = 1;
170 ic_session->sa_index = sa_index;
172 key = malloc(ic_session->key_len);
176 memcpy(key, ic_session->key, ic_session->key_len);
/*
 * Same reversed-word, big-endian key layout as the Rx path.
 * NOTE(review): reg_val is built from TXGBE_IPSRXIDX_* flags but
 * committed to TXGBE_IPSTXIDX — presumably the bit positions match;
 * confirm against the register definitions.
 */
178 /* write Key table entry*/
179 reg_val = TXGBE_IPSRXIDX_ENA |
180 TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
181 wr32(hw, TXGBE_IPSTXKEY(0),
182 rte_cpu_to_be_32(*(uint32_t *)&key[12]));
183 wr32(hw, TXGBE_IPSTXKEY(1),
184 rte_cpu_to_be_32(*(uint32_t *)&key[8]));
185 wr32(hw, TXGBE_IPSTXKEY(2),
186 rte_cpu_to_be_32(*(uint32_t *)&key[4]));
187 wr32(hw, TXGBE_IPSTXKEY(3),
188 rte_cpu_to_be_32(*(uint32_t *)&key[0]));
189 wr32(hw, TXGBE_IPSTXSALT,
190 rte_cpu_to_be_32(ic_session->salt));
191 wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
/*
 * txgbe_crypto_create_session - rte_security .session_create callback.
 *
 * Validates that the transform is AES-GCM AEAD, checks that the SECURITY
 * offload matching the requested direction is enabled on the port, and
 * fills a txgbe_crypto_session object taken from 'mempool'.  On every
 * visible failure path the object is returned to the mempool (the error
 * return statements themselves are in elided lines).
 */
200 txgbe_crypto_create_session(void *device,
201 struct rte_security_session_conf *conf,
202 struct rte_security_session *session,
203 struct rte_mempool *mempool)
205 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
206 struct txgbe_crypto_session *ic_session = NULL;
207 struct rte_crypto_aead_xform *aead_xform;
/*
 * NOTE(review): "ð_dev" below is mojibake for "&eth_dev" (the HTML
 * entity "&eth;" was decoded during extraction) — restore before build.
 */
208 struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf;
210 if (rte_mempool_get(mempool, (void **)&ic_session)) {
211 PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
/* Only AEAD AES-GCM transforms are supported by this hardware path. */
215 if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
216 conf->crypto_xform->aead.algo !=
217 RTE_CRYPTO_AEAD_AES_GCM) {
218 PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
219 rte_mempool_put(mempool, (void *)ic_session);
222 aead_xform = &conf->crypto_xform->aead;
/* Map the IPsec direction onto the driver op, gated on the offload flag. */
224 if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
225 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
226 ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
228 PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
229 rte_mempool_put(mempool, (void *)ic_session);
233 if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
234 ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
236 PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
237 rte_mempool_put(mempool, (void *)ic_session);
/* The session borrows the caller's key buffer; no copy is made here. */
242 ic_session->key = aead_xform->key.data;
243 ic_session->key_len = aead_xform->key.length;
/*
 * The 4-byte GCM salt is read from just past the key material —
 * assumes the caller allocated key.length + 4 bytes; TODO confirm
 * against the rte_security AES-GCM key-layout convention.
 */
244 memcpy(&ic_session->salt,
245 &aead_xform->key.data[aead_xform->key.length], 4);
246 ic_session->spi = conf->ipsec.spi;
247 ic_session->dev = eth_dev;
249 set_sec_session_private_data(session, ic_session);
/*
 * Only egress (encryption) SAs are programmed into hardware here;
 * ingress SA programming is not visible in this excerpt — presumably
 * it happens elsewhere (e.g. on flow creation), verify.
 */
251 if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
252 if (txgbe_crypto_add_sa(ic_session)) {
253 PMD_DRV_LOG(ERR, "Failed to add SA\n");
254 rte_mempool_put(mempool, (void *)ic_session);
/*
 * txgbe_crypto_capabilities_get - rte_security .capabilities_get callback.
 *
 * Returns a pointer to a static capability list: AES-GMAC/AES-GCM crypto
 * capabilities referenced by four inline-crypto IPsec ESP entries
 * (transport/tunnel x egress/ingress), terminated by an ACTION_TYPE_NONE
 * sentinel.  NOTE(review): most initializer fields of these tables (key
 * sizes, digest sizes, IV lengths, closing braces) are in elided lines;
 * only the visible fields are annotated here.
 */
262 static const struct rte_security_capability *
263 txgbe_crypto_capabilities_get(void *device __rte_unused)
265 static const struct rte_cryptodev_capabilities
266 aes_gcm_gmac_crypto_capabilities[] = {
267 { /* AES GMAC (128-bit) */
268 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
270 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
272 .algo = RTE_CRYPTO_AUTH_AES_GMAC,
292 { /* AES GCM (128-bit) */
293 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
295 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
297 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* List terminator for the crypto capability array. */
323 .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
325 .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
330 static const struct rte_security_capability
331 txgbe_security_capabilities[] = {
332 { /* IPsec Inline Crypto ESP Transport Egress */
333 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
334 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
336 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
337 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
338 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
341 .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
/* Egress entries require metadata set on the mbuf by the application. */
342 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
344 { /* IPsec Inline Crypto ESP Transport Ingress */
345 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
346 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
348 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
349 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
350 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
353 .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
356 { /* IPsec Inline Crypto ESP Tunnel Egress */
357 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
358 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
360 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
361 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
362 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
365 .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
366 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
368 { /* IPsec Inline Crypto ESP Tunnel Ingress */
369 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
370 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
372 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
373 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
374 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
377 .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
/* Sentinel terminating the security capability list. */
381 .action = RTE_SECURITY_ACTION_TYPE_NONE
385 return txgbe_security_capabilities;
/*
 * rte_security operations exposed by this driver: session creation and
 * capability query.  (The closing brace of this initializer is in an
 * elided line.)
 */
388 static struct rte_security_ops txgbe_security_ops = {
389 .session_create = txgbe_crypto_create_session,
390 .capabilities_get = txgbe_crypto_capabilities_get
/*
 * txgbe_crypto_capable - probe whether the NIC supports inline IPsec.
 *
 * Saves SECRXCTL, attempts to clear the ODSA bit, reads the masked value
 * back, and restores the original register contents.  The elided lines
 * between the read-back and the restore presumably clear 'capable' when
 * the bit could not be modified, and the result is returned from an
 * elided line — confirm against the full source.
 */
394 txgbe_crypto_capable(struct rte_eth_dev *dev)
396 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
397 uint32_t reg_i, reg, capable = 1;
398 /* test if rx crypto can be enabled and then write back initial value*/
399 reg_i = rd32(hw, TXGBE_SECRXCTL);
400 wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
401 reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
404 wr32(hw, TXGBE_SECRXCTL, reg_i);
409 txgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
411 struct rte_security_ctx *ctx = NULL;
413 if (txgbe_crypto_capable(dev)) {
414 ctx = rte_malloc("rte_security_instances_ops",
415 sizeof(struct rte_security_ctx), 0);
417 ctx->device = (void *)dev;
418 ctx->ops = &txgbe_security_ops;
420 dev->security_ctx = ctx;
425 if (rte_security_dynfield_register() < 0)