1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
 */
5 #include <rte_ethdev_pci.h>
6 #include <rte_security_driver.h>
7 #include <rte_cryptodev.h>
9 #include "base/txgbe.h"
10 #include "txgbe_ethdev.h"
11 #include "txgbe_ipsec.h"
/* Equality test for two IP addresses: compares all four 32-bit words of the
 * ipv6[] member of the address union.
 * NOTE(review): for IPv4 entries this presumably relies on the unused ipv6
 * words being zeroed when the address is stored -- confirm against the ip
 * union definition in txgbe_ipsec.h.
 */
13 #define CMP_IP(a, b) (\
14 (a).ipv6[0] == (b).ipv6[0] && \
15 (a).ipv6[1] == (b).ipv6[1] && \
16 (a).ipv6[2] == (b).ipv6[2] && \
17 (a).ipv6[3] == (b).ipv6[3])
/* Program a session's SA into the NIC IPsec tables.
 *
 * Inbound (TXGBE_OP_AUTHENTICATED_DECRYPTION) sessions take one entry in the
 * Rx IP table (reference-counted, shared between SAs with the same
 * destination IP) plus one Rx SA entry, then write the IP, SPI and key
 * tables to hardware. Outbound sessions take one Tx SA entry and write only
 * the Tx key/salt registers.
 *
 * NOTE(review): this extract omits several source lines (the declarations of
 * i/ip_index/sa_index/reg_val/key, opening/closing braces, and the
 * error-return paths after each "Fail if ..." comment), so the comments
 * below describe only what is visible.
 */
20 txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
22 struct rte_eth_dev *dev = ic_session->dev;
23 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
24 struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
28 if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
32 /* Find a match in the IP table*/
33 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
34 if (CMP_IP(priv->rx_ip_tbl[i].ip,
35 ic_session->dst_ip)) {
40 /* If no match, find a free entry in the IP table*/
42 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
43 if (priv->rx_ip_tbl[i].ref_count == 0) {
50 /* Fail if no match and no free entries*/
53 "No free entry left in the Rx IP table\n");
57 /* Find a free entry in the SA table*/
58 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
59 if (priv->rx_sa_tbl[i].used == 0) {
64 /* Fail if no free entries*/
67 "No free entry left in the Rx SA table\n");
/* Record the destination IP in the software shadow of the Rx IP table and
 * take a reference on the (possibly shared) entry.
 */
71 priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
72 ic_session->dst_ip.ipv6[0];
73 priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
74 ic_session->dst_ip.ipv6[1];
75 priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
76 ic_session->dst_ip.ipv6[2];
77 priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
78 ic_session->dst_ip.ipv6[3];
79 priv->rx_ip_tbl[ip_index].ref_count++;
/* Fill the software shadow of the Rx SA entry; the mode flags mark it valid
 * and, where applicable, select ESP decrypt and the IPv6 address format.
 */
81 priv->rx_sa_tbl[sa_index].spi = ic_session->spi;
82 priv->rx_sa_tbl[sa_index].ip_index = ip_index;
83 priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
84 if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION)
85 priv->rx_sa_tbl[sa_index].mode |=
86 (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
87 if (ic_session->dst_ip.type == IPv6) {
88 priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
89 priv->rx_ip_tbl[ip_index].ip.type = IPv6;
90 } else if (ic_session->dst_ip.type == IPv4) {
91 priv->rx_ip_tbl[ip_index].ip.type = IPv4;
93 priv->rx_sa_tbl[sa_index].used = 1;
/* Push the IP table entry to hardware: IPSRXADDR(0..2) are zeroed for IPv4,
 * the index register write (with WRITE polled for up to 1000us) commits it.
 */
95 /* write IP table entry*/
96 reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
97 TXGBE_IPSRXIDX_TB_IP | (ip_index << 3);
98 if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
99 wr32(hw, TXGBE_IPSRXADDR(0), 0);
100 wr32(hw, TXGBE_IPSRXADDR(1), 0);
101 wr32(hw, TXGBE_IPSRXADDR(2), 0);
102 wr32(hw, TXGBE_IPSRXADDR(3),
103 priv->rx_ip_tbl[ip_index].ip.ipv4);
105 wr32(hw, TXGBE_IPSRXADDR(0),
106 priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
107 wr32(hw, TXGBE_IPSRXADDR(1),
108 priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
109 wr32(hw, TXGBE_IPSRXADDR(2),
110 priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
111 wr32(hw, TXGBE_IPSRXADDR(3),
112 priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
114 wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
116 /* write SPI table entry*/
117 reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
118 TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
119 wr32(hw, TXGBE_IPSRXSPI,
120 priv->rx_sa_tbl[sa_index].spi);
121 wr32(hw, TXGBE_IPSRXADDRIDX,
122 priv->rx_sa_tbl[sa_index].ip_index);
123 wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
/* NOTE(review): the malloc NULL-check and the matching free(key) are not
 * visible in this extract (presumably in the elided lines) -- confirm there
 * is no leak / NULL dereference on the allocation failure path.
 */
125 /* write Key table entry*/
126 key = malloc(ic_session->key_len);
130 memcpy(key, ic_session->key, ic_session->key_len);
132 reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
133 TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
/* The 128-bit key is written word-reversed (bytes 12..15 into KEY(0), bytes
 * 0..3 into KEY(3)) with each word byte-swapped to big-endian -- presumably
 * the register layout the hardware expects; confirm against the datasheet.
 */
134 wr32(hw, TXGBE_IPSRXKEY(0),
135 rte_cpu_to_be_32(*(uint32_t *)&key[12]));
136 wr32(hw, TXGBE_IPSRXKEY(1),
137 rte_cpu_to_be_32(*(uint32_t *)&key[8]));
138 wr32(hw, TXGBE_IPSRXKEY(2),
139 rte_cpu_to_be_32(*(uint32_t *)&key[4]));
140 wr32(hw, TXGBE_IPSRXKEY(3),
141 rte_cpu_to_be_32(*(uint32_t *)&key[0]));
142 wr32(hw, TXGBE_IPSRXSALT,
143 rte_cpu_to_be_32(ic_session->salt));
144 wr32(hw, TXGBE_IPSRXMODE,
145 priv->rx_sa_tbl[sa_index].mode);
146 wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
149 } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
153 /* Find a free entry in the SA table*/
154 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
155 if (priv->tx_sa_tbl[i].used == 0) {
160 /* Fail if no free entries*/
163 "No free entry left in the Tx SA table\n");
/* Tx SA SPI is stored pre-swapped to big-endian (unlike the Rx table, which
 * stores it host-order above -- see the lookup in txgbe_crypto_remove_sa).
 */
167 priv->tx_sa_tbl[sa_index].spi =
168 rte_cpu_to_be_32(ic_session->spi);
/* NOTE(review): 'i' is used here while 'sa_index' is used on the
 * surrounding lines; they are presumably equal after the search loop broke
 * at the free slot, but using sa_index would be clearer -- confirm the loop
 * always breaks before reaching this point.
 */
169 priv->tx_sa_tbl[i].used = 1;
170 ic_session->sa_index = sa_index;
172 key = malloc(ic_session->key_len);
176 memcpy(key, ic_session->key, ic_session->key_len);
/* NOTE(review): Rx-named bit macros (TXGBE_IPSRXIDX_*) are used to build the
 * value written to the Tx index register TXGBE_IPSTXIDX -- presumably the
 * ENA/WRITE bit layout is identical; confirm against the register map.
 */
178 /* write Key table entry*/
179 reg_val = TXGBE_IPSRXIDX_ENA |
180 TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
181 wr32(hw, TXGBE_IPSTXKEY(0),
182 rte_cpu_to_be_32(*(uint32_t *)&key[12]));
183 wr32(hw, TXGBE_IPSTXKEY(1),
184 rte_cpu_to_be_32(*(uint32_t *)&key[8]));
185 wr32(hw, TXGBE_IPSTXKEY(2),
186 rte_cpu_to_be_32(*(uint32_t *)&key[4]));
187 wr32(hw, TXGBE_IPSTXKEY(3),
188 rte_cpu_to_be_32(*(uint32_t *)&key[0]));
189 wr32(hw, TXGBE_IPSTXSALT,
190 rte_cpu_to_be_32(ic_session->salt));
191 wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
/* Remove a session's SA from the NIC IPsec tables.
 *
 * Inbound: locate the IP-table entry by destination IP and the SA-table
 * entry by SPI, clear the hardware SPI/key entries, and drop the IP-table
 * reference (clearing the IP entry in hardware when it hits zero).
 * Outbound: locate the Tx SA by SPI and clear its key/salt registers.
 *
 * NOTE(review): this extract omits several source lines (braces, the
 * sa_index/reg_val declarations, index captures inside the search loops and
 * the error-return paths), so comments describe only what is visible.
 */
200 txgbe_crypto_remove_sa(struct rte_eth_dev *dev,
201 struct txgbe_crypto_session *ic_session)
203 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
204 struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
208 if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
209 int i, ip_index = -1;
211 /* Find a match in the IP table*/
212 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
213 if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
219 /* Fail if no match*/
222 "Entry not found in the Rx IP table\n");
226 /* Find the matching entry in the SA table (lookup by SPI) */
227 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
228 if (priv->rx_sa_tbl[i].spi ==
229 rte_cpu_to_be_32(ic_session->spi)) {
234 /* Fail if no match*/
237 "Entry not found in the Rx SA table\n");
241 /* Disable and clear Rx SPI and key table entries*/
242 reg_val = TXGBE_IPSRXIDX_WRITE |
243 TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
244 wr32(hw, TXGBE_IPSRXSPI, 0);
245 wr32(hw, TXGBE_IPSRXADDRIDX, 0);
246 wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
247 reg_val = TXGBE_IPSRXIDX_WRITE |
248 TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
249 wr32(hw, TXGBE_IPSRXKEY(0), 0);
250 wr32(hw, TXGBE_IPSRXKEY(1), 0);
251 wr32(hw, TXGBE_IPSRXKEY(2), 0);
252 wr32(hw, TXGBE_IPSRXKEY(3), 0);
253 wr32(hw, TXGBE_IPSRXSALT, 0);
254 wr32(hw, TXGBE_IPSRXMODE, 0);
255 wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
256 priv->rx_sa_tbl[sa_index].used = 0;
258 /* If last used then clear the IP table entry*/
259 priv->rx_ip_tbl[ip_index].ref_count--;
260 if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
261 reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_IP |
263 wr32(hw, TXGBE_IPSRXADDR(0), 0);
264 wr32(hw, TXGBE_IPSRXADDR(1), 0);
265 wr32(hw, TXGBE_IPSRXADDR(2), 0);
266 wr32(hw, TXGBE_IPSRXADDR(3), 0);
268 } else { /* session->dir == RTE_CRYPTO_OUTBOUND */
271 /* Find a match in the SA table*/
272 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
273 if (priv->tx_sa_tbl[i].spi ==
274 rte_cpu_to_be_32(ic_session->spi)) {
279 /* Fail if no match entries*/
282 "Entry not found in the Tx SA table\n");
/* NOTE(review): an Rx-named macro (TXGBE_IPSRXIDX_WRITE) builds the value
 * committed to the Tx index register TXGBE_IPSTXIDX below -- presumably the
 * WRITE bit layout is identical; confirm against the register map.
 */
285 reg_val = TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
286 wr32(hw, TXGBE_IPSTXKEY(0), 0);
287 wr32(hw, TXGBE_IPSTXKEY(1), 0);
288 wr32(hw, TXGBE_IPSTXKEY(2), 0);
289 wr32(hw, TXGBE_IPSTXKEY(3), 0);
290 wr32(hw, TXGBE_IPSTXSALT, 0);
291 wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
293 priv->tx_sa_tbl[sa_index].used = 0;
/* rte_security .session_create callback.
 *
 * Allocates the driver-private session object from the caller's mempool,
 * validates that the crypto transform is AES-GCM AEAD and that the
 * corresponding RX/TX SECURITY offload is enabled on the port, then fills
 * the session and (for egress sessions) programs the Tx SA into hardware.
 * On every visible failure path the object is returned to the mempool.
 *
 * NOTE(review): this extract omits the return statements and closing braces
 * of the error paths; comments describe only what is visible.
 */
300 txgbe_crypto_create_session(void *device,
301 struct rte_security_session_conf *conf,
302 struct rte_security_session *session,
303 struct rte_mempool *mempool)
305 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
306 struct txgbe_crypto_session *ic_session = NULL;
307 struct rte_crypto_aead_xform *aead_xform;
/* NOTE(review): "ð_dev" is an HTML-entity mangling of "&eth_dev"
 * introduced by the extraction ("&eth" was decoded as U+00F0) -- restore
 * "&eth_dev->data->dev_conf" in the real source.
 */
308 struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf;
310 if (rte_mempool_get(mempool, (void **)&ic_session)) {
311 PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
315 if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
316 conf->crypto_xform->aead.algo !=
317 RTE_CRYPTO_AEAD_AES_GCM) {
318 PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
319 rte_mempool_put(mempool, (void *)ic_session);
322 aead_xform = &conf->crypto_xform->aead;
324 if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
325 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
326 ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
328 PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
329 rte_mempool_put(mempool, (void *)ic_session);
333 if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
334 ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
336 PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
337 rte_mempool_put(mempool, (void *)ic_session);
/* Key pointer/length are taken from the AEAD transform; the 4-byte salt is
 * read from the bytes immediately FOLLOWING the key in key.data.
 * NOTE(review): assumes the caller supplied key.length + 4 bytes -- confirm
 * against the documented key format for this PMD.
 */
342 ic_session->key = aead_xform->key.data;
343 ic_session->key_len = aead_xform->key.length;
344 memcpy(&ic_session->salt,
345 &aead_xform->key.data[aead_xform->key.length], 4);
346 ic_session->spi = conf->ipsec.spi;
347 ic_session->dev = eth_dev;
349 set_sec_session_private_data(session, ic_session);
/* Only egress (encryption) SAs are programmed into hardware here; ingress
 * SA programming is not visible in this extract -- presumably it happens at
 * a different point in the device lifecycle; confirm.
 */
351 if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
352 if (txgbe_crypto_add_sa(ic_session)) {
353 PMD_DRV_LOG(ERR, "Failed to add SA\n");
354 rte_mempool_put(mempool, (void *)ic_session);
/* rte_security .session_get_size callback: report the size of the driver's
 * private session data so callers can size their session mempool elements.
 */
363 txgbe_crypto_session_get_size(__rte_unused void *device)
365 return sizeof(struct txgbe_crypto_session);
/* rte_security .session_destroy callback.
 *
 * Recovers the private session from the rte_security session, verifies it
 * belongs to this device, removes the SA from hardware, and returns the
 * object to its originating mempool (found via rte_mempool_from_obj).
 *
 * NOTE(review): ic_session is dereferenced without a NULL check in the
 * visible lines -- confirm a session always carries private data here.
 * The error-path return statements are elided from this extract.
 */
369 txgbe_crypto_remove_session(void *device,
370 struct rte_security_session *session)
372 struct rte_eth_dev *eth_dev = device;
373 struct txgbe_crypto_session *ic_session =
374 (struct txgbe_crypto_session *)
375 get_sec_session_private_data(session);
376 struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
378 if (eth_dev != ic_session->dev) {
379 PMD_DRV_LOG(ERR, "Session not bound to this device\n");
383 if (txgbe_crypto_remove_sa(eth_dev, ic_session)) {
384 PMD_DRV_LOG(ERR, "Failed to remove session\n");
388 rte_mempool_put(mempool, (void *)ic_session);
/* Compute the total ESP trailer length (payload padding + 2-byte ESP
 * trailer + 16-byte ICV) for a single-segment mbuf: the pad-length byte
 * sits ESP_TRAILER_SIZE + ESP_ICV_SIZE bytes before the end of the packet.
 * NOTE(review): the multi-segment (nb_segs > 1) path is elided from this
 * extract -- confirm what the function returns in that case.
 */
393 static inline uint8_t
394 txgbe_crypto_compute_pad_len(struct rte_mbuf *m)
396 if (m->nb_segs == 1) {
397 /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
398 * payload padding size is stored at <pkt_len - 18>
400 uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
401 rte_pktmbuf_pkt_len(m) -
402 (ESP_TRAILER_SIZE + ESP_ICV_SIZE));
403 return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
/* rte_security .set_pkt_metadata callback.
 *
 * For egress (encryption) sessions, stores the hardware SA index and the
 * computed ESP pad length into the mbuf's security dynamic field so the Tx
 * path can fill the inline-crypto descriptor.
 *
 * NOTE(review): mdata is used without a visible NULL check -- the dynfield
 * registration/validation presumably happens in lines elided from this
 * extract; confirm.
 */
409 txgbe_crypto_update_mb(void *device __rte_unused,
410 struct rte_security_session *session,
411 struct rte_mbuf *m, void *params __rte_unused)
413 struct txgbe_crypto_session *ic_session =
414 get_sec_session_private_data(session);
415 if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
416 union txgbe_crypto_tx_desc_md *mdata =
417 (union txgbe_crypto_tx_desc_md *)
418 rte_security_dynfield(m);
420 mdata->sa_idx = ic_session->sa_index;
421 mdata->pad_len = txgbe_crypto_compute_pad_len(m);
/* rte_security .capabilities_get callback.
 *
 * Returns a static table advertising inline-crypto ESP support (AES-GMAC
 * and AES-GCM, 128-bit) for transport and tunnel mode in both directions;
 * egress entries additionally flag RTE_SECURITY_TX_OLOAD_NEED_MDATA since
 * the Tx path requires per-packet metadata (see txgbe_crypto_update_mb).
 *
 * NOTE(review): most initializer interior lines (sym/auth/aead sub-struct
 * braces, key/digest/iv size ranges, ipsec options) are elided from this
 * extract; only the visible fields are annotated.
 */
426 static const struct rte_security_capability *
427 txgbe_crypto_capabilities_get(void *device __rte_unused)
429 static const struct rte_cryptodev_capabilities
430 aes_gcm_gmac_crypto_capabilities[] = {
431 { /* AES GMAC (128-bit) */
432 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
434 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
436 .algo = RTE_CRYPTO_AUTH_AES_GMAC,
456 { /* AES GCM (128-bit) */
457 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
459 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
461 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* End-of-list sentinel for the crypto capability table. */
487 .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
489 .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
494 static const struct rte_security_capability
495 txgbe_security_capabilities[] = {
496 { /* IPsec Inline Crypto ESP Transport Egress */
497 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
498 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
500 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
501 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
502 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
505 .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
506 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
508 { /* IPsec Inline Crypto ESP Transport Ingress */
509 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
510 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
512 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
513 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
514 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
517 .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
520 { /* IPsec Inline Crypto ESP Tunnel Egress */
521 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
522 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
524 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
525 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
526 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
529 .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
530 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
532 { /* IPsec Inline Crypto ESP Tunnel Ingress */
533 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
534 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
536 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
537 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
538 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
541 .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
/* End-of-list sentinel for the security capability table. */
545 .action = RTE_SECURITY_ACTION_TYPE_NONE
549 return txgbe_security_capabilities;
/* rte_security operations table exposed by the txgbe inline-crypto PMD;
 * installed on the device's security context in txgbe_ipsec_ctx_create().
 */
552 static struct rte_security_ops txgbe_security_ops = {
553 .session_create = txgbe_crypto_create_session,
554 .session_get_size = txgbe_crypto_session_get_size,
555 .session_destroy = txgbe_crypto_remove_session,
556 .set_pkt_metadata = txgbe_crypto_update_mb,
557 .capabilities_get = txgbe_crypto_capabilities_get
/* Probe whether the NIC supports Rx inline crypto: attempt to clear the
 * SECRXCTL ODSA bit, read it back, then restore the saved register value.
 * NOTE(review): the comparison that would clear 'capable' (based on 'reg')
 * and the return statement are in lines elided from this extract -- from
 * what is visible, 'capable' is initialized to 1 and presumably cleared
 * when the read-back does not match; confirm.
 */
561 txgbe_crypto_capable(struct rte_eth_dev *dev)
563 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
564 uint32_t reg_i, reg, capable = 1;
565 /* test if rx crypto can be enabled and then write back initial value*/
566 reg_i = rd32(hw, TXGBE_SECRXCTL);
567 wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
568 reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
571 wr32(hw, TXGBE_SECRXCTL, reg_i);
576 txgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
578 struct rte_security_ctx *ctx = NULL;
580 if (txgbe_crypto_capable(dev)) {
581 ctx = rte_malloc("rte_security_instances_ops",
582 sizeof(struct rte_security_ctx), 0);
584 ctx->device = (void *)dev;
585 ctx->ops = &txgbe_security_ops;
587 dev->security_ctx = ctx;
592 if (rte_security_dynfield_register() < 0)