/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_security_driver.h>
#include <rte_cryptodev.h>

#include "base/ixgbe_type.h"
#include "base/ixgbe_api.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_ipsec.h"
#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS	5

#define IXGBE_WAIT_RREAD \
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
			IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
#define IXGBE_WAIT_RWRITE \
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
			IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
#define IXGBE_WAIT_TREAD \
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
			IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
#define IXGBE_WAIT_TWRITE \
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
			IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
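
/* Compare two IP addresses stored as four 32-bit words (the ipv6[] view of
 * the address union), so one comparison covers both IPv4 and IPv6 entries.
 */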
#define CMP_IP(a, b) (\
	(a).ipv6[0] == (b).ipv6[0] && \
	(a).ipv6[1] == (b).ipv6[1] && \
	(a).ipv6[2] == (b).ipv6[2] && \
	(a).ipv6[3] == (b).ipv6[3])
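
/* Write zeroed entries at every index of the hardware Rx IP, Rx SPI/key and
 * Tx key tables so that no stale security associations remain.
 */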
ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* clear Rx IP table */
	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
		uint16_t index = i << 3;
		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);

	/* clear Rx SPI and Rx/Tx SA tables */
	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
		uint32_t index = i << 3;
		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);

		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);

		reg_val = IPSRXIDX_WRITE | index;
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
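
/* Program a session's SA into the hardware. Ingress sessions get (or take a
 * reference on) a destination-IP table slot plus an SPI/key/mode entry in the
 * Rx SA table; egress sessions get a key/salt entry in the Tx SA table.
 */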
ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
	struct rte_eth_dev *dev = ic_session->dev;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
			dev->data->dev_private);

	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {

		/* Find a match in the IP table */
		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
			if (CMP_IP(priv->rx_ip_tbl[i].ip,
				   ic_session->dst_ip)) {

		/* If no match, find a free entry in the IP table */
		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
			if (priv->rx_ip_tbl[i].ref_count == 0) {

		/* Fail if no match and no free entries */
				"No free entry left in the Rx IP table\n");

		/* Find a free entry in the SA table */
		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
			if (priv->rx_sa_tbl[i].used == 0) {

		/* Fail if no free entries */
				"No free entry left in the Rx SA table\n");

		priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
				ic_session->dst_ip.ipv6[0];
		priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
				ic_session->dst_ip.ipv6[1];
		priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
				ic_session->dst_ip.ipv6[2];
		priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
				ic_session->dst_ip.ipv6[3];
		priv->rx_ip_tbl[ip_index].ref_count++;

		priv->rx_sa_tbl[sa_index].spi =
				rte_cpu_to_be_32(ic_session->spi);
		priv->rx_sa_tbl[sa_index].ip_index = ip_index;
		/* Store the 16-byte key word-reversed and byte-swapped to big
		 * endian, so that IPSRXKEY(0) below receives the last four key
		 * bytes and IPSRXKEY(3) the first four.
		 */
		priv->rx_sa_tbl[sa_index].key[3] =
				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
		priv->rx_sa_tbl[sa_index].key[2] =
				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
		priv->rx_sa_tbl[sa_index].key[1] =
				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
		priv->rx_sa_tbl[sa_index].key[0] =
				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
		priv->rx_sa_tbl[sa_index].salt =
				rte_cpu_to_be_32(ic_session->salt);
		priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
		if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
			priv->rx_sa_tbl[sa_index].mode |=
					(IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
		if (ic_session->dst_ip.type == IPv6)
			priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
		priv->rx_sa_tbl[sa_index].used = 1;

		/* write IP table entry */
		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
				IPSRXIDX_TABLE_IP | (ip_index << 3);
		if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
					priv->rx_ip_tbl[ip_index].ip.ipv4);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
					priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
					priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
					priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
					priv->rx_ip_tbl[ip_index].ip.ipv6[3]);

		/* write SPI table entry */
		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
				IPSRXIDX_TABLE_SPI | (sa_index << 3);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
				priv->rx_sa_tbl[sa_index].spi);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
				priv->rx_sa_tbl[sa_index].ip_index);

		/* write Key table entry */
		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
				IPSRXIDX_TABLE_KEY | (sa_index << 3);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
				priv->rx_sa_tbl[sa_index].key[0]);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
				priv->rx_sa_tbl[sa_index].key[1]);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
				priv->rx_sa_tbl[sa_index].key[2]);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
				priv->rx_sa_tbl[sa_index].key[3]);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
				priv->rx_sa_tbl[sa_index].salt);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
				priv->rx_sa_tbl[sa_index].mode);

	} else { /* ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION */

		/* Find a free entry in the SA table */
		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
			if (priv->tx_sa_tbl[i].used == 0) {

		/* Fail if no free entries */
				"No free entry left in the Tx SA table\n");

		priv->tx_sa_tbl[sa_index].spi =
				rte_cpu_to_be_32(ic_session->spi);
		priv->tx_sa_tbl[sa_index].key[3] =
				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
		priv->tx_sa_tbl[sa_index].key[2] =
				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
		priv->tx_sa_tbl[sa_index].key[1] =
				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
		priv->tx_sa_tbl[sa_index].key[0] =
				rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
		priv->tx_sa_tbl[sa_index].salt =
				rte_cpu_to_be_32(ic_session->salt);

		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
				priv->tx_sa_tbl[sa_index].key[0]);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
				priv->tx_sa_tbl[sa_index].key[1]);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
				priv->tx_sa_tbl[sa_index].key[2]);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
				priv->tx_sa_tbl[sa_index].key[3]);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
				priv->tx_sa_tbl[sa_index].salt);

		priv->tx_sa_tbl[sa_index].used = 1;
		ic_session->sa_index = sa_index;
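
/* Tear down a session's SA: clear the hardware SPI/key (Rx) or key/salt (Tx)
 * entries and, for ingress sessions, drop the reference on the destination-IP
 * entry, clearing it once no SA uses it any more.
 */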
ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
		       struct ixgbe_crypto_session *ic_session)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_ipsec *priv =
			IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);

	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
		int i, ip_index = -1;

		/* Find a match in the IP table */
		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
			if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {

		/* Fail if no match */
				"Entry not found in the Rx IP table\n");

		/* Find the matching SA in the SA table */
		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
			if (priv->rx_sa_tbl[i].spi ==
					rte_cpu_to_be_32(ic_session->spi)) {

		/* Fail if no match */
				"Entry not found in the Rx SA table\n");

		/* Disable and clear the Rx SPI and key table entries */
		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);

		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);

		priv->rx_sa_tbl[sa_index].used = 0;

		/* If this was the last user, clear the IP table entry too */
		priv->rx_ip_tbl[ip_index].ref_count--;
		if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
			reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
					(ip_index << 3);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);

	} else { /* ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION */

		/* Find a match in the SA table */
		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
			if (priv->tx_sa_tbl[i].spi ==
					rte_cpu_to_be_32(ic_session->spi)) {

		/* Fail if no match */
				"Entry not found in the Tx SA table\n");

		reg_val = IPSRXIDX_WRITE | (sa_index << 3);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);

		priv->tx_sa_tbl[sa_index].used = 0;
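
/* rte_security session_create callback: check that the transform is AES-GCM
 * AEAD and that the requested direction's security offload is enabled, then
 * record the key, salt, SPI and device in the driver session. Egress SAs are
 * programmed into hardware immediately; ingress SAs are added later, when the
 * matching flow rule supplies the IP addresses.
 */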
ixgbe_crypto_create_session(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *session,
			    struct rte_mempool *mempool)
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct ixgbe_crypto_session *ic_session = NULL;
	struct rte_crypto_aead_xform *aead_xform;
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

	if (rte_mempool_get(mempool, (void **)&ic_session)) {
		PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");

	if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
			conf->crypto_xform->aead.algo !=
					RTE_CRYPTO_AEAD_AES_GCM) {
		PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");

	aead_xform = &conf->crypto_xform->aead;

	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");

		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");

	ic_session->key = aead_xform->key.data;
	/* the 4-byte salt follows the key material in the AEAD key buffer */
	memcpy(&ic_session->salt,
	       &aead_xform->key.data[aead_xform->key.length], 4);
	ic_session->spi = conf->ipsec.spi;
	ic_session->dev = eth_dev;

	set_sec_session_private_data(session, ic_session);

	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
		if (ixgbe_crypto_add_sa(ic_session)) {
			PMD_DRV_LOG(ERR, "Failed to add SA\n");
ixgbe_crypto_remove_session(void *device,
			    struct rte_security_session *session)
	struct rte_eth_dev *eth_dev = device;
	struct ixgbe_crypto_session *ic_session =
			(struct ixgbe_crypto_session *)
			get_sec_session_private_data(session);
	struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);

	if (eth_dev != ic_session->dev) {
		PMD_DRV_LOG(ERR, "Session not bound to this device\n");

	if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
		PMD_DRV_LOG(ERR, "Failed to remove session\n");

	rte_mempool_put(mempool, (void *)ic_session);
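
/* For a single-segment packet, read the ESP pad length byte that sits just
 * before the 2-byte trailer and 16-byte ICV, and return the total number of
 * trailing bytes (padding + trailer + ICV) the hardware must account for.
 */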
static inline uint8_t
ixgbe_crypto_compute_pad_len(struct rte_mbuf *m)
	if (m->nb_segs == 1) {
		/* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
		 * payload padding size is stored at <pkt_len - 18>
		 */
		uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
					rte_pktmbuf_pkt_len(m) -
					(ESP_TRAILER_SIZE + ESP_ICV_SIZE));
		return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
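
/* set_pkt_metadata callback: for egress sessions, stash the SA index and the
 * computed pad length in the mbuf metadata so the Tx path can fill in the
 * security offload descriptor fields.
 */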
ixgbe_crypto_update_mb(void *device __rte_unused,
		       struct rte_security_session *session,
		       struct rte_mbuf *m, void *params __rte_unused)
	struct ixgbe_crypto_session *ic_session =
			get_sec_session_private_data(session);
	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
		union ixgbe_crypto_tx_desc_md *mdata =
				(union ixgbe_crypto_tx_desc_md *)&m->udata64;

		mdata->sa_idx = ic_session->sa_index;
		mdata->pad_len = ixgbe_crypto_compute_pad_len(m);

static const struct rte_security_capability *
ixgbe_crypto_capabilities_get(void *device __rte_unused)
	static const struct rte_cryptodev_capabilities
	aes_gcm_gmac_crypto_capabilities[] = {
		{	/* AES GMAC (128-bit) */
			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
					.algo = RTE_CRYPTO_AUTH_AES_GMAC,
		{	/* AES GCM (128-bit) */
			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
				.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
					.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
				.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED

	static const struct rte_security_capability
	ixgbe_security_capabilities[] = {
		{ /* IPsec Inline Crypto ESP Transport Egress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
		{ /* IPsec Inline Crypto ESP Transport Ingress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
		{ /* IPsec Inline Crypto ESP Tunnel Egress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.action = RTE_SECURITY_ACTION_TYPE_NONE

	return ixgbe_security_capabilities;
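
/* One-time enable of the inline IPsec engines: reject configurations that are
 * incompatible with the security block (LRO enabled, hardware CRC strip
 * disabled), program the Tx security block registers, enable the Rx/Tx crypto
 * paths, and clear the hardware SA tables.
 */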
ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->dev_conf.rxmode.enable_lro) {
		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");

	/* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);

	/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
	 * hang will occur with heavy traffic.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xFFFFFFF0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");

	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
				IXGBE_SECTXCTRL_STORE_FORWARD);
		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
		if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
			PMD_DRV_LOG(ERR, "Error enabling Tx Crypto");

	ixgbe_crypto_clear_ipsec_tables(dev);
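
/* Invoked from the flow API when an inline-crypto flow rule is created:
 * copy the source and destination addresses from the IPv4/IPv6 flow item
 * into the ingress session, then program the SA into hardware.
 */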
ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
	struct ixgbe_crypto_session *ic_session
		= get_sec_session_private_data(sess);

	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
			ic_session->src_ip.type = IPv6;
			ic_session->dst_ip.type = IPv6;
			rte_memcpy(ic_session->src_ip.ipv6,
				   ipv6->hdr.src_addr, 16);
			rte_memcpy(ic_session->dst_ip.ipv6,
				   ipv6->hdr.dst_addr, 16);
			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
			ic_session->src_ip.type = IPv4;
			ic_session->dst_ip.type = IPv4;
			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
	return ixgbe_crypto_add_sa(ic_session);
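
/* rte_security operations table exposed through this port's security context;
 * session update and statistics retrieval are not supported by this driver.
 */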
static struct rte_security_ops ixgbe_security_ops = {
	.session_create = ixgbe_crypto_create_session,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = ixgbe_crypto_remove_session,
	.set_pkt_metadata = ixgbe_crypto_update_mb,
	.capabilities_get = ixgbe_crypto_capabilities_get
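
/* Allocate and initialise the rte_security context for this port, pointing it
 * at the device and the operations table above.
 */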
struct rte_security_ctx *
ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
	struct rte_security_ctx *ctx = rte_malloc("rte_security_instances_ops",
						  sizeof(struct rte_security_ctx), 0);

	ctx->device = (void *)dev;
	ctx->ops = &ixgbe_security_ops;