4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_ethdev_pci.h>
37 #include <rte_jhash.h>
38 #include <rte_security_driver.h>
39 #include <rte_cryptodev.h>
42 #include "base/ixgbe_type.h"
43 #include "base/ixgbe_api.h"
44 #include "ixgbe_ethdev.h"
45 #include "ixgbe_ipsec.h"
/* Timeout, in milliseconds, used when polling IPSRXIDX/IPSTXIDX for the
 * hardware to complete an indexed table access.
 */
#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS 5
/* Trigger an indexed READ of the Rx IPsec table selected by `reg_val` and
 * poll until the hardware finishes. All four macros expect `hw` and
 * `reg_val` to be in the caller's scope.
 */
#define IXGBE_WAIT_RREAD \
IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
/* Commit a staged WRITE to the selected Rx IPsec table entry. */
#define IXGBE_WAIT_RWRITE \
IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
/* Trigger an indexed READ of the Tx IPsec key/salt table.
 * NOTE(review): the IPSRXIDX_READ/WRITE bit values are reused for the
 * IPSTXIDX register — presumably the command bits occupy the same
 * positions in both registers; verify against the 82599 datasheet.
 */
#define IXGBE_WAIT_TREAD \
IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
/* Commit a staged WRITE to the selected Tx IPsec table entry. */
#define IXGBE_WAIT_TWRITE \
IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
/* Compare two IP addresses for equality by comparing all four 32-bit
 * words of the ipv6 union member.
 * NOTE(review): for IPv4 entries this only works if the unused words of
 * the address union are zero-initialized — confirm callers zero-fill
 * before insertion into the table.
 */
#define CMP_IP(a, b) (\
	(a).ipv6[0] == (b).ipv6[0] && \
	(a).ipv6[1] == (b).ipv6[1] && \
	(a).ipv6[2] == (b).ipv6[2] && \
	(a).ipv6[3] == (b).ipv6[3])
70 ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
72 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
75 /* clear Rx IP table*/
76 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
77 uint16_t index = i << 3;
78 uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
79 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
80 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
81 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
82 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
86 /* clear Rx SPI and Rx/Tx SA tables*/
87 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
88 uint32_t index = i << 3;
89 uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
90 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
91 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
93 reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
94 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
95 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
96 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
97 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
98 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
99 IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
101 reg_val = IPSRXIDX_WRITE | index;
102 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
103 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
104 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
105 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
106 IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
112 ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
114 struct rte_eth_dev *dev = ic_session->dev;
115 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
116 struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
117 dev->data->dev_private);
121 if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
122 int i, ip_index = -1;
124 /* Find a match in the IP table*/
125 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
126 if (CMP_IP(priv->rx_ip_tbl[i].ip,
127 ic_session->dst_ip)) {
132 /* If no match, find a free entry in the IP table*/
134 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
135 if (priv->rx_ip_tbl[i].ref_count == 0) {
142 /* Fail if no match and no free entries*/
145 "No free entry left in the Rx IP table\n");
149 /* Find a free entry in the SA table*/
150 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
151 if (priv->rx_sa_tbl[i].used == 0) {
156 /* Fail if no free entries*/
159 "No free entry left in the Rx SA table\n");
163 priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
164 ic_session->dst_ip.ipv6[0];
165 priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
166 ic_session->dst_ip.ipv6[1];
167 priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
168 ic_session->dst_ip.ipv6[2];
169 priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
170 ic_session->dst_ip.ipv6[3];
171 priv->rx_ip_tbl[ip_index].ref_count++;
173 priv->rx_sa_tbl[sa_index].spi =
174 rte_cpu_to_be_32(ic_session->spi);
175 priv->rx_sa_tbl[sa_index].ip_index = ip_index;
176 priv->rx_sa_tbl[sa_index].key[3] =
177 rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
178 priv->rx_sa_tbl[sa_index].key[2] =
179 rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
180 priv->rx_sa_tbl[sa_index].key[1] =
181 rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
182 priv->rx_sa_tbl[sa_index].key[0] =
183 rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
184 priv->rx_sa_tbl[sa_index].salt =
185 rte_cpu_to_be_32(ic_session->salt);
186 priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
187 if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
188 priv->rx_sa_tbl[sa_index].mode |=
189 (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
190 if (ic_session->dst_ip.type == IPv6)
191 priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
192 priv->rx_sa_tbl[sa_index].used = 1;
194 /* write IP table entry*/
195 reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
196 IPSRXIDX_TABLE_IP | (ip_index << 3);
197 if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
198 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
199 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
200 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
201 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
202 priv->rx_ip_tbl[ip_index].ip.ipv4);
204 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
205 priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
206 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
207 priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
208 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
209 priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
210 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
211 priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
215 /* write SPI table entry*/
216 reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
217 IPSRXIDX_TABLE_SPI | (sa_index << 3);
218 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
219 priv->rx_sa_tbl[sa_index].spi);
220 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
221 priv->rx_sa_tbl[sa_index].ip_index);
224 /* write Key table entry*/
225 reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
226 IPSRXIDX_TABLE_KEY | (sa_index << 3);
227 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
228 priv->rx_sa_tbl[sa_index].key[0]);
229 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
230 priv->rx_sa_tbl[sa_index].key[1]);
231 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
232 priv->rx_sa_tbl[sa_index].key[2]);
233 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
234 priv->rx_sa_tbl[sa_index].key[3]);
235 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
236 priv->rx_sa_tbl[sa_index].salt);
237 IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
238 priv->rx_sa_tbl[sa_index].mode);
241 } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
244 /* Find a free entry in the SA table*/
245 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
246 if (priv->tx_sa_tbl[i].used == 0) {
251 /* Fail if no free entries*/
254 "No free entry left in the Tx SA table\n");
258 priv->tx_sa_tbl[sa_index].spi =
259 rte_cpu_to_be_32(ic_session->spi);
260 priv->tx_sa_tbl[sa_index].key[3] =
261 rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
262 priv->tx_sa_tbl[sa_index].key[2] =
263 rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
264 priv->tx_sa_tbl[sa_index].key[1] =
265 rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
266 priv->tx_sa_tbl[sa_index].key[0] =
267 rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
268 priv->tx_sa_tbl[sa_index].salt =
269 rte_cpu_to_be_32(ic_session->salt);
271 reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
272 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
273 priv->tx_sa_tbl[sa_index].key[0]);
274 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
275 priv->tx_sa_tbl[sa_index].key[1]);
276 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
277 priv->tx_sa_tbl[sa_index].key[2]);
278 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
279 priv->tx_sa_tbl[sa_index].key[3]);
280 IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
281 priv->tx_sa_tbl[sa_index].salt);
284 priv->tx_sa_tbl[i].used = 1;
285 ic_session->sa_index = sa_index;
292 ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
293 struct ixgbe_crypto_session *ic_session)
295 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
296 struct ixgbe_ipsec *priv =
297 IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
301 if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
302 int i, ip_index = -1;
304 /* Find a match in the IP table*/
305 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
306 if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
312 /* Fail if no match*/
315 "Entry not found in the Rx IP table\n");
319 /* Find a free entry in the SA table*/
320 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
321 if (priv->rx_sa_tbl[i].spi ==
322 rte_cpu_to_be_32(ic_session->spi)) {
327 /* Fail if no match*/
330 "Entry not found in the Rx SA table\n");
334 /* Disable and clear Rx SPI and key table table entryes*/
335 reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
336 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
337 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
339 reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
340 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
341 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
342 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
343 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
344 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
345 IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
347 priv->rx_sa_tbl[sa_index].used = 0;
349 /* If last used then clear the IP table entry*/
350 priv->rx_ip_tbl[ip_index].ref_count--;
351 if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
352 reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
354 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
355 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
356 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
357 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
359 } else { /* session->dir == RTE_CRYPTO_OUTBOUND */
362 /* Find a match in the SA table*/
363 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
364 if (priv->tx_sa_tbl[i].spi ==
365 rte_cpu_to_be_32(ic_session->spi)) {
370 /* Fail if no match entries*/
373 "Entry not found in the Tx SA table\n");
376 reg_val = IPSRXIDX_WRITE | (sa_index << 3);
377 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
378 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
379 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
380 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
381 IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
384 priv->tx_sa_tbl[sa_index].used = 0;
391 ixgbe_crypto_create_session(void *device,
392 struct rte_security_session_conf *conf,
393 struct rte_security_session *session,
394 struct rte_mempool *mempool)
396 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
397 struct ixgbe_crypto_session *ic_session = NULL;
398 struct rte_crypto_aead_xform *aead_xform;
399 struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf;
401 if (rte_mempool_get(mempool, (void **)&ic_session)) {
402 PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
406 if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
407 conf->crypto_xform->aead.algo !=
408 RTE_CRYPTO_AEAD_AES_GCM) {
409 PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
412 aead_xform = &conf->crypto_xform->aead;
414 if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
415 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
416 ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
418 PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
422 if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
423 ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
425 PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
430 ic_session->key = aead_xform->key.data;
431 memcpy(&ic_session->salt,
432 &aead_xform->key.data[aead_xform->key.length], 4);
433 ic_session->spi = conf->ipsec.spi;
434 ic_session->dev = eth_dev;
436 set_sec_session_private_data(session, ic_session);
438 if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
439 if (ixgbe_crypto_add_sa(ic_session)) {
440 PMD_DRV_LOG(ERR, "Failed to add SA\n");
449 ixgbe_crypto_remove_session(void *device,
450 struct rte_security_session *session)
452 struct rte_eth_dev *eth_dev = device;
453 struct ixgbe_crypto_session *ic_session =
454 (struct ixgbe_crypto_session *)
455 get_sec_session_private_data(session);
456 struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
458 if (eth_dev != ic_session->dev) {
459 PMD_DRV_LOG(ERR, "Session not bound to this device\n");
463 if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
464 PMD_DRV_LOG(ERR, "Failed to remove session\n");
468 rte_mempool_put(mempool, (void *)ic_session);
473 static inline uint8_t
474 ixgbe_crypto_compute_pad_len(struct rte_mbuf *m)
476 if (m->nb_segs == 1) {
477 /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
478 * payload padding size is stored at <pkt_len - 18>
480 uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
481 rte_pktmbuf_pkt_len(m) -
482 (ESP_TRAILER_SIZE + ESP_ICV_SIZE));
483 return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
489 ixgbe_crypto_update_mb(void *device __rte_unused,
490 struct rte_security_session *session,
491 struct rte_mbuf *m, void *params __rte_unused)
493 struct ixgbe_crypto_session *ic_session =
494 get_sec_session_private_data(session);
495 if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
496 union ixgbe_crypto_tx_desc_md *mdata =
497 (union ixgbe_crypto_tx_desc_md *)&m->udata64;
499 mdata->sa_idx = ic_session->sa_index;
500 mdata->pad_len = ixgbe_crypto_compute_pad_len(m);
/* rte_security capabilities_get callback.
 *
 * Advertises the inline-crypto offload: ESP with AES-GCM/GMAC (128-bit),
 * transport and tunnel mode, ingress and egress. The tables are
 * function-local statics so a stable pointer can be returned.
 * NOTE(review): the key/digest/iv size-range fields of the capability
 * entries are not visible in this chunk — verify against the full file.
 */
static const struct rte_security_capability *
ixgbe_crypto_capabilities_get(void *device __rte_unused)
/* Crypto transforms referenced by every IPsec capability below. */
static const struct rte_cryptodev_capabilities
aes_gcm_gmac_crypto_capabilities[] = {
{ /* AES GMAC (128-bit) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
.algo = RTE_CRYPTO_AUTH_AES_GMAC,
{ /* AES GCM (128-bit) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
.algo = RTE_CRYPTO_AEAD_AES_GCM,
/* list terminator */
.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
static const struct rte_security_capability
ixgbe_security_capabilities[] = {
{ /* IPsec Inline Crypto ESP Transport Egress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
/* egress entries need descriptor metadata from set_pkt_metadata */
.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
{ /* IPsec Inline Crypto ESP Transport Ingress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
{ /* IPsec Inline Crypto ESP Tunnel Egress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
{ /* IPsec Inline Crypto ESP Tunnel Ingress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
/* list terminator */
.action = RTE_SECURITY_ACTION_TYPE_NONE
return ixgbe_security_capabilities;
634 ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
636 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
640 if (dev->data->dev_conf.rxmode.enable_lro) {
641 PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
644 if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
645 PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
650 /* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/
651 IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
653 /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
654 * hang will occur with heavy traffic.
656 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
657 reg = (reg & 0xFFFFFFF0) | 0x3;
658 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
660 reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
661 reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
662 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
664 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
665 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
666 reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
668 PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
672 if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
673 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
674 IXGBE_SECTXCTRL_STORE_FORWARD);
675 reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
676 if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
677 PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
682 ixgbe_crypto_clear_ipsec_tables(dev);
688 ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
692 struct ixgbe_crypto_session *ic_session
693 = get_sec_session_private_data(sess);
695 if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
697 const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
698 ic_session->src_ip.type = IPv6;
699 ic_session->dst_ip.type = IPv6;
700 rte_memcpy(ic_session->src_ip.ipv6,
701 ipv6->hdr.src_addr, 16);
702 rte_memcpy(ic_session->dst_ip.ipv6,
703 ipv6->hdr.dst_addr, 16);
705 const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
706 ic_session->src_ip.type = IPv4;
707 ic_session->dst_ip.type = IPv4;
708 ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
709 ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
711 return ixgbe_crypto_add_sa(ic_session);
/* rte_security operations exposed by the ixgbe PMD.
 * session_update and session_stats_get are not supported and left NULL.
 */
static struct rte_security_ops ixgbe_security_ops = {
	.session_create = ixgbe_crypto_create_session,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = ixgbe_crypto_remove_session,
	.set_pkt_metadata = ixgbe_crypto_update_mb,
	.capabilities_get = ixgbe_crypto_capabilities_get
/* Allocate and initialise the rte_security context bound to this port.
 * NOTE(review): the rte_malloc() result is dereferenced without a NULL
 * check — on allocation failure this crashes; confirm and add error
 * handling (the function's tail is outside this chunk).
 */
struct rte_security_ctx *
ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
	struct rte_security_ctx *ctx = rte_malloc("rte_security_instances_ops",
			sizeof(struct rte_security_ctx), 0);
	ctx->device = (void *)dev;
	ctx->ops = &ixgbe_security_ops;