/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <ethdev_pci.h>
#include <rte_security_driver.h>
#include <rte_cryptodev.h>

#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_ipsec.h"
#define CMP_IP(a, b) (\
	(a).ipv6[0] == (b).ipv6[0] && \
	(a).ipv6[1] == (b).ipv6[1] && \
	(a).ipv6[2] == (b).ipv6[2] && \
	(a).ipv6[3] == (b).ipv6[3])
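
/* Zero every entry of the hardware Rx IP, Rx SPI/key and Tx key tables
 * through the indexed IPSRXIDX/IPSTXIDX write interface, then reset the
 * driver's shadow copies of those tables.
 */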
static void
txgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
	int i;

	/* clear Rx IP table */
	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
		uint16_t index = i << 3;
		uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
				TXGBE_IPSRXIDX_TB_IP | index;
		wr32(hw, TXGBE_IPSRXADDR(0), 0);
		wr32(hw, TXGBE_IPSRXADDR(1), 0);
		wr32(hw, TXGBE_IPSRXADDR(2), 0);
		wr32(hw, TXGBE_IPSRXADDR(3), 0);
		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
	}

	/* clear Rx SPI and Rx/Tx SA tables */
	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
		uint32_t index = i << 3;
		uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
				TXGBE_IPSRXIDX_TB_SPI | index;
		wr32(hw, TXGBE_IPSRXSPI, 0);
		wr32(hw, TXGBE_IPSRXADDRIDX, 0);
		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
		reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_KEY | index;
		wr32(hw, TXGBE_IPSRXKEY(0), 0);
		wr32(hw, TXGBE_IPSRXKEY(1), 0);
		wr32(hw, TXGBE_IPSRXKEY(2), 0);
		wr32(hw, TXGBE_IPSRXKEY(3), 0);
		wr32(hw, TXGBE_IPSRXSALT, 0);
		wr32(hw, TXGBE_IPSRXMODE, 0);
		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
		reg_val = TXGBE_IPSTXIDX_WRITE | index;
		wr32(hw, TXGBE_IPSTXKEY(0), 0);
		wr32(hw, TXGBE_IPSTXKEY(1), 0);
		wr32(hw, TXGBE_IPSTXKEY(2), 0);
		wr32(hw, TXGBE_IPSTXKEY(3), 0);
		wr32(hw, TXGBE_IPSTXSALT, 0);
		wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
	}

	memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
	memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
	memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
}
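
/* Program one SA into the hardware. Inbound SAs take (or reference-count)
 * an Rx IP table slot plus a free Rx SA slot and are written to the IP,
 * SPI and key tables; outbound SAs take a free Tx slot and are written to
 * the Tx key table. The key words are written most-significant-first
 * (key[12] into register 0), which appears to be the layout the hardware
 * expects, as in the ixgbe inline-crypto PMD.
 */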
static int
txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
{
	struct rte_eth_dev *dev = ic_session->dev;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
	uint32_t reg_val;
	int sa_index = -1;

	if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
		int i, ip_index = -1;
		uint8_t *key;

		/* Find a match in the IP table */
		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
			if (CMP_IP(priv->rx_ip_tbl[i].ip,
				   ic_session->dst_ip)) {
				ip_index = i;
				break;
			}
		}

		/* If no match, find a free entry in the IP table */
		if (ip_index < 0) {
			for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
				if (priv->rx_ip_tbl[i].ref_count == 0) {
					ip_index = i;
					break;
				}
			}
		}

		/* Fail if no match and no free entries */
		if (ip_index < 0) {
			PMD_DRV_LOG(ERR,
				    "No free entry left in the Rx IP table\n");
			return -1;
		}

		/* Find a free entry in the SA table */
		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
			if (priv->rx_sa_tbl[i].used == 0) {
				sa_index = i;
				break;
			}
		}
		/* Fail if no free entries */
		if (sa_index < 0) {
			PMD_DRV_LOG(ERR,
				    "No free entry left in the Rx SA table\n");
			return -1;
		}

		priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
				ic_session->dst_ip.ipv6[0];
		priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
				ic_session->dst_ip.ipv6[1];
		priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
				ic_session->dst_ip.ipv6[2];
		priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
				ic_session->dst_ip.ipv6[3];
		priv->rx_ip_tbl[ip_index].ref_count++;

		priv->rx_sa_tbl[sa_index].spi = ic_session->spi;
		priv->rx_sa_tbl[sa_index].ip_index = ip_index;
		priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
		if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION)
			priv->rx_sa_tbl[sa_index].mode |=
					(IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
		if (ic_session->dst_ip.type == IPv6) {
			priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
			priv->rx_ip_tbl[ip_index].ip.type = IPv6;
		} else if (ic_session->dst_ip.type == IPv4) {
			priv->rx_ip_tbl[ip_index].ip.type = IPv4;
		}
		priv->rx_sa_tbl[sa_index].used = 1;

		/* write IP table entry */
		reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
				TXGBE_IPSRXIDX_TB_IP | (ip_index << 3);
		if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
			uint32_t ipv4 = priv->rx_ip_tbl[ip_index].ip.ipv4;
			wr32(hw, TXGBE_IPSRXADDR(0), rte_cpu_to_be_32(ipv4));
			wr32(hw, TXGBE_IPSRXADDR(1), 0);
			wr32(hw, TXGBE_IPSRXADDR(2), 0);
			wr32(hw, TXGBE_IPSRXADDR(3), 0);
		} else {
			wr32(hw, TXGBE_IPSRXADDR(0),
					priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
			wr32(hw, TXGBE_IPSRXADDR(1),
					priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
			wr32(hw, TXGBE_IPSRXADDR(2),
					priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
			wr32(hw, TXGBE_IPSRXADDR(3),
					priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
		}
		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);

		/* write SPI table entry */
		reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
				TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
		wr32(hw, TXGBE_IPSRXSPI,
				priv->rx_sa_tbl[sa_index].spi);
		wr32(hw, TXGBE_IPSRXADDRIDX,
				priv->rx_sa_tbl[sa_index].ip_index);
		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);

		/* write Key table entry */
		key = malloc(ic_session->key_len);
		if (key == NULL)
			return -ENOMEM;

		memcpy(key, ic_session->key, ic_session->key_len);

		reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
				TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
		wr32(hw, TXGBE_IPSRXKEY(0),
			rte_cpu_to_be_32(*(uint32_t *)&key[12]));
		wr32(hw, TXGBE_IPSRXKEY(1),
			rte_cpu_to_be_32(*(uint32_t *)&key[8]));
		wr32(hw, TXGBE_IPSRXKEY(2),
			rte_cpu_to_be_32(*(uint32_t *)&key[4]));
		wr32(hw, TXGBE_IPSRXKEY(3),
			rte_cpu_to_be_32(*(uint32_t *)&key[0]));
		wr32(hw, TXGBE_IPSRXSALT,
				rte_cpu_to_be_32(ic_session->salt));
		wr32(hw, TXGBE_IPSRXMODE,
				priv->rx_sa_tbl[sa_index].mode);
		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);

		free(key);
	} else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
		uint8_t *key;
		int i;

		/* Find a free entry in the SA table */
		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
			if (priv->tx_sa_tbl[i].used == 0) {
				sa_index = i;
				break;
			}
		}
		/* Fail if no free entries */
		if (sa_index < 0) {
			PMD_DRV_LOG(ERR,
				    "No free entry left in the Tx SA table\n");
			return -1;
		}

		priv->tx_sa_tbl[sa_index].spi =
			rte_cpu_to_be_32(ic_session->spi);
		priv->tx_sa_tbl[sa_index].used = 1;
		ic_session->sa_index = sa_index;

		key = malloc(ic_session->key_len);
		if (key == NULL)
			return -ENOMEM;

		memcpy(key, ic_session->key, ic_session->key_len);

		/* write Key table entry */
		reg_val = TXGBE_IPSRXIDX_ENA |
			TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
		wr32(hw, TXGBE_IPSTXKEY(0),
			rte_cpu_to_be_32(*(uint32_t *)&key[12]));
		wr32(hw, TXGBE_IPSTXKEY(1),
			rte_cpu_to_be_32(*(uint32_t *)&key[8]));
		wr32(hw, TXGBE_IPSTXKEY(2),
			rte_cpu_to_be_32(*(uint32_t *)&key[4]));
		wr32(hw, TXGBE_IPSTXKEY(3),
			rte_cpu_to_be_32(*(uint32_t *)&key[0]));
		wr32(hw, TXGBE_IPSTXSALT,
				rte_cpu_to_be_32(ic_session->salt));
		wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);

		free(key);
	}

	return 0;
}
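
/* Inverse of txgbe_crypto_add_sa(): look the SA up by destination address
 * and SPI, clear the matching hardware table entries (index writes without
 * the ENA bit leave the entry disabled), and release the IP table slot
 * once its reference count drops to zero.
 */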
static int
txgbe_crypto_remove_sa(struct rte_eth_dev *dev,
		       struct txgbe_crypto_session *ic_session)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
	uint32_t reg_val;
	int sa_index = -1;

	if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
		int i, ip_index = -1;

		/* Find a match in the IP table */
		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
			if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
				ip_index = i;
				break;
			}
		}

		/* Fail if no match */
		if (ip_index < 0) {
			PMD_DRV_LOG(ERR,
				    "Entry not found in the Rx IP table\n");
			return -1;
		}

		/* Find a match in the SA table */
		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
			if (priv->rx_sa_tbl[i].spi ==
					rte_cpu_to_be_32(ic_session->spi)) {
				sa_index = i;
				break;
			}
		}
		/* Fail if no match */
		if (sa_index < 0) {
			PMD_DRV_LOG(ERR,
				    "Entry not found in the Rx SA table\n");
			return -1;
		}

		/* Disable and clear Rx SPI and key table entries */
		reg_val = TXGBE_IPSRXIDX_WRITE |
				TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
		wr32(hw, TXGBE_IPSRXSPI, 0);
		wr32(hw, TXGBE_IPSRXADDRIDX, 0);
		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
		reg_val = TXGBE_IPSRXIDX_WRITE |
				TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
		wr32(hw, TXGBE_IPSRXKEY(0), 0);
		wr32(hw, TXGBE_IPSRXKEY(1), 0);
		wr32(hw, TXGBE_IPSRXKEY(2), 0);
		wr32(hw, TXGBE_IPSRXKEY(3), 0);
		wr32(hw, TXGBE_IPSRXSALT, 0);
		wr32(hw, TXGBE_IPSRXMODE, 0);
		wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
		priv->rx_sa_tbl[sa_index].used = 0;

		/* If last used then clear the IP table entry */
		priv->rx_ip_tbl[ip_index].ref_count--;
		if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
			reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_IP |
					(ip_index << 3);
			wr32(hw, TXGBE_IPSRXADDR(0), 0);
			wr32(hw, TXGBE_IPSRXADDR(1), 0);
			wr32(hw, TXGBE_IPSRXADDR(2), 0);
			wr32(hw, TXGBE_IPSRXADDR(3), 0);
			wr32w(hw, TXGBE_IPSRXIDX, reg_val,
			      TXGBE_IPSRXIDX_WRITE, 1000);
		}
	} else { /* session->dir == RTE_CRYPTO_OUTBOUND */
		int i;

		/* Find a match in the SA table */
		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
			if (priv->tx_sa_tbl[i].spi ==
					rte_cpu_to_be_32(ic_session->spi)) {
				sa_index = i;
				break;
			}
		}
		/* Fail if no match */
		if (sa_index < 0) {
			PMD_DRV_LOG(ERR,
				    "Entry not found in the Tx SA table\n");
			return -1;
		}

		/* Disable and clear Tx key table entry */
		reg_val = TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
		wr32(hw, TXGBE_IPSTXKEY(0), 0);
		wr32(hw, TXGBE_IPSTXKEY(1), 0);
		wr32(hw, TXGBE_IPSTXKEY(2), 0);
		wr32(hw, TXGBE_IPSTXKEY(3), 0);
		wr32(hw, TXGBE_IPSTXSALT, 0);
		wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);

		priv->tx_sa_tbl[sa_index].used = 0;
	}

	return 0;
}
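
/* rte_security session_create() handler. Only AES-GCM AEAD transforms are
 * accepted, and the matching Rx/Tx SECURITY offload must be enabled on the
 * port. The 4-byte salt is expected to follow the key material in the AEAD
 * key blob. Egress SAs are programmed immediately; ingress SAs are completed
 * later, when txgbe_crypto_add_ingress_sa_from_flow() learns the addresses
 * from the flow rule.
 *
 * A minimal application-side sketch (illustrative only; the field values
 * are hypothetical and the exact rte_security_session_create() signature
 * varies between DPDK releases):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = { .spi = 5, .direction =
 *			RTE_SECURITY_IPSEC_SA_DIR_EGRESS, },
 *		.crypto_xform = &aead_xform,	(an AES-GCM AEAD xform)
 *	};
 *
 * which is then passed to rte_security_session_create() on this port's
 * security context.
 */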
static int
txgbe_crypto_create_session(void *device,
		struct rte_security_session_conf *conf,
		struct rte_security_session *session,
		struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct txgbe_crypto_session *ic_session = NULL;
	struct rte_crypto_aead_xform *aead_xform;
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;

	if (rte_mempool_get(mempool, (void **)&ic_session)) {
		PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
		return -ENOMEM;
	}

	if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
			conf->crypto_xform->aead.algo !=
					RTE_CRYPTO_AEAD_AES_GCM) {
		PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
		rte_mempool_put(mempool, (void *)ic_session);
		return -ENOTSUP;
	}
	aead_xform = &conf->crypto_xform->aead;

	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
			ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
		} else {
			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
			rte_mempool_put(mempool, (void *)ic_session);
			return -ENOTSUP;
		}
	} else {
		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
			ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
		} else {
			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
			rte_mempool_put(mempool, (void *)ic_session);
			return -ENOTSUP;
		}
	}

	ic_session->key = aead_xform->key.data;
	ic_session->key_len = aead_xform->key.length;
	memcpy(&ic_session->salt,
	       &aead_xform->key.data[aead_xform->key.length], 4);
	ic_session->spi = conf->ipsec.spi;
	ic_session->dev = eth_dev;

	set_sec_session_private_data(session, ic_session);

	if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
		if (txgbe_crypto_add_sa(ic_session)) {
			PMD_DRV_LOG(ERR, "Failed to add SA\n");
			rte_mempool_put(mempool, (void *)ic_session);
			return -EPERM;
		}
	}

	return 0;
}

static unsigned int
txgbe_crypto_session_get_size(__rte_unused void *device)
{
	return sizeof(struct txgbe_crypto_session);
}
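
/* rte_security session_destroy() handler: check that the session was
 * created on this port, remove the SA from hardware, and return the
 * session object to its mempool.
 */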
static int
txgbe_crypto_remove_session(void *device,
		struct rte_security_session *session)
{
	struct rte_eth_dev *eth_dev = device;
	struct txgbe_crypto_session *ic_session =
		(struct txgbe_crypto_session *)
		get_sec_session_private_data(session);
	struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);

	if (eth_dev != ic_session->dev) {
		PMD_DRV_LOG(ERR, "Session not bound to this device\n");
		return -ENODEV;
	}

	if (txgbe_crypto_remove_sa(eth_dev, ic_session)) {
		PMD_DRV_LOG(ERR, "Failed to remove session\n");
		return -EFAULT;
	}

	rte_mempool_put(mempool, (void *)ic_session);

	return 0;
}
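
/* For a fully built single-segment ESP packet, the pad-length byte sits
 * just before the next-header byte and the 16-byte ICV, i.e. at offset
 * pkt_len - 18. Example: with 6 bytes of padding this returns
 * 6 + 2 + 16 = 24. Multi-segment mbufs are not handled and yield 0.
 */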
static inline uint8_t
txgbe_crypto_compute_pad_len(struct rte_mbuf *m)
{
	if (m->nb_segs == 1) {
		/* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
		 * payload padding size is stored at <pkt_len - 18>
		 */
		uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
					rte_pktmbuf_pkt_len(m) -
					(ESP_TRAILER_SIZE + ESP_ICV_SIZE));
		return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
	}
	return 0;
}
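
/* rte_security set_pkt_metadata() handler: for egress SAs, stash the SA
 * index and pad length in the mbuf's security dynfield so the Tx burst
 * path can fill in the security context descriptor.
 */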
static int
txgbe_crypto_update_mb(void *device __rte_unused,
		struct rte_security_session *session,
		struct rte_mbuf *m, void *params __rte_unused)
{
	struct txgbe_crypto_session *ic_session =
			get_sec_session_private_data(session);

	if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
		union txgbe_crypto_tx_desc_md *mdata =
			(union txgbe_crypto_tx_desc_md *)
				rte_security_dynfield(m);
		mdata->enc = 1;
		mdata->sa_idx = ic_session->sa_index;
		mdata->pad_len = txgbe_crypto_compute_pad_len(m);
	}

	return 0;
}
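
/* Advertise ESP transport/tunnel inline crypto in both directions, backed
 * by AES-GMAC/AES-GCM (128-bit). Egress entries set
 * RTE_SECURITY_TX_OLOAD_NEED_MDATA because the Tx path depends on the
 * per-packet metadata set in txgbe_crypto_update_mb(). The crypto entries
 * below spell out only the algorithm identifiers; complete definitions
 * would also bound the key, digest and IV sizes.
 */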
static const struct rte_security_capability *
txgbe_crypto_capabilities_get(void *device __rte_unused)
{
	static const struct rte_cryptodev_capabilities
	aes_gcm_gmac_crypto_capabilities[] = {
		{	/* AES GMAC (128-bit) */
			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			{.sym = {
				.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				{.auth = {
					.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				}, }
			}, }
		},
		{	/* AES GCM (128-bit) */
			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			{.sym = {
				.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
				{.aead = {
					.algo = RTE_CRYPTO_AEAD_AES_GCM,
				}, }
			}, }
		},
		{
			.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
			{.sym = {
				.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
			}, }
		}
	};

	static const struct rte_security_capability
	txgbe_security_capabilities[] = {
		{ /* IPsec Inline Crypto ESP Transport Egress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
				.options = { 0 }
			},
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
		},
		{ /* IPsec Inline Crypto ESP Transport Ingress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				.options = { 0 }
			},
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = 0
		},
		{ /* IPsec Inline Crypto ESP Tunnel Egress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
				.options = { 0 }
			},
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
		},
		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				.options = { 0 }
			},
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = 0
		},
		{
			.action = RTE_SECURITY_ACTION_TYPE_NONE
		}
	};

	return txgbe_security_capabilities;
}
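
/* One-time security-block setup, presumably run when the port starts with
 * SECURITY offloads enabled: reject offload combinations the hardware
 * cannot do (LRO, KEEP_CRC), program the Tx buffer almost-full threshold
 * and minimum IFG the datasheet calls for, force CRC strip in the Rx
 * security block, then enable the Rx (clear ODSA) and Tx (store-and-forward)
 * crypto engines, verifying each write took effect.
 */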
static int
txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t reg;
	uint64_t rx_offloads;
	uint64_t tx_offloads;

	rx_offloads = dev->data->dev_conf.rxmode.offloads;
	tx_offloads = dev->data->dev_conf.txmode.offloads;

	/* sanity checks */
	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
		return -1;
	}
	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
		return -1;
	}

	/* Set TXGBE_SECTXBUFAF to 0x14 as required in the datasheet */
	wr32(hw, TXGBE_SECTXBUFAF, 0x14);

	/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
	 * hang will occur with heavy traffic.
	 */
	reg = rd32(hw, TXGBE_SECTXIFG);
	reg = (reg & ~TXGBE_SECTXIFG_MIN_MASK) | TXGBE_SECTXIFG_MIN(0x3);
	wr32(hw, TXGBE_SECTXIFG, reg);

	reg = rd32(hw, TXGBE_SECRXCTL);
	reg |= TXGBE_SECRXCTL_CRCSTRIP;
	wr32(hw, TXGBE_SECRXCTL, reg);

	if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
		reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
		if (reg != 0) {
			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
			return -1;
		}
	}
	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
		wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
		reg = rd32(hw, TXGBE_SECTXCTL);
		if (reg != TXGBE_SECTXCTL_STFWD) {
			PMD_DRV_LOG(ERR, "Error enabling Tx Crypto");
			return -1;
		}
	}

	txgbe_crypto_clear_ipsec_tables(dev);

	return 0;
}
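
/* Called from the rte_flow layer once the addresses of an ingress IPsec
 * rule are known; completes the session started in
 * txgbe_crypto_create_session() and programs the SA into hardware.
 */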
int
txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
				      const void *ip_spec,
				      uint8_t is_ipv6)
{
	struct txgbe_crypto_session *ic_session =
			get_sec_session_private_data(sess);

	if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
		if (is_ipv6) {
			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
			ic_session->src_ip.type = IPv6;
			ic_session->dst_ip.type = IPv6;

			rte_memcpy(ic_session->src_ip.ipv6,
				   ipv6->hdr.src_addr, 16);
			rte_memcpy(ic_session->dst_ip.ipv6,
				   ipv6->hdr.dst_addr, 16);
		} else {
			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
			ic_session->src_ip.type = IPv4;
			ic_session->dst_ip.type = IPv4;

			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
		}
		return txgbe_crypto_add_sa(ic_session);
	}

	return 0;
}

static struct rte_security_ops txgbe_security_ops = {
	.session_create = txgbe_crypto_create_session,
	.session_get_size = txgbe_crypto_session_get_size,
	.session_destroy = txgbe_crypto_remove_session,
	.set_pkt_metadata = txgbe_crypto_update_mb,
	.capabilities_get = txgbe_crypto_capabilities_get
};
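
/* Probe whether the security engine is usable by attempting to clear the
 * ODSA bit in SECRXCTL and reading it back, then restore the original
 * register value.
 */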
static int
txgbe_crypto_capable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t reg_i, reg, capable = 1;
	/* test if rx crypto can be enabled and then write back initial value */
	reg_i = rd32(hw, TXGBE_SECRXCTL);
	wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
	reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
	if (reg != 0)
		capable = 0;
	wr32(hw, TXGBE_SECRXCTL, reg_i);
	return capable;
}
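
/* Device-init hook: if the hardware is crypto-capable, allocate the
 * rte_security context that exposes txgbe_security_ops, and register the
 * mbuf security dynfield used for Tx metadata.
 */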
int
txgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
{
	struct rte_security_ctx *ctx = NULL;

	if (txgbe_crypto_capable(dev)) {
		ctx = rte_malloc("rte_security_instances_ops",
				 sizeof(struct rte_security_ctx), 0);
		if (ctx) {
			ctx->device = (void *)dev;
			ctx->ops = &txgbe_security_ops;
			dev->security_ctx = ctx;
		}
	}
	if (rte_security_dynfield_register() < 0)
		return -rte_errno;
	return 0;
}