net/ixgbe: fix IP type for crypto session
[dpdk.git] / drivers / net / ixgbe / ixgbe_ipsec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <rte_ethdev_driver.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_ip.h>
8 #include <rte_jhash.h>
9 #include <rte_security_driver.h>
10 #include <rte_cryptodev.h>
11 #include <rte_flow.h>
12
13 #include "base/ixgbe_type.h"
14 #include "base/ixgbe_api.h"
15 #include "ixgbe_ethdev.h"
16 #include "ixgbe_ipsec.h"
17
/* Poll period (in ms) used when waiting for an IPsec index register strobe. */
#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS  5

/* The IXGBE_WAIT_* helpers latch the command word prepared in the local
 * variable 'reg_val' into the Rx/Tx IPsec index register and poll until
 * the hardware clears the READ/WRITE strobe bit.  They deliberately
 * reference 'hw' and 'reg_val' from the expansion site's scope.
 */
#define IXGBE_WAIT_RREAD \
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
#define IXGBE_WAIT_RWRITE \
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
#define IXGBE_WAIT_TREAD \
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
#define IXGBE_WAIT_TWRITE \
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)

/* Compare two IP addresses word-by-word.  IPv4 addresses live in the same
 * union, so all four 32-bit words still take part in the comparison.
 * NOTE(review): for IPv4 this assumes the unused union words are zeroed
 * on both sides -- confirm for session objects taken from the mempool.
 */
#define CMP_IP(a, b) (\
	(a).ipv6[0] == (b).ipv6[0] && \
	(a).ipv6[1] == (b).ipv6[1] && \
	(a).ipv6[2] == (b).ipv6[2] && \
	(a).ipv6[3] == (b).ipv6[3])
38
39
/**
 * Wipe every hardware IPsec table (Rx IP, Rx SPI, Rx/Tx key) and reset
 * the driver-side shadow copies kept in the device private data.
 *
 * @param dev  Ethernet device whose IPsec engine is being reset.
 */
static void
ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
				dev->data->dev_private);
	int i = 0;

	/* clear Rx IP table*/
	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
		/* Table index occupies the bits above the table-select field. */
		uint16_t index = i << 3;
		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
		/* Latch the zeroed address into the HW table entry. */
		IXGBE_WAIT_RWRITE;
	}

	/* clear Rx SPI and Rx/Tx SA tables*/
	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
		uint32_t index = i << 3;
		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
		IXGBE_WAIT_RWRITE;
		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
		IXGBE_WAIT_RWRITE;
		/* Tx key table has no table-select field, only the index. */
		reg_val = IPSRXIDX_WRITE | index;
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
		IXGBE_WAIT_TWRITE;
	}

	/* Keep the software shadow tables in sync with the wiped HW state. */
	memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
	memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
	memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
}
87
88 static int
89 ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
90 {
91         struct rte_eth_dev *dev = ic_session->dev;
92         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
93         struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
94                         dev->data->dev_private);
95         uint32_t reg_val;
96         int sa_index = -1;
97
98         if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
99                 int i, ip_index = -1;
100
101                 /* Find a match in the IP table*/
102                 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
103                         if (CMP_IP(priv->rx_ip_tbl[i].ip,
104                                    ic_session->dst_ip)) {
105                                 ip_index = i;
106                                 break;
107                         }
108                 }
109                 /* If no match, find a free entry in the IP table*/
110                 if (ip_index < 0) {
111                         for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
112                                 if (priv->rx_ip_tbl[i].ref_count == 0) {
113                                         ip_index = i;
114                                         break;
115                                 }
116                         }
117                 }
118
119                 /* Fail if no match and no free entries*/
120                 if (ip_index < 0) {
121                         PMD_DRV_LOG(ERR,
122                                     "No free entry left in the Rx IP table\n");
123                         return -1;
124                 }
125
126                 /* Find a free entry in the SA table*/
127                 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
128                         if (priv->rx_sa_tbl[i].used == 0) {
129                                 sa_index = i;
130                                 break;
131                         }
132                 }
133                 /* Fail if no free entries*/
134                 if (sa_index < 0) {
135                         PMD_DRV_LOG(ERR,
136                                     "No free entry left in the Rx SA table\n");
137                         return -1;
138                 }
139
140                 priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
141                                 ic_session->dst_ip.ipv6[0];
142                 priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
143                                 ic_session->dst_ip.ipv6[1];
144                 priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
145                                 ic_session->dst_ip.ipv6[2];
146                 priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
147                                 ic_session->dst_ip.ipv6[3];
148                 priv->rx_ip_tbl[ip_index].ref_count++;
149
150                 priv->rx_sa_tbl[sa_index].spi =
151                         rte_cpu_to_be_32(ic_session->spi);
152                 priv->rx_sa_tbl[sa_index].ip_index = ip_index;
153                 priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
154                 if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
155                         priv->rx_sa_tbl[sa_index].mode |=
156                                         (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
157                 if (ic_session->dst_ip.type == IPv6) {
158                         priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
159                         priv->rx_ip_tbl[ip_index].ip.type = IPv6;
160                 } else if (ic_session->dst_ip.type == IPv4)
161                         priv->rx_ip_tbl[ip_index].ip.type = IPv4;
162
163                 priv->rx_sa_tbl[sa_index].used = 1;
164
165                 /* write IP table entry*/
166                 reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
167                                 IPSRXIDX_TABLE_IP | (ip_index << 3);
168                 if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
169                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
170                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
171                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
172                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
173                                         priv->rx_ip_tbl[ip_index].ip.ipv4);
174                 } else {
175                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
176                                         priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
177                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
178                                         priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
179                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
180                                         priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
181                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
182                                         priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
183                 }
184                 IXGBE_WAIT_RWRITE;
185
186                 /* write SPI table entry*/
187                 reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
188                                 IPSRXIDX_TABLE_SPI | (sa_index << 3);
189                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
190                                 priv->rx_sa_tbl[sa_index].spi);
191                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
192                                 priv->rx_sa_tbl[sa_index].ip_index);
193                 IXGBE_WAIT_RWRITE;
194
195                 /* write Key table entry*/
196                 reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
197                                 IPSRXIDX_TABLE_KEY | (sa_index << 3);
198                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
199                         rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]));
200                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
201                         rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]));
202                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
203                         rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]));
204                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
205                         rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]));
206                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
207                                 rte_cpu_to_be_32(ic_session->salt));
208                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
209                                 priv->rx_sa_tbl[sa_index].mode);
210                 IXGBE_WAIT_RWRITE;
211
212         } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
213                 int i;
214
215                 /* Find a free entry in the SA table*/
216                 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
217                         if (priv->tx_sa_tbl[i].used == 0) {
218                                 sa_index = i;
219                                 break;
220                         }
221                 }
222                 /* Fail if no free entries*/
223                 if (sa_index < 0) {
224                         PMD_DRV_LOG(ERR,
225                                     "No free entry left in the Tx SA table\n");
226                         return -1;
227                 }
228
229                 priv->tx_sa_tbl[sa_index].spi =
230                         rte_cpu_to_be_32(ic_session->spi);
231                 priv->tx_sa_tbl[i].used = 1;
232                 ic_session->sa_index = sa_index;
233
234                 /* write Key table entry*/
235                 reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
236                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
237                         rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]));
238                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
239                         rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]));
240                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
241                         rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]));
242                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
243                         rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]));
244                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
245                                 rte_cpu_to_be_32(ic_session->salt));
246                 IXGBE_WAIT_TWRITE;
247         }
248
249         return 0;
250 }
251
252 static int
253 ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
254                        struct ixgbe_crypto_session *ic_session)
255 {
256         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
257         struct ixgbe_ipsec *priv =
258                         IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
259         uint32_t reg_val;
260         int sa_index = -1;
261
262         if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
263                 int i, ip_index = -1;
264
265                 /* Find a match in the IP table*/
266                 for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
267                         if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
268                                 ip_index = i;
269                                 break;
270                         }
271                 }
272
273                 /* Fail if no match*/
274                 if (ip_index < 0) {
275                         PMD_DRV_LOG(ERR,
276                                     "Entry not found in the Rx IP table\n");
277                         return -1;
278                 }
279
280                 /* Find a free entry in the SA table*/
281                 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
282                         if (priv->rx_sa_tbl[i].spi ==
283                                   rte_cpu_to_be_32(ic_session->spi)) {
284                                 sa_index = i;
285                                 break;
286                         }
287                 }
288                 /* Fail if no match*/
289                 if (sa_index < 0) {
290                         PMD_DRV_LOG(ERR,
291                                     "Entry not found in the Rx SA table\n");
292                         return -1;
293                 }
294
295                 /* Disable and clear Rx SPI and key table table entryes*/
296                 reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
297                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
298                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
299                 IXGBE_WAIT_RWRITE;
300                 reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
301                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
302                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
303                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
304                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
305                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
306                 IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
307                 IXGBE_WAIT_RWRITE;
308                 priv->rx_sa_tbl[sa_index].used = 0;
309
310                 /* If last used then clear the IP table entry*/
311                 priv->rx_ip_tbl[ip_index].ref_count--;
312                 if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
313                         reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
314                                         (ip_index << 3);
315                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
316                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
317                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
318                         IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
319                 }
320         } else { /* session->dir == RTE_CRYPTO_OUTBOUND */
321                 int i;
322
323                 /* Find a match in the SA table*/
324                 for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
325                         if (priv->tx_sa_tbl[i].spi ==
326                                     rte_cpu_to_be_32(ic_session->spi)) {
327                                 sa_index = i;
328                                 break;
329                         }
330                 }
331                 /* Fail if no match entries*/
332                 if (sa_index < 0) {
333                         PMD_DRV_LOG(ERR,
334                                     "Entry not found in the Tx SA table\n");
335                         return -1;
336                 }
337                 reg_val = IPSRXIDX_WRITE | (sa_index << 3);
338                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
339                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
340                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
341                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
342                 IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
343                 IXGBE_WAIT_TWRITE;
344
345                 priv->tx_sa_tbl[sa_index].used = 0;
346         }
347
348         return 0;
349 }
350
351 static int
352 ixgbe_crypto_create_session(void *device,
353                 struct rte_security_session_conf *conf,
354                 struct rte_security_session *session,
355                 struct rte_mempool *mempool)
356 {
357         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
358         struct ixgbe_crypto_session *ic_session = NULL;
359         struct rte_crypto_aead_xform *aead_xform;
360         struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
361
362         if (rte_mempool_get(mempool, (void **)&ic_session)) {
363                 PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
364                 return -ENOMEM;
365         }
366
367         if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
368                         conf->crypto_xform->aead.algo !=
369                                         RTE_CRYPTO_AEAD_AES_GCM) {
370                 PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
371                 rte_mempool_put(mempool, (void *)ic_session);
372                 return -ENOTSUP;
373         }
374         aead_xform = &conf->crypto_xform->aead;
375
376         if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
377                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
378                         ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
379                 } else {
380                         PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
381                         rte_mempool_put(mempool, (void *)ic_session);
382                         return -ENOTSUP;
383                 }
384         } else {
385                 if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
386                         ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
387                 } else {
388                         PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
389                         rte_mempool_put(mempool, (void *)ic_session);
390                         return -ENOTSUP;
391                 }
392         }
393
394         ic_session->key = aead_xform->key.data;
395         memcpy(&ic_session->salt,
396                &aead_xform->key.data[aead_xform->key.length], 4);
397         ic_session->spi = conf->ipsec.spi;
398         ic_session->dev = eth_dev;
399
400         set_sec_session_private_data(session, ic_session);
401
402         if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
403                 if (ixgbe_crypto_add_sa(ic_session)) {
404                         PMD_DRV_LOG(ERR, "Failed to add SA\n");
405                         rte_mempool_put(mempool, (void *)ic_session);
406                         return -EPERM;
407                 }
408         }
409
410         return 0;
411 }
412
413 static unsigned int
414 ixgbe_crypto_session_get_size(__rte_unused void *device)
415 {
416         return sizeof(struct ixgbe_crypto_session);
417 }
418
419 static int
420 ixgbe_crypto_remove_session(void *device,
421                 struct rte_security_session *session)
422 {
423         struct rte_eth_dev *eth_dev = device;
424         struct ixgbe_crypto_session *ic_session =
425                 (struct ixgbe_crypto_session *)
426                 get_sec_session_private_data(session);
427         struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
428
429         if (eth_dev != ic_session->dev) {
430                 PMD_DRV_LOG(ERR, "Session not bound to this device\n");
431                 return -ENODEV;
432         }
433
434         if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
435                 PMD_DRV_LOG(ERR, "Failed to remove session\n");
436                 return -EFAULT;
437         }
438
439         rte_mempool_put(mempool, (void *)ic_session);
440
441         return 0;
442 }
443
444 static inline uint8_t
445 ixgbe_crypto_compute_pad_len(struct rte_mbuf *m)
446 {
447         if (m->nb_segs == 1) {
448                 /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
449                  * payload padding size is stored at <pkt_len - 18>
450                  */
451                 uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
452                                         rte_pktmbuf_pkt_len(m) -
453                                         (ESP_TRAILER_SIZE + ESP_ICV_SIZE));
454                 return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
455         }
456         return 0;
457 }
458
459 static int
460 ixgbe_crypto_update_mb(void *device __rte_unused,
461                 struct rte_security_session *session,
462                        struct rte_mbuf *m, void *params __rte_unused)
463 {
464         struct ixgbe_crypto_session *ic_session =
465                         get_sec_session_private_data(session);
466         if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
467                 union ixgbe_crypto_tx_desc_md *mdata =
468                         (union ixgbe_crypto_tx_desc_md *)&m->udata64;
469                 mdata->enc = 1;
470                 mdata->sa_idx = ic_session->sa_index;
471                 mdata->pad_len = ixgbe_crypto_compute_pad_len(m);
472         }
473         return 0;
474 }
475
476
/**
 * rte_security capabilities_get callback.
 *
 * Advertises inline-crypto ESP support (transport and tunnel mode, both
 * directions) limited to 128-bit AES-GCM / AES-GMAC.  The tables are
 * static and terminated by RTE_CRYPTO_OP_TYPE_UNDEFINED /
 * RTE_SECURITY_ACTION_TYPE_NONE sentinel entries respectively.
 */
static const struct rte_security_capability *
ixgbe_crypto_capabilities_get(void *device __rte_unused)
{
	static const struct rte_cryptodev_capabilities
	aes_gcm_gmac_crypto_capabilities[] = {
		{	/* AES GMAC (128-bit) */
			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			{.sym = {
				.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
				{.auth = {
					.algo = RTE_CRYPTO_AUTH_AES_GMAC,
					.block_size = 16,
					.key_size = {
						.min = 16,
						.max = 16,
						.increment = 0
					},
					.digest_size = {
						.min = 16,
						.max = 16,
						.increment = 0
					},
					.iv_size = {
						.min = 12,
						.max = 12,
						.increment = 0
					}
				}, }
			}, }
		},
		{	/* AES GCM (128-bit) */
			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			{.sym = {
				.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
				{.aead = {
					.algo = RTE_CRYPTO_AEAD_AES_GCM,
					.block_size = 16,
					.key_size = {
						.min = 16,
						.max = 16,
						.increment = 0
					},
					.digest_size = {
						.min = 16,
						.max = 16,
						.increment = 0
					},
					.aad_size = {
						.min = 0,
						.max = 65535,
						.increment = 1
					},
					.iv_size = {
						.min = 12,
						.max = 12,
						.increment = 0
					}
				}, }
			}, }
		},
		{	/* End-of-list sentinel */
			.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
			{.sym = {
				.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
			}, }
		},
	};

	static const struct rte_security_capability
	ixgbe_security_capabilities[] = {
		{ /* IPsec Inline Crypto ESP Transport Egress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
				.options = { 0 }
			} },
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			/* Egress needs per-packet metadata (SA index etc.). */
			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
		},
		{ /* IPsec Inline Crypto ESP Transport Ingress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				.options = { 0 }
			} },
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = 0
		},
		{ /* IPsec Inline Crypto ESP Tunnel Egress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
				.options = { 0 }
			} },
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
		},
		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				.options = { 0 }
			} },
			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
			.ol_flags = 0
		},
		{ /* End-of-list sentinel */
			.action = RTE_SECURITY_ACTION_TYPE_NONE
		}
	};

	return ixgbe_security_capabilities;
}
602
603
604 int
605 ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
606 {
607         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608         uint32_t reg;
609         uint64_t rx_offloads;
610         uint64_t tx_offloads;
611
612         rx_offloads = dev->data->dev_conf.rxmode.offloads;
613         tx_offloads = dev->data->dev_conf.txmode.offloads;
614
615         /* sanity checks */
616         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
617                 PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
618                 return -1;
619         }
620         if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
621                 PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
622                 return -1;
623         }
624
625
626         /* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/
627         IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
628
629         /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
630          * hang will occur with heavy traffic.
631          */
632         reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
633         reg = (reg & 0xFFFFFFF0) | 0x3;
634         IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
635
636         reg  = IXGBE_READ_REG(hw, IXGBE_HLREG0);
637         reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
638         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
639
640         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
641                 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
642                 reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
643                 if (reg != 0) {
644                         PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
645                         return -1;
646                 }
647         }
648         if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
649                 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
650                                 IXGBE_SECTXCTRL_STORE_FORWARD);
651                 reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
652                 if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
653                         PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
654                         return -1;
655                 }
656         }
657
658         ixgbe_crypto_clear_ipsec_tables(dev);
659
660         return 0;
661 }
662
663 int
664 ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
665                                       const void *ip_spec,
666                                       uint8_t is_ipv6)
667 {
668         struct ixgbe_crypto_session *ic_session
669                 = get_sec_session_private_data(sess);
670
671         if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
672                 if (is_ipv6) {
673                         const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
674                         ic_session->src_ip.type = IPv6;
675                         ic_session->dst_ip.type = IPv6;
676                         rte_memcpy(ic_session->src_ip.ipv6,
677                                    ipv6->hdr.src_addr, 16);
678                         rte_memcpy(ic_session->dst_ip.ipv6,
679                                    ipv6->hdr.dst_addr, 16);
680                 } else {
681                         const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
682                         ic_session->src_ip.type = IPv4;
683                         ic_session->dst_ip.type = IPv4;
684                         ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
685                         ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
686                 }
687                 return ixgbe_crypto_add_sa(ic_session);
688         }
689
690         return 0;
691 }
692
/* rte_security callbacks exposed by the ixgbe inline-crypto PMD.
 * Callbacks left NULL (session update/stats) are not supported.
 */
static struct rte_security_ops ixgbe_security_ops = {
	.session_create = ixgbe_crypto_create_session,
	.session_update = NULL,
	.session_get_size = ixgbe_crypto_session_get_size,
	.session_stats_get = NULL,
	.session_destroy = ixgbe_crypto_remove_session,
	.set_pkt_metadata = ixgbe_crypto_update_mb,
	.capabilities_get = ixgbe_crypto_capabilities_get
};
702
703 static int
704 ixgbe_crypto_capable(struct rte_eth_dev *dev)
705 {
706         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
707         uint32_t reg_i, reg, capable = 1;
708         /* test if rx crypto can be enabled and then write back initial value*/
709         reg_i = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
710         IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
711         reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
712         if (reg != 0)
713                 capable = 0;
714         IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg_i);
715         return capable;
716 }
717
718 int
719 ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
720 {
721         struct rte_security_ctx *ctx = NULL;
722
723         if (ixgbe_crypto_capable(dev)) {
724                 ctx = rte_malloc("rte_security_instances_ops",
725                                  sizeof(struct rte_security_ctx), 0);
726                 if (ctx) {
727                         ctx->device = (void *)dev;
728                         ctx->ops = &ixgbe_security_ops;
729                         ctx->sess_cnt = 0;
730                         dev->security_ctx = ctx;
731                 } else {
732                         return -ENOMEM;
733                 }
734         }
735         return 0;
736 }