Add security support in eth device configure
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
- IEEE1588 timestamping
- HW offloaded `ethdev Rx queue` to `eventdev event queue` packet injection
- Support Rx interrupt
+- Inline IPsec processing support
Prerequisites
-------------
For example, to select the legacy mode (RSS tag adder as XOR)::
-w 0002:02:00.0,tag_as_xor=1
+- ``Max SPI for inbound inline IPsec`` (default ``1``)
+
+ The maximum SPI supported for inbound inline IPsec processing can be
+ specified with the ``ipsec_in_max_spi`` ``devargs`` parameter.
+
+ For example::
+ -w 0002:02:00.0,ipsec_in_max_spi=128
+
+ With the above configuration, the application can enable inline IPsec
+ processing for up to 128 SAs (SPI 0-127).
+
.. note::
Above devarg parameters are configurable per device, user needs to pass the
~~~~~~~~~~~~~~~~~~~~~
OCTEON TX2 SDP interface support is limited to PF devices; VFs are not supported.
+Inline Protocol Processing
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``net_octeontx2`` PMD does not support the following features for packets
+that are to be inline protocol processed:
+
+- TSO offload
+- VLAN/QinQ offload
+- Fragmentation
+
Debugging Options
-----------------
The armv8 crypto library is no longer used. The library name has changed
from armv8_crypto to AArch64crypto.
+* **Added inline IPsec support to Marvell OCTEON TX2 PMD.**
+
+ Added inline IPsec support to Marvell OCTEON TX2 PMD. With this feature,
+ applications can offload entire IPsec processing to the hardware. For the
+ configured sessions, the hardware performs the SA lookup and carries out
+ decryption and IPsec transformation of inbound packets. On the outbound
+ path, the application can submit a plain packet to the PMD, and it is sent
+ out on the wire after the hardware performs encryption and IPsec
+ transformation, as sketched below.
+
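+ A minimal application-side sketch (the port, mempool and session objects
+ here are illustrative, not mandated by the PMD) would be::
+
+ /* Inline IPsec requires DEV_RX_OFFLOAD_SECURITY and
+  * DEV_TX_OFFLOAD_SECURITY to be enabled in rte_eth_dev_configure().
+  */
+ struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
+ struct rte_security_session_conf conf = {
+ .action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ /* .ipsec and .crypto_xform describe the SA (SPI, mode, tunnel
+  * endpoints, cipher/auth keys); omitted here.
+  */
+ };
+ struct rte_security_session *sess;
+
+ sess = rte_security_session_create(ctx, &conf, sess_mp);
+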
* **Added Marvell OCTEON TX2 End Point rawdev PMD.**
Added a new OCTEON TX2 rawdev PMD for End Point mode of operation.
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_IPSEC_FP_H__
+#define __OTX2_IPSEC_FP_H__
+
+#include <rte_byteorder.h> /* rte_be32_t */
+#include <rte_common.h> /* RTE_STD_C11 */
+
+struct otx2_ipsec_fp_sa_ctl {
+ rte_be32_t spi : 32; /* SPI (stored big-endian) */
+ uint64_t exp_proto_inter_frag : 8;
+ uint64_t rsvd_42_40 : 3;
+ uint64_t esn_en : 1; /* Extended sequence number enable */
+ uint64_t rsvd_45_44 : 2;
+ uint64_t encap_type : 2;
+ uint64_t enc_type : 3; /* Cipher algorithm */
+ uint64_t rsvd_48 : 1;
+ uint64_t auth_type : 4; /* Authentication algorithm */
+ uint64_t valid : 1;
+ uint64_t direction : 1; /* Inbound or outbound SA */
+ uint64_t outer_ip_ver : 1;
+ uint64_t inner_ip_ver : 1;
+ uint64_t ipsec_mode : 1; /* Transport or tunnel */
+ uint64_t ipsec_proto : 1; /* AH or ESP */
+ uint64_t aes_key_len : 2;
+};
+
+struct otx2_ipsec_fp_in_sa {
+ /* w0 */
+ struct otx2_ipsec_fp_sa_ctl ctl;
+
+ /* w1 */
+ uint8_t nonce[4]; /* Only for AES-GCM */
+ uint32_t unused;
+
+ /* w2 */
+ uint32_t esn_low;
+ uint32_t esn_hi;
+
+ /* w3-w6 */
+ uint8_t cipher_key[32];
+
+ /* w7-w12 */
+ uint8_t hmac_key[48];
+
+ RTE_STD_C11
+ union {
+ void *userdata;
+ uint64_t udata64;
+ };
+
+ uint64_t reserved1;
+ uint64_t reserved2;
+};
+
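+/* Illustrative helper (hypothetical name, not part of this patch): the
+ * inbound SA database reserved by otx2_eth_sec_init() is a flat array of
+ * otx2_ipsec_fp_in_sa entries indexed directly by SPI.
+ */
+static inline struct otx2_ipsec_fp_in_sa *
+otx2_ipsec_fp_in_sa_get(void *sa_base, uint32_t spi, uint16_t max_spi)
+{
+ if (sa_base == NULL || spi >= max_spi)
+ return NULL;
+
+ /* Entry size is a power of two, matching the sa_pow2_size based
+  * addressing done by the hardware.
+  */
+ return (struct otx2_ipsec_fp_in_sa *)sa_base + spi;
+}
+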
+#endif /* __OTX2_IPSEC_FP_H__ */
LIB = librte_pmd_octeontx2.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/octeontx2
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2
CFLAGS += -O3
LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2 -lrte_eal -lrte_net
LDLIBS += -lrte_ethdev -lrte_bus_pci -lrte_kvargs -lrte_mbuf -lrte_mempool -lm
+LDLIBS += -lrte_cryptodev -lrte_eventdev -lrte_security
include $(RTE_SDK)/mk/rte.lib.mk
'otx2_ethdev_devargs.c'
)
-deps += ['bus_pci', 'cryptodev', 'security']
+deps += ['bus_pci', 'cryptodev', 'eventdev', 'security']
deps += ['common_octeontx2', 'mempool_octeontx2']
cflags += ['-flax-vector-conversions']
cflags += flag
endif
endforeach
+
+includes += include_directories('../../common/cpt')
+includes += include_directories('../../crypto/octeontx2')
aq->op = NIX_AQ_INSTOP_INIT;
aq->rq.sso_ena = 0;
+
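+ /* Enable hardware inline IPsec processing for traffic on this RQ */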
+ if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+ aq->rq.ipsech_ena = 1;
+
aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
aq->rq.spb_ena = 0;
aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
/* Free the resources allocated from the previous configure */
if (dev->configured == 1) {
+ otx2_eth_sec_fini(eth_dev);
otx2_nix_rxchan_bpid_cfg(eth_dev, false);
otx2_nix_vlan_fini(eth_dev);
otx2_nix_mc_addr_list_uninstall(eth_dev);
goto cq_fini;
}
+ /* Enable security */
+ rc = otx2_eth_sec_init(eth_dev);
+ if (rc)
+ goto cq_fini;
+
rc = otx2_nix_mc_addr_list_install(eth_dev);
if (rc < 0) {
otx2_err("Failed to install mc address list rc=%d", rc);
- goto cq_fini;
+ goto sec_fini;
}
/*
uninstall_mc_list:
otx2_nix_mc_addr_list_uninstall(eth_dev);
+sec_fini:
+ otx2_eth_sec_fini(eth_dev);
cq_fini:
oxt2_nix_unregister_cq_irqs(eth_dev);
q_irq_fini:
if (rc)
otx2_err("Failed to cleanup npa lf, rc=%d", rc);
+ /* Disable security */
+ otx2_eth_sec_fini(eth_dev);
+
/* Destroy security ctx */
otx2_eth_sec_ctx_destroy(eth_dev);
bool mc_tbl_set;
struct otx2_nix_mc_filter_tbl mc_fltr_tbl;
bool sdp_link; /* SDP flag */
+ /* Inline IPsec params */
+ uint16_t ipsec_in_max_spi;
} __rte_cache_aligned;
struct otx2_eth_txq {
return 0;
}
+static int
+parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args)
+{
+ uint32_t val;
+
+ RTE_SET_USED(key);
+
+ val = atoi(value);
+
+ /* Documented range is 1-65535, so the value fits in 16 bits */
+ *(uint16_t *)extra_args = val;
+
+ return 0;
+}
+
static int
parse_flag(const char *key, const char *value, void *extra_args)
{
}
#define OTX2_RSS_RETA_SIZE "reta_size"
+#define OTX2_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
#define OTX2_SCL_ENABLE "scalar_enable"
#define OTX2_MAX_SQB_COUNT "max_sqb_count"
#define OTX2_FLOW_PREALLOC_SIZE "flow_prealloc_size"
uint16_t flow_prealloc_size = 8;
uint16_t switch_header_type = 0;
uint16_t flow_max_priority = 3;
+ uint16_t ipsec_in_max_spi = 1;
uint16_t scalar_enable = 0;
uint16_t rss_tag_as_xor = 0;
struct rte_kvargs *kvlist;
rte_kvargs_process(kvlist, OTX2_RSS_RETA_SIZE,
&parse_reta_size, &rss_size);
+ rte_kvargs_process(kvlist, OTX2_IPSEC_IN_MAX_SPI,
+ &parse_ipsec_in_max_spi, &ipsec_in_max_spi);
rte_kvargs_process(kvlist, OTX2_SCL_ENABLE,
&parse_flag, &scalar_enable);
rte_kvargs_process(kvlist, OTX2_MAX_SQB_COUNT,
rte_kvargs_free(kvlist);
null_devargs:
+ dev->ipsec_in_max_spi = ipsec_in_max_spi;
dev->scalar_ena = scalar_enable;
dev->rss_tag_as_xor = rss_tag_as_xor;
dev->max_sqb_count = sqb_count;
RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2,
OTX2_RSS_RETA_SIZE "=<64|128|256>"
+ OTX2_IPSEC_IN_MAX_SPI "=<1-65535>"
OTX2_SCL_ENABLE "=1"
OTX2_MAX_SQB_COUNT "=<8-512>"
OTX2_FLOW_PREALLOC_SIZE "=<1-32>"
*/
#include <rte_ethdev.h>
+#include <rte_eventdev.h>
#include <rte_malloc.h>
+#include <rte_memzone.h>
#include <rte_security.h>
+#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
+#include "otx2_ipsec_fp.h"
+
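+/* Max packet length handled by inline IPsec; programmed as lenm1_max */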
+#define ETH_SEC_MAX_PKT_LEN 1450
+
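+/* Layout of the tag constant programmed for events generated by inline
+ * IPsec processed packets.
+ */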
+struct eth_sec_tag_const {
+ RTE_STD_C11
+ union {
+ struct {
+ uint32_t rsvd_11_0 : 12;
+ uint32_t port : 8;
+ uint32_t event_type : 4;
+ uint32_t rsvd_31_24 : 8;
+ };
+ uint32_t u32;
+ };
+};
+
+static inline void
+in_sa_mz_name_get(char *name, int size, uint16_t port)
+{
+ snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
+}
int
otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
{
rte_free(eth_dev->security_ctx);
}
+
+static int
+eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ struct nix_inline_ipsec_lf_cfg *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct eth_sec_tag_const tag_const;
+ char name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL)
+ return -EINVAL;
+
+ req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+ req->enable = 1;
+ req->sa_base_addr = mz->iova;
+
+ req->ipsec_cfg0.tt = tt;
+
+ tag_const.u32 = 0;
+ tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
+ tag_const.port = port;
+ req->ipsec_cfg0.tag_const = tag_const.u32;
+
+ req->ipsec_cfg0.sa_pow2_size =
+ rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
+ req->ipsec_cfg0.lenm1_max = ETH_SEC_MAX_PKT_LEN - 1;
+
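+ /* The inbound SA table is indexed directly by SPI, so the index
+  * width and maximum are derived from ipsec_in_max_spi.
+  */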
+ req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
+ req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
+{
+ const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ char name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ int mz_sz, ret;
+ uint16_t nb_sa;
+
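+ /* SA entry size must be a power of two between 32B and 512B so it
+  * can be encoded in sa_pow2_size.
+  */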
+ RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
+ !RTE_IS_POWER_OF_2(sa_width));
+
+ if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
+ !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ return 0;
+
+ nb_sa = dev->ipsec_in_max_spi;
+ mz_sz = nb_sa * sa_width;
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
+ RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
+
+ if (mz == NULL) {
+ otx2_err("Could not allocate inbound SA DB");
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz_sz);
+
+ ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
+ if (ret < 0) {
+ otx2_err("Could not configure inline IPsec");
+ goto sec_fini;
+ }
+
+ return 0;
+
+sec_fini:
+ otx2_err("Could not configure device for security");
+ otx2_eth_sec_fini(eth_dev);
+ return ret;
+}
+
+void
+otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ char name[RTE_MEMZONE_NAMESIZE];
+
+ if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
+ !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ return;
+
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ rte_memzone_free(rte_memzone_lookup(name));
+}
void otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev);
+int otx2_eth_sec_init(struct rte_eth_dev *eth_dev);
+
+void otx2_eth_sec_fini(struct rte_eth_dev *eth_dev);
+
#endif /* __OTX2_ETHDEV_SEC_H__ */