From 9d127f44048508612a47d700c95fc979b57b7d74 Mon Sep 17 00:00:00 2001
From: Vamsi Attunuru
Date: Fri, 25 Feb 2022 12:24:45 +0530
Subject: [PATCH] net/cnxk: make inline inbound device usage as default

Currently, inline inbound device usage is not the default for eventdev.
Rename the force_inb_inl_dev devarg to no_inl_dev and enable the inline
inbound device by default.

Signed-off-by: Vamsi Attunuru
Acked-by: Jerin Jacob
---
 doc/guides/nics/cnxk.rst                 | 10 +++++-----
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  4 ++--
 drivers/net/cnxk/cn9k_ethdev.c           |  1 +
 drivers/net/cnxk/cnxk_ethdev.h           |  4 ++--
 drivers/net/cnxk/cnxk_ethdev_devargs.c   | 11 +++++------
 5 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index be51ca2146..31c801fa04 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -275,7 +275,7 @@ Runtime Config Options
    With the above configuration, two CPT LF's are setup and distributed among
    all the Tx queues for outbound processing.
 
-- ``Force using inline ipsec device for inbound`` (default ``0``)
+- ``Disable using inline ipsec device for inbound`` (default ``0``)
 
    In CN10K, in event mode, driver can work in two modes,
 
@@ -285,13 +285,13 @@ Runtime Config Options
    2. Both Inbound encrypted traffic and plain traffic post decryption are
      received by ethdev.
 
-   By default event mode works without using inline device i.e mode ``2``.
-   This behaviour can be changed to pick mode ``1`` by using
-   ``force_inb_inl_dev`` ``devargs`` parameter.
+   By default event mode works using inline device i.e mode ``1``.
+   This behaviour can be changed to pick mode ``2`` by using
+   ``no_inl_dev`` ``devargs`` parameter.
 
    For example::
 
-      -a 0002:02:00.0,force_inb_inl_dev=1 -a 0002:03:00.0,force_inb_inl_dev=1
+      -a 0002:02:00.0,no_inl_dev=1 -a 0002:03:00.0,no_inl_dev=1
 
    With the above configuration, inbound encrypted traffic from both the ports
    is received by ipsec inline device.
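As a usage illustration (not part of the patch): the same devargs shown in the
documentation above can also be supplied programmatically through EAL init. A
minimal sketch, assuming only stock EAL APIs; the PCI addresses are the example
placeholders from the doc text and would need to match real hardware.

/* Illustrative sketch only: select event mode 2 (no inline inbound
 * device) for two example ports by passing no_inl_dev=1 via EAL devargs.
 */
#include <stdlib.h>
#include <rte_eal.h>

int
main(void)
{
	char *argv[] = {
		"app",
		"-a", "0002:02:00.0,no_inl_dev=1", /* port 0: no inline dev */
		"-a", "0002:03:00.0,no_inl_dev=1", /* port 1: no inline dev */
	};
	int argc = (int)(sizeof(argv) / sizeof(argv[0]));

	/* The cnxk PMD parses the devargs when it probes each device. */
	if (rte_eal_init(argc, argv) < 0)
		exit(EXIT_FAILURE);

	/* ... regular ethdev/eventdev setup continues here ... */
	return 0;
}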
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 7b580ca98f..3b63d78d97 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -263,9 +263,9 @@ cnxk_sso_rx_adapter_queue_add(
 
 	/* Switch to use PF/VF's NIX LF instead of inline device for inbound
 	 * when all the RQ's are switched to event dev mode. We do this only
-	 * when using inline device is not forced by dev args.
+	 * when dev arg no_inl_dev=1 is selected.
 	 */
-	if (!cnxk_eth_dev->inb.force_inl_dev &&
+	if (cnxk_eth_dev->inb.no_inl_dev &&
 	    cnxk_eth_dev->nb_rxq_sso == cnxk_eth_dev->nb_rxq)
 		cnxk_nix_inb_mode_set(cnxk_eth_dev, false);
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index ca17cbec12..eda33dc8c5 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -731,6 +731,7 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	}
 
 	dev->hwcap = 0;
+	dev->inb.no_inl_dev = 1;
 
 	/* Register up msg callbacks for PTP information */
 	roc_nix_ptp_info_cb_register(&dev->nix, cn9k_nix_ptp_info_update_cb);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index a4d3b2f0a0..ccdf496860 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -282,8 +282,8 @@ struct cnxk_eth_dev_sec_inb {
 	/* Using inbound with inline device */
 	bool inl_dev;
 
-	/* Device argument to force inline device for inb */
-	bool force_inl_dev;
+	/* Device argument to disable inline device usage for inb */
+	bool no_inl_dev;
 
 	/* Active sessions */
 	uint16_t nb_sess;
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 8a71644899..9b2beb6743 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -241,7 +241,7 @@ parse_sdp_channel_mask(const char *key, const char *value, void *extra_args)
 #define CNXK_IPSEC_IN_MAX_SPI	"ipsec_in_max_spi"
 #define CNXK_IPSEC_OUT_MAX_SA	"ipsec_out_max_sa"
 #define CNXK_OUTB_NB_DESC	"outb_nb_desc"
-#define CNXK_FORCE_INB_INL_DEV	"force_inb_inl_dev"
+#define CNXK_NO_INL_DEV		"no_inl_dev"
 #define CNXK_OUTB_NB_CRYPTO_QS	"outb_nb_crypto_qs"
 #define CNXK_SDP_CHANNEL_MASK	"sdp_channel_mask"
 #define CNXK_FLOW_PRE_L2_INFO	"flow_pre_l2_info"
@@ -257,7 +257,6 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	uint16_t flow_prealloc_size = 1;
 	uint16_t switch_header_type = 0;
 	uint16_t flow_max_priority = 3;
-	uint16_t force_inb_inl_dev = 0;
 	uint16_t outb_nb_crypto_qs = 1;
 	uint32_t ipsec_in_min_spi = 0;
 	uint16_t outb_nb_desc = 8200;
@@ -266,6 +265,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	uint16_t scalar_enable = 0;
 	uint8_t lock_rx_ctx = 0;
 	struct rte_kvargs *kvlist;
+	uint16_t no_inl_dev = 0;
 
 	memset(&sdp_chan, 0, sizeof(sdp_chan));
 	memset(&pre_l2_info, 0, sizeof(struct flow_pre_l2_size_info));
@@ -302,8 +302,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 			   &outb_nb_desc);
 	rte_kvargs_process(kvlist, CNXK_OUTB_NB_CRYPTO_QS,
 			   &parse_outb_nb_crypto_qs, &outb_nb_crypto_qs);
-	rte_kvargs_process(kvlist, CNXK_FORCE_INB_INL_DEV, &parse_flag,
-			   &force_inb_inl_dev);
+	rte_kvargs_process(kvlist, CNXK_NO_INL_DEV, &parse_flag, &no_inl_dev);
 	rte_kvargs_process(kvlist, CNXK_SDP_CHANNEL_MASK,
 			   &parse_sdp_channel_mask, &sdp_chan);
 	rte_kvargs_process(kvlist, CNXK_FLOW_PRE_L2_INFO,
@@ -312,7 +311,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 
 null_devargs:
 	dev->scalar_ena = !!scalar_enable;
-	dev->inb.force_inl_dev = !!force_inb_inl_dev;
+	dev->inb.no_inl_dev = !!no_inl_dev;
 	dev->inb.max_spi = ipsec_in_max_spi;
 	dev->outb.max_sa = ipsec_out_max_sa;
 	dev->outb.nb_desc = outb_nb_desc;
@@ -350,5 +349,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
 			      CNXK_OUTB_NB_DESC "=<1-65535>"
 			      CNXK_FLOW_PRE_L2_INFO "=<0-255>/<1-255>/<0-1>"
 			      CNXK_OUTB_NB_CRYPTO_QS "=<1-64>"
-			      CNXK_FORCE_INB_INL_DEV "=1"
+			      CNXK_NO_INL_DEV "=0"
 			      CNXK_SDP_CHANNEL_MASK "=<1-4095>/<1-4095>");
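For context on the parsing path above (again, not part of the patch): a
self-contained sketch of the rte_kvargs flow that a flag devarg such as
no_inl_dev goes through. The parse_flag and get_no_inl_dev helpers here are
illustrative stand-ins written for this note, not the driver's actual code.

/* Minimal equivalent of the flag parsing cnxk_ethdev_parse_devargs()
 * performs for no_inl_dev; uses only public rte_kvargs APIs.
 */
#include <stdint.h>
#include <stdlib.h>
#include <rte_kvargs.h>

#define CNXK_NO_INL_DEV "no_inl_dev"

static int
parse_flag(const char *key, const char *value, void *extra_args)
{
	(void)key;
	*(uint16_t *)extra_args = (uint16_t)atoi(value); /* "1" -> set */
	return 0;
}

/* Returns 1 when args (e.g. "no_inl_dev=1") disable the inline device. */
static uint16_t
get_no_inl_dev(const char *args)
{
	static const char *const valid_keys[] = { CNXK_NO_INL_DEV, NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, valid_keys);
	uint16_t no_inl_dev = 0; /* default: inline inbound device in use */

	if (kvlist == NULL)
		return no_inl_dev;
	rte_kvargs_process(kvlist, CNXK_NO_INL_DEV, &parse_flag, &no_inl_dev);
	rte_kvargs_free(kvlist);
	return no_inl_dev;
}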
"=<1-4095>/<1-4095>"); -- 2.39.5