Add device arguments to lock NPA aura and pool contexts in NDC cache.
The device args take a hexadecimal bitmask where each bit represents the
corresponding aura/pool id.
Example:
-w 0002:02:00.0,npa_lock_mask=0xf // Lock the first 4 aura/pool contexts
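Since bit n of the mask selects aura/pool id n, a sparse set of ids can
also be locked; for instance, locking only ids 0 and 3 (binary 1001)
would use:
-w 0002:02:00.0,npa_lock_mask=0x9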
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
-w 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0]
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+  The device args take a hexadecimal bitmask where each bit represents the
+  corresponding aura/pool id.
+
+ For example::
+
+ -w 0002:0e:00.0,npa_lock_mask=0xf
+
Debugging Options
~~~~~~~~~~~~~~~~~
provide ``max_pools`` parameter to the first PCIe device probed by the given
application.
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+  The device args take a hexadecimal bitmask where each bit represents the
+  corresponding aura/pool id.
+
+ For example::
+
+ -w 0002:02:00.0,npa_lock_mask=0xf
+
Debugging Options
~~~~~~~~~~~~~~~~~
Set this flag to 1 to select the legacy mode.
For example, to select the legacy mode (RSS tag adder as XOR)::
+
-w 0002:02:00.0,tag_as_xor=1
- ``Max SPI for inbound inline IPsec`` (default ``1``)
``ipsec_in_max_spi`` ``devargs`` parameter.
For example::
+
-w 0002:02:00.0,ipsec_in_max_spi=128
With the above configuration, application can enable inline IPsec processing
parameters to all the PCIe devices if the application needs the
configuration on all the ethdev ports.
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+  The device args take a hexadecimal bitmask where each bit represents the
+  corresponding aura/pool id.
+
+ For example::
+
+ -w 0002:02:00.0,npa_lock_mask=0xf
+
.. _otx2_tmapi:
Traffic Management API
SRCS-y += otx2_sec_idev.c
LDLIBS += -lrte_eal
-LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_ethdev -lrte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
endif
endforeach
-deps = ['eal', 'pci', 'ethdev']
+deps = ['eal', 'pci', 'ethdev', 'kvargs']
includes += include_directories('../../common/octeontx2',
'../../mempool/octeontx2', '../../bus/pci')
return cnt ? 0 : -EINVAL;
}
+static int
+parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
+{
+	uint64_t val;
+
+	RTE_SET_USED(key);
+
+	/* The devargs value is a hexadecimal bitmask, e.g. "0xf" */
+	val = strtoull(value, NULL, 16);
+
+	*(uint64_t *)extra_args = val;
+
+	return 0;
+}
+
+/**
+ * @internal
+ * Parse common device arguments.
+ */
+void
+otx2_parse_common_devargs(struct rte_kvargs *kvlist)
+{
+ struct otx2_idev_cfg *idev;
+ uint64_t npa_lock_mask = 0;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev == NULL)
+ return;
+
+ rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
+ &parse_npa_lock_mask, &npa_lock_mask);
+
+ idev->npa_lock_mask = npa_lock_mask;
+}
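+
+/*
+ * Usage sketch (hypothetical caller; "devargs" stands for the device's
+ * struct rte_devargs): a PMD probe path builds a kvargs list from the
+ * device's devargs string and hands it over, e.g.:
+ *
+ *	kvlist = rte_kvargs_parse(devargs->args, NULL);
+ *	if (kvlist != NULL) {
+ *		otx2_parse_common_devargs(kvlist);
+ *		rte_kvargs_free(kvlist);
+ *	}
+ */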
+
/**
* @internal
*/
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_kvargs.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_io.h>
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif
+#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
+
/* Intra device related functions */
struct otx2_npa_lf;
struct otx2_idev_cfg {
rte_atomic16_t npa_refcnt;
uint16_t npa_refcnt_u16;
};
+ uint64_t npa_lock_mask;
};
__rte_internal
int otx2_npa_lf_active(void *dev);
__rte_internal
int otx2_npa_lf_obj_ref(void);
+__rte_internal
+void otx2_parse_common_devargs(struct rte_kvargs *kvlist);
/* Log */
extern int otx2_logtype_base;
otx2_npa_lf_obj_ref;
otx2_npa_pf_func_get;
otx2_npa_set_defaults;
+ otx2_parse_common_devargs;
otx2_register_irq;
otx2_sec_idev_cfg_init;
otx2_sec_idev_tx_cpt_qp_add;
&single_ws);
rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
dev);
-
+ otx2_parse_common_devargs(kvlist);
dev->dual_ws = !single_ws;
rte_kvargs_free(kvlist);
}
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
OTX2_SSO_SINGLE_WS "=1"
OTX2_SSO_GGRP_QOS "=<string>"
- OTX2_SSO_SELFTEST "=1");
+ OTX2_SSO_SELFTEST "=1"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");
goto exit;
rte_kvargs_process(kvlist, OTX2_MAX_POOLS, &parse_max_pools, &aura_sz);
+ otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist);
exit:
return aura_sz;
RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map);
RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(mempool_octeontx2,
- OTX2_MAX_POOLS "=<128-1048576>");
+ OTX2_MAX_POOLS "=<128-1048576>"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");
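+
+/*
+ * Illustrative devargs usage (hypothetical values) combining the keys
+ * registered above on an application command line:
+ *   -w 0002:02:00.0,max_pools=512,npa_lock_mask=0xf
+ */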
struct npa_aq_enq_req *aura_init_req, *pool_init_req;
struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct otx2_idev_cfg *idev;
int rc, off;
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -ENOMEM;
+
aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
aura_init_req->aura_id = aura_id;
return 0;
else
return NPA_LF_ERR_AURA_POOL_INIT;
+
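+	/* Lock the aura and pool contexts into NDC only when the user
+	 * opted this aura/pool id in via the npa_lock_mask devargs.
+	 */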
+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
+ return 0;
+
+ aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_LOCK;
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!pool_init_req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to LOCK AURA context");
+ return -ENOMEM;
+ }
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!pool_init_req) {
+ otx2_err("Failed to LOCK POOL context");
+ return -ENOMEM;
+ }
+ }
+ pool_init_req->aura_id = aura_id;
+ pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_init_req->op = NPA_AQ_INSTOP_LOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to lock POOL ctx to NDC");
+ return -ENOMEM;
+ }
+
+ return 0;
}
static int
struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
struct otx2_mbox_dev *mdev = &mbox->dev[0];
struct ndc_sync_op *ndc_req;
+ struct otx2_idev_cfg *idev;
int rc, off;
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -EINVAL;
+
/* Procedure for disabling an aura/pool */
rte_delay_us(10);
npa_lf_aura_op_alloc(aura_handle, 0);
otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
return NPA_LF_ERR_AURA_POOL_FINI;
}
+
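+	/* Nothing to unlock when this aura/pool id was not locked
+	 * via the npa_lock_mask devargs.
+	 */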
+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
+ return 0;
+
+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to unlock AURA ctx to NDC");
+ return -EINVAL;
+ }
+
+ pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ pool_req->aura_id = aura_id;
+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to unlock POOL ctx to NDC");
+ return -EINVAL;
+ }
+
return 0;
}
&parse_switch_header_type, &switch_header_type);
rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR,
&parse_flag, &rss_tag_as_xor);
+ otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist);
null_devargs:
OTX2_FLOW_PREALLOC_SIZE "=<1-32>"
OTX2_FLOW_MAX_PRIORITY "=<1-32>"
OTX2_SWITCH_HEADER_TYPE "=<higig2|dsa|chlen90b>"
- OTX2_RSS_TAG_AS_XOR "=1");
+ OTX2_RSS_TAG_AS_XOR "=1"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");