#include "roc_api.h"
#include "roc_priv.h"
+uint32_t soft_exp_consumer_cnt;
+
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
/*
 * nix_inl_inb_sa_tbl_setup() - allocate and initialize the inbound IPsec
 * SA table for this NIX.
 *
 * NOTE(review): this region still carries unified-diff markers ('+'/'-') and
 * the diff has elided context lines, so the body below is an incomplete view
 * of the function. Lines are kept byte-identical; only comments were added.
 */
static int
nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
{
- uint16_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
+ uint32_t ipsec_in_min_spi = roc_nix->ipsec_in_min_spi;
+ uint32_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct roc_nix_ipsec_cfg cfg;
+ uint64_t max_sa, i;
size_t inb_sa_sz;
- int rc, i;
void *sa;
+ int rc;
+
/* Table is sized to the next power of two covering the configured SPI
 * range, so a simple AND-mask can map SPI -> SA index later.
 */
+ max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
/* CN9K SA size is different */
if (roc_model_is_cn9k())
/* Alloc contiguous memory for Inbound SA's */
nix->inb_sa_sz = inb_sa_sz;
- nix->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
/* Mask used for SPI->SA lookup; valid because max_sa is a power of two. */
+ nix->inb_spi_mask = max_sa - 1;
+ nix->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
ROC_NIX_INL_SA_BASE_ALIGN);
if (!nix->inb_sa_base) {
plt_err("Failed to allocate memory for Inbound SA");
return -ENOMEM;
}
if (roc_model_is_cn10k()) {
/* CN10K: pre-initialize every inbound SA slot in the table. */
- for (i = 0; i < ipsec_in_max_spi; i++) {
+ for (i = 0; i < max_sa; i++) {
sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
roc_ot_ipsec_inb_sa_init(sa, true);
}
memset(&cfg, 0, sizeof(cfg));
cfg.sa_size = inb_sa_sz;
cfg.iova = (uintptr_t)nix->inb_sa_base;
- cfg.max_sa = ipsec_in_max_spi + 1;
+ cfg.max_sa = max_sa;
cfg.tt = SSO_TT_ORDERED;
/* Setup device specific inb SA table */
}
uint32_t
-roc_nix_inl_inb_sa_max_spi(struct roc_nix *roc_nix, bool inb_inl_dev)
+roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix, bool inb_inl_dev,
+ uint32_t *min_spi, uint32_t *max_spi)
{
struct idev_cfg *idev = idev_get_cfg();
+ uint32_t min = 0, max = 0, mask = 0;
struct nix_inl_dev *inl_dev;
- struct nix *nix;
+ struct nix *nix = NULL;
if (idev == NULL)
return 0;
if (!inb_inl_dev && roc_nix == NULL)
return -EINVAL;
- if (roc_nix) {
+ inl_dev = idev->nix_inl_dev;
+ if (inb_inl_dev) {
+ if (inl_dev == NULL)
+ goto exit;
+ min = inl_dev->ipsec_in_min_spi;
+ max = inl_dev->ipsec_in_max_spi;
+ mask = inl_dev->inb_spi_mask;
+ } else {
nix = roc_nix_to_nix_priv(roc_nix);
if (!nix->inl_inb_ena)
- return 0;
+ goto exit;
+ min = roc_nix->ipsec_in_min_spi;
+ max = roc_nix->ipsec_in_max_spi;
+ mask = nix->inb_spi_mask;
}
-
- if (inb_inl_dev) {
- inl_dev = idev->nix_inl_dev;
- if (inl_dev)
- return inl_dev->ipsec_in_max_spi;
- return 0;
- }
-
- return roc_nix->ipsec_in_max_spi;
+exit:
+ if (min_spi)
+ *min_spi = min;
+ if (max_spi)
+ *max_spi = max;
+ return mask;
}
uint32_t
/**
 * Translate an inbound SPI to its SA entry address.
 *
 * Defects fixed: resolved the raw unified-diff markers in this span, and
 * guarded the @roc_nix dereference — roc_nix may legitimately be NULL when
 * @inb_inl_dev is true (the spi-range helper explicitly allows it), so the
 * unconditional roc_nix->custom_sa_action read was a NULL dereference.
 *
 * @param roc_nix	NIX handle; may be NULL when @inb_inl_dev is true.
 * @param inb_inl_dev	Use the shared inline device's SA table.
 * @param spi		Inbound SPI to look up.
 * @return SA address, or 0 if no SA base/size is available.
 */
uintptr_t
roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix, bool inb_inl_dev, uint32_t spi)
{
	uint32_t max_spi, min_spi, mask;
	uintptr_t sa_base;
	uint64_t sz;

	sa_base = roc_nix_inl_inb_sa_base_get(roc_nix, inb_inl_dev);
	if (!sa_base)
		return 0;

	/* Get SA size */
	sz = roc_nix_inl_inb_sa_sz(roc_nix, inb_inl_dev);
	if (!sz)
		return 0;

	/* With a custom SA action the SPI is used as a direct index */
	if (roc_nix && roc_nix->custom_sa_action)
		return (sa_base + (spi * sz));

	/* Check if SPI is in range */
	mask = roc_nix_inl_inb_spi_range(roc_nix, inb_inl_dev, &min_spi,
					 &max_spi);
	if (spi > max_spi || spi < min_spi)
		plt_warn("Inbound SA SPI %u not in range (%u..%u)", spi,
			 min_spi, max_spi);

	/* Basic logic of SPI->SA for now: index by masked SPI */
	return (sa_base + ((spi & mask) * sz));
}
+
+int
+roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct roc_cpt *roc_cpt;
+ struct roc_cpt_rxc_time_cfg cfg;
+
+ PLT_SET_USED(max_frags);
+ roc_cpt = idev->cpt;
+ if (!roc_cpt) {
+ plt_err("Cannot support inline inbound, cryptodev not probed");
+ return -ENOTSUP;
+ }
+
+ cfg.step = (max_wait_time * 1000 / ROC_NIX_INL_REAS_ACTIVE_LIMIT);
+ cfg.zombie_limit = ROC_NIX_INL_REAS_ZOMBIE_LIMIT;
+ cfg.zombie_thres = ROC_NIX_INL_REAS_ZOMBIE_THRESHOLD;
+ cfg.active_limit = ROC_NIX_INL_REAS_ACTIVE_LIMIT;
+ cfg.active_thres = ROC_NIX_INL_REAS_ACTIVE_THRESHOLD;
+
+ return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
}
/*
 * NOTE(review): unified-diff fragment with elided context — the enclosing
 * function's signature (presumably the inline-outbound init path) and much
 * of its body are not visible here. Lines kept byte-identical; only
 * comments added.
 */
int
struct dev *dev = &nix->dev;
struct msix_offset_rsp *rsp;
struct nix_inl_dev *inl_dev;
+ size_t sa_sz, ring_sz;
uint16_t sso_pffunc;
uint8_t eng_grpmask;
- uint64_t blkaddr;
+ uint64_t blkaddr, i;
+ uint64_t *ring_base;
uint16_t nb_lf;
void *sa_base;
- size_t sa_sz;
- int i, j, rc;
+ int j, rc;
void *sa;
if (idev == NULL)
nix->nb_cpt_lf = nb_lf;
nix->outb_err_sso_pffunc = sso_pffunc;
nix->inl_outb_ena = true;
/* One soft-expiry ring per ROC_IPSEC_ERR_RING_MAX_ENTRY outbound SAs
 * (rounded up); this port's rings start at a per-port base index.
 */
+ nix->outb_se_ring_cnt =
+ roc_nix->ipsec_out_max_sa / ROC_IPSEC_ERR_RING_MAX_ENTRY + 1;
+ nix->outb_se_ring_base =
+ roc_nix->port_id * ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
+
/* Without the shared inline device there is nowhere to hang the
 * soft-expiry rings, so none are created.
 */
+ if (inl_dev == NULL) {
+ nix->outb_se_ring_cnt = 0;
+ return 0;
+ }
+
+ /* Allocate memory to be used as a ring buffer to poll for
+ * soft expiry event from ucode
+ */
+ ring_sz = (ROC_IPSEC_ERR_RING_MAX_ENTRY + 1) * sizeof(uint64_t);
+ ring_base = inl_dev->sa_soft_exp_ring;
+ for (i = 0; i < nix->outb_se_ring_cnt; i++) {
+ ring_base[nix->outb_se_ring_base + i] =
+ PLT_U64_CAST(plt_zmalloc(ring_sz, 0));
+ if (!ring_base[nix->outb_se_ring_base + i]) {
+ plt_err("Couldn't allocate memory for soft exp ring");
/* Unwind the rings allocated so far before failing */
+ while (i--)
+ plt_free(PLT_PTR_CAST(
+ ring_base[nix->outb_se_ring_base + i]));
+ rc = -ENOMEM;
+ goto lf_fini;
+ }
+ }
+
return 0;
lf_fini:
/*
 * NOTE(review): unified-diff fragment — the enclosing function's signature
 * (presumably the inline-outbound teardown path) is elided by the diff.
 * Lines kept byte-identical; only comments added.
 */
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
+ struct idev_cfg *idev = idev_get_cfg();
struct dev *dev = &nix->dev;
+ struct nix_inl_dev *inl_dev;
+ uint64_t *ring_base;
int i, rc, ret = 0;
if (!nix->inl_outb_ena)
plt_free(nix->outb_sa_base);
nix->outb_sa_base = NULL;
/* Free all soft-expiry ring buffers hung off the shared inline device.
 * NOTE(review): this frees ALL ring slots, not just this port's range —
 * presumably fini is global/last; confirm against callers.
 */
+ if (idev && idev->nix_inl_dev) {
+ inl_dev = idev->nix_inl_dev;
+ ring_base = inl_dev->sa_soft_exp_ring;
+
+ for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
+ if (ring_base[i])
+ plt_free(PLT_PTR_CAST(ring_base[i]));
+ }
+ }
+
ret |= rc;
return ret;
}
nix->inb_inl_dev = use_inl_dev;
}
+int
+roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev;
+ uint16_t ring_idx, i;
+
+ if (!idev || !idev->nix_inl_dev)
+ return 0;
+
+ inl_dev = idev->nix_inl_dev;
+
+ for (i = 0; i < nix->outb_se_ring_cnt; i++) {
+ ring_idx = nix->outb_se_ring_base + i;
+
+ if (poll)
+ plt_bitmap_set(inl_dev->soft_exp_ring_bmap, ring_idx);
+ else
+ plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, ring_idx);
+ }
+
+ if (poll)
+ soft_exp_consumer_cnt++;
+ else
+ soft_exp_consumer_cnt--;
+
+ return 0;
+}
+
/*
 * NOTE(review): two elided diff hunks are fused here — the body of
 * roc_nix_inb_is_with_inl_dev() is not visible, and the cfg.* assignments
 * below belong to a different (unseen) function. Lines kept byte-identical.
 */
bool
roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
{
memset(&cfg, 0, sizeof(cfg));
cfg.sa_size = nix->inb_sa_sz;
cfg.iova = (uintptr_t)nix->inb_sa_base;
/* Table size is SPI mask + 1 (a power of two) — matches inb_spi_mask. */
- cfg.max_sa = roc_nix->ipsec_in_max_spi + 1;
+ cfg.max_sa = nix->inb_spi_mask + 1;
cfg.tt = tt;
cfg.tag_const = tag_const;