Synchronize inline session create and destroy using spinlock.
Also move security-related error prints outside the spinlock.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
struct rte_crypto_sym_xform *crypto;
struct cnxk_eth_sec_sess *eth_sec;
bool inbound, inl_dev;
+ rte_spinlock_t *lock;
+ char tbuf[128] = {0};
int rc = 0;
if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
sess_priv.u64 = 0;
+ lock = inbound ? &dev->inb.lock : &dev->outb.lock;
+ rte_spinlock_lock(lock);
+
/* Acquire lock on inline dev for inbound */
if (inbound && inl_dev)
roc_nix_inl_dev_lock();
/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
sa = roc_nix_inl_inb_sa_get(&dev->nix, inl_dev, ipsec->spi);
if (!sa && dev->inb.inl_dev) {
- plt_err("Failed to create ingress sa, inline dev "
- "not found or spi not in range");
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to create ingress sa, inline dev "
+ "not found or spi not in range");
rc = -ENOTSUP;
goto mempool_put;
} else if (!sa) {
- plt_err("Failed to create ingress sa");
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to create ingress sa");
rc = -EFAULT;
goto mempool_put;
}
/* Check if SA is already in use */
if (inb_sa->w2.s.valid) {
- plt_err("Inbound SA with SPI %u already in use",
- ipsec->spi);
+ snprintf(tbuf, sizeof(tbuf),
+ "Inbound SA with SPI %u already in use",
+ ipsec->spi);
rc = -EBUSY;
goto mempool_put;
}
/* Fill inbound sa params */
rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto);
if (rc) {
- plt_err("Failed to init inbound sa, rc=%d", rc);
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to init inbound sa, rc=%d", rc);
goto mempool_put;
}
/* Fill outbound sa params */
rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
if (rc) {
- plt_err("Failed to init outbound sa, rc=%d", rc);
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to init outbound sa, rc=%d", rc);
rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
goto mempool_put;
}
}
if (inbound && inl_dev)
roc_nix_inl_dev_unlock();
+ rte_spinlock_unlock(lock);
plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
inbound ? "inbound" : "outbound", eth_sec->spi,
mempool_put:
if (inbound && inl_dev)
roc_nix_inl_dev_unlock();
+ rte_spinlock_unlock(lock);
+
rte_mempool_put(mempool, eth_sec);
+ if (rc)
+ plt_err("%s", tbuf);
return rc;
}
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct cnxk_eth_sec_sess *eth_sec;
struct rte_mempool *mp;
+ rte_spinlock_t *lock;
void *sa_dptr;
eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
if (!eth_sec)
return -ENOENT;
+ lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
+ rte_spinlock_lock(lock);
+
if (eth_sec->inl_dev)
roc_nix_inl_dev_lock();
if (eth_sec->inl_dev)
roc_nix_inl_dev_unlock();
+ rte_spinlock_unlock(lock);
+
plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
eth_sec->sa_idx, eth_sec->inl_dev);
struct cn9k_sec_sess_priv sess_priv;
struct rte_crypto_sym_xform *crypto;
struct cnxk_eth_sec_sess *eth_sec;
+ rte_spinlock_t *lock;
+ char tbuf[128] = {0};
bool inbound;
int rc = 0;
return -ENOMEM;
}
+ lock = inbound ? &dev->inb.lock : &dev->outb.lock;
+ rte_spinlock_lock(lock);
+
memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
sess_priv.u64 = 0;
* device always for CN9K.
*/
inb_sa = (struct roc_onf_ipsec_inb_sa *)
- roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
+ roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
if (!inb_sa) {
- plt_err("Failed to create ingress sa");
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to create ingress sa");
rc = -EFAULT;
goto mempool_put;
}
/* Check if SA is already in use */
if (inb_sa->ctl.valid) {
- plt_err("Inbound SA with SPI %u already in use",
- ipsec->spi);
+ snprintf(tbuf, sizeof(tbuf),
+ "Inbound SA with SPI %u already in use",
+ ipsec->spi);
rc = -EBUSY;
goto mempool_put;
}
/* Fill inbound sa params */
rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
if (rc) {
- plt_err("Failed to init inbound sa, rc=%d", rc);
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to init inbound sa, rc=%d", rc);
goto mempool_put;
}
/* Fill outbound sa params */
rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
if (rc) {
- plt_err("Failed to init outbound sa, rc=%d", rc);
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to init outbound sa, rc=%d", rc);
rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
goto mempool_put;
}
/* Sync SA content */
plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+ rte_spinlock_unlock(lock);
+
plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
inbound ? "inbound" : "outbound", eth_sec->spi,
eth_sec->sa_idx);
return 0;
mempool_put:
+ rte_spinlock_unlock(lock);
rte_mempool_put(mempool, eth_sec);
+ if (rc)
+ plt_err("%s", tbuf);
return rc;
}
struct roc_onf_ipsec_inb_sa *inb_sa;
struct cnxk_eth_sec_sess *eth_sec;
struct rte_mempool *mp;
+ rte_spinlock_t *lock;
eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
if (!eth_sec)
return -ENOENT;
+ lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
+ rte_spinlock_lock(lock);
+
if (eth_sec->inb) {
inb_sa = eth_sec->sa;
/* Disable SA */
/* Sync SA content */
plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+ rte_spinlock_unlock(lock);
+
plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
eth_sec->sa_idx);
sec_ctx->flags =
(RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
eth_dev->security_ctx = sec_ctx;
- TAILQ_INIT(&dev->inb.list);
- TAILQ_INIT(&dev->outb.list);
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
dev->configured = 0;
dev->ptype_disable = 0;
+ TAILQ_INIT(&dev->inb.list);
+ TAILQ_INIT(&dev->outb.list);
+ rte_spinlock_init(&dev->inb.lock);
+ rte_spinlock_init(&dev->outb.lock);
+
/* For vfs, returned max_entries will be 0. but to keep default mac
* address, one entry must be allocated. so setting up to 1.
*/
/* DPTR for WRITE_SA microcode op */
void *sa_dptr;
+
+ /* Lock to synchronize sa setup/release */
+ rte_spinlock_t lock;
};
/* Outbound security data */
/* DPTR for WRITE_SA microcode op */
void *sa_dptr;
+
+ /* Lock to synchronize sa setup/release */
+ rte_spinlock_t lock;
};
struct cnxk_eth_dev {