return NULL;
}
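+/* Return a pointer to this port's first slot in the inline device's
+ * soft expiry ring table; callers index it by SA number to locate the
+ * ring serving a given SA.
+ */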
+uint64_t *
+roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev;
+
+ if (!idev || !idev->nix_inl_dev)
+ return NULL;
+
+ inl_dev = idev->nix_inl_dev;
+
+ return (uint64_t *)&inl_dev->sa_soft_exp_ring[nix->outb_se_ring_base];
+}
+
void
roc_idev_cpt_set(struct roc_cpt *cpt)
{
struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
+uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+
#endif /* _ROC_IDEV_H_ */
(PLT_ALIGN_CEIL(ROC_AR_WIN_SIZE_MAX, BITS_PER_LONG_LONG) / \
BITS_PER_LONG_LONG)
+#define ROC_IPSEC_ERR_RING_MAX_ENTRY 65536
+
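+/* Word 0 of each soft expiry ring. Ucode advances head_pos as it
+ * produces entries and software advances tail_pos as it consumes
+ * them; the gen fields advance on wrap so that a full ring can be
+ * told apart from an empty one.
+ */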
+union roc_ot_ipsec_err_ring_head {
+ uint64_t u64;
+ struct {
+ uint16_t tail_pos;
+ uint16_t tail_gen;
+ uint16_t head_pos;
+ uint16_t head_gen;
+ } s;
+};
+
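+/* Soft expiry ring entry written by ucode: data0 and data1 carry bits
+ * [50:7] and [59:51] of the SA address (see the reconstruction in
+ * inl_outb_soft_exp_poll()), comp_code the completion code.
+ */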
+union roc_ot_ipsec_err_ring_entry {
+ uint64_t u64;
+ struct {
+ uint64_t data0 : 44;
+ uint64_t data1 : 9;
+ uint64_t rsvd : 3;
+ uint64_t comp_code : 8;
+ } s;
+};
+
/* Common bit fields between inbound and outbound SA */
union roc_ot_ipsec_sa_word2 {
struct {
uint64_t count_mib_pkts : 1;
uint64_t hw_ctx_off : 7;
- uint64_t rsvd1 : 32;
+ uint64_t ctx_id : 16;
+ uint64_t rsvd1 : 16;
uint64_t ctx_push_size : 7;
uint64_t rsvd2 : 1;
#include "roc_api.h"
#include "roc_priv.h"
+uint32_t soft_exp_consumer_cnt;
+
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
nix->nb_cpt_lf = nb_lf;
nix->outb_err_sso_pffunc = sso_pffunc;
nix->inl_outb_ena = true;
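+ /* One soft expiry ring per ROC_IPSEC_ERR_RING_MAX_ENTRY (64K)
+ * outbound SAs, with ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS ring slots
+ * reserved for each port
+ */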
+ nix->outb_se_ring_cnt =
+ roc_nix->ipsec_out_max_sa / ROC_IPSEC_ERR_RING_MAX_ENTRY + 1;
+ nix->outb_se_ring_base =
+ roc_nix->port_id * ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
+
+ if (inl_dev == NULL) {
+ nix->outb_se_ring_cnt = 0;
+ return 0;
+ }
+
+ /* Allocate memory to be used as ring buffers to poll for
+ * soft expiry events from ucode
+ */
+ for (i = 0; i < nix->outb_se_ring_cnt; i++) {
+ inl_dev->sa_soft_exp_ring[nix->outb_se_ring_base + i] =
+ plt_zmalloc((ROC_IPSEC_ERR_RING_MAX_ENTRY + 1) *
+ sizeof(uint64_t),
+ 0);
+ if (!inl_dev->sa_soft_exp_ring[nix->outb_se_ring_base + i]) {
+ plt_err("Couldn't allocate memory for soft exp ring");
+ while (i--)
+ plt_free(inl_dev->sa_soft_exp_ring
+ [nix->outb_se_ring_base + i]);
+ rc = -ENOMEM;
+ goto lf_fini;
+ }
+ }
+
return 0;
lf_fini:
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
+ struct idev_cfg *idev = idev_get_cfg();
struct dev *dev = &nix->dev;
+ struct nix_inl_dev *inl_dev;
int i, rc, ret = 0;
if (!nix->inl_outb_ena)
plt_free(nix->outb_sa_base);
nix->outb_sa_base = NULL;
+ if (idev && idev->nix_inl_dev) {
+ inl_dev = idev->nix_inl_dev;
+
+ for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
+ if (inl_dev->sa_soft_exp_ring[i])
+ plt_free(inl_dev->sa_soft_exp_ring[i]);
+ }
+ }
+
ret |= rc;
return ret;
}
nix->inb_inl_dev = use_inl_dev;
}
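+/* Enable or disable polling of this port's soft expiry rings by
+ * setting or clearing their bits in the inline device bitmap scanned
+ * by the poll thread.
+ */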
+int
+roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev;
+ uint16_t ring_idx, i;
+
+ if (!idev || !idev->nix_inl_dev)
+ return 0;
+
+ inl_dev = idev->nix_inl_dev;
+
+ for (i = 0; i < nix->outb_se_ring_cnt; i++) {
+ ring_idx = nix->outb_se_ring_base + i;
+
+ if (poll)
+ plt_bitmap_set(inl_dev->soft_exp_ring_bmap, ring_idx);
+ else
+ plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, ring_idx);
+ }
+
+ if (poll)
+ soft_exp_consumer_cnt++;
+ else
+ soft_exp_consumer_cnt--;
+
+ return 0;
+}
+
bool
roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
{
/* Alignment of SA Base */
#define ROC_NIX_INL_SA_BASE_ALIGN BIT_ULL(16)
+#define ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT 25
+
+#define ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2 16
+
+#define ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS 4
+
+#define ROC_NIX_MAX_TOTAL_OUTB_IPSEC_SA \
+ (ROC_IPSEC_ERR_RING_MAX_ENTRY * ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS)
+
+#define ROC_NIX_INL_MAX_SOFT_EXP_RNGS \
+ (PLT_MAX_ETHPORTS * ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS)
+
static inline struct roc_onf_ipsec_inb_sa *
roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx)
{
}
/* Inline device SSO Work callback */
-typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args);
+typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
+ uint32_t soft_exp_event);
struct roc_nix_inl_dev {
/* Input parameters */
bool wqe_skip;
uint8_t spb_drop_pc;
uint8_t lpb_drop_pc;
+ bool set_soft_exp_poll;
/* End of input parameters */
-#define ROC_NIX_INL_MEM_SZ (1280)
+#define ROC_NIX_INL_MEM_SZ (2304)
uint8_t reserved[ROC_NIX_INL_MEM_SZ] __plt_cache_aligned;
} __plt_cache_aligned;
int __roc_api roc_nix_inl_cb_register(roc_nix_inl_sso_work_cb_t cb, void *args);
int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
void *args);
+int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
+ bool poll);
/* NIX Inline/Outbound API */
enum roc_nix_inl_sa_sync_op {
ROC_NIX_INL_SA_OP_FLUSH,
#include "roc_api.h"
#include "roc_priv.h"
+#include <unistd.h>
+
#define NIX_AURA_DROP_PC_DFLT 40
/* Default Rx Config for Inline NIX LF */
ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 | \
ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)
+extern uint32_t soft_exp_consumer_cnt;
+static bool soft_exp_poll_thread_exit = true;
+
uint16_t
nix_inl_dev_pffunc_get(void)
{
}
static void
-nix_inl_selftest_work_cb(uint64_t *gw, void *args)
+nix_inl_selftest_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
uintptr_t work = gw[1];
+ PLT_SET_USED(soft_exp_event);
*((uintptr_t *)args + (gw[0] & 0x1)) = work;
plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
return rc;
}
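+/* Drain one soft expiry ring: walk the entries between tail and head,
+ * rebuild the SA pointer from each entry and hand it to the registered
+ * work callback with the port id encoded in the event argument.
+ */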
+static void
+inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx)
+{
+ union roc_ot_ipsec_err_ring_head head;
+ struct roc_ot_ipsec_outb_sa *sa;
+ uint16_t head_l, tail_l;
+ uint64_t *ring_base;
+ uint32_t port_id;
+
+ port_id = ring_idx / ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
+ ring_base = inl_dev->sa_soft_exp_ring[ring_idx];
+ if (!ring_base) {
+ plt_err("Invalid soft exp ring base");
+ return;
+ }
+
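+ /* Word 0 of the ring is the head/tail descriptor; entries start
+ * at ring_base[1]
+ */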
+ head.u64 = __atomic_load_n(ring_base, __ATOMIC_ACQUIRE);
+ head_l = head.s.head_pos;
+ tail_l = head.s.tail_pos;
+
+ while (tail_l != head_l) {
+ union roc_ot_ipsec_err_ring_entry entry;
+ int poll_counter = 0;
+
+ while (poll_counter++ <
+ ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT) {
+ plt_delay_us(20);
+ entry.u64 = __atomic_load_n(ring_base + tail_l + 1,
+ __ATOMIC_ACQUIRE);
+ if (likely(entry.u64))
+ break;
+ }
+
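+ /* Entries are big-endian; convert, then rebuild the SA
+ * pointer from address bits [50:7] (data0) and [59:51]
+ * (data1)
+ */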
+ entry.u64 = plt_be_to_cpu_64(entry.u64);
+ sa = (struct roc_ot_ipsec_outb_sa *)(((uint64_t)entry.s.data1
+ << 51) |
+ (entry.s.data0 << 7));
+
+ if (sa != NULL) {
+ uint64_t tmp = ~(uint32_t)0x0;
+ inl_dev->work_cb(&tmp, sa, (port_id << 8) | 0x1);
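+ /* Clear the consumed entry, then bump the 32-bit tail
+ * word (tail_pos, with carry into tail_gen)
+ */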
+ __atomic_store_n(ring_base + tail_l + 1, 0ULL,
+ __ATOMIC_RELAXED);
+ __atomic_add_fetch((uint32_t *)ring_base, 1,
+ __ATOMIC_ACQ_REL);
+ } else {
+ plt_err("Invalid SA");
+ }
+
+ tail_l++;
+ }
+}
+
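+/* Control thread that periodically scans every soft expiry ring whose
+ * bit is set in the bitmap, as long as at least one consumer is
+ * active
+ */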
+static void *
+nix_inl_outb_poll_thread(void *args)
+{
+ struct nix_inl_dev *inl_dev = args;
+ uint32_t poll_freq;
+ uint32_t i;
+ bool bit;
+
+ poll_freq = inl_dev->soft_exp_poll_freq;
+
+ while (!soft_exp_poll_thread_exit) {
+ if (soft_exp_consumer_cnt) {
+ for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
+ bit = plt_bitmap_get(
+ inl_dev->soft_exp_ring_bmap, i);
+ if (bit)
+ inl_outb_soft_exp_poll(inl_dev, i);
+ }
+ }
+ usleep(poll_freq);
+ }
+
+ return NULL;
+}
+
+static int
+nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev)
+{
+ struct plt_bitmap *bmap;
+ size_t bmap_sz;
+ uint32_t i;
+ void *mem;
+ int rc;
+
+ /* Allocate a bitmap that the poll thread uses to track which
+ * soft expiry rings (and hence which ports) it needs to scan
+ */
+ bmap_sz =
+ plt_bitmap_get_memory_footprint(ROC_NIX_INL_MAX_SOFT_EXP_RNGS);
+ mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ plt_err("soft expiry ring bmap alloc failed");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ bmap = plt_bitmap_init(ROC_NIX_INL_MAX_SOFT_EXP_RNGS, mem, bmap_sz);
+ if (!bmap) {
+ plt_err("soft expiry ring bmap init failed");
+ plt_free(mem);
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ inl_dev->soft_exp_ring_bmap_mem = mem;
+ inl_dev->soft_exp_ring_bmap = bmap;
+
+ for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++)
+ plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, i);
+
+ soft_exp_consumer_cnt = 0;
+ soft_exp_poll_thread_exit = false;
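+ /* Poll period of the control thread, in microseconds */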
+ inl_dev->soft_exp_poll_freq = 100;
+ rc = plt_ctrl_thread_create(&inl_dev->soft_exp_poll_thread,
+ "OUTB_SOFT_EXP_POLL_THREAD", NULL,
+ nix_inl_outb_poll_thread, inl_dev);
+ if (rc) {
+ plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
+ plt_free(inl_dev->soft_exp_ring_bmap_mem);
+ }
+
+exit:
+ return rc;
+}
+
int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
if (rc)
goto sso_release;
+ if (roc_inl_dev->set_soft_exp_poll) {
+ rc = nix_inl_outb_poll_thread_setup(inl_dev);
+ if (rc)
+ goto cpt_release;
+ }
+
/* Perform selftest if asked for */
if (inl_dev->selftest) {
rc = nix_inl_selftest();
inl_dev = idev->nix_inl_dev;
pci_dev = inl_dev->pci_dev;
+ if (roc_inl_dev->set_soft_exp_poll) {
+ soft_exp_poll_thread_exit = true;
+ pthread_join(inl_dev->soft_exp_poll_thread, NULL);
+ plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
+ plt_free(inl_dev->soft_exp_ring_bmap_mem);
+ }
+
/* Flush Inbound CTX cache entries */
nix_inl_cpt_ctx_cache_sync(inl_dev);
/* Do we have any work? */
if (work) {
if (inl_dev->work_cb)
- inl_dev->work_cb(gw.u64, inl_dev->cb_args);
+ inl_dev->work_cb(gw.u64, inl_dev->cb_args, false);
else
plt_warn("Undelivered inl dev work gw0: %p gw1: %p",
(void *)gw.u64[0], (void *)gw.u64[1]);
*/
#ifndef _ROC_NIX_INL_PRIV_H_
#define _ROC_NIX_INL_PRIV_H_
+#include <pthread.h>
+#include <sys/types.h>
struct nix_inl_dev {
/* Base device object */
/* CPT data */
struct roc_cpt_lf cpt_lf;
+ /* OUTB soft expiry poll thread */
+ pthread_t soft_exp_poll_thread;
+ uint32_t soft_exp_poll_freq;
+ void *sa_soft_exp_ring[ROC_NIX_INL_MAX_SOFT_EXP_RNGS];
+
+ /* Soft expiry ring bitmap */
+ struct plt_bitmap *soft_exp_ring_bmap;
+
+ /* bitmap memory */
+ void *soft_exp_ring_bmap_mem;
+
/* Device arguments */
uint8_t selftest;
uint16_t channel;
uint16_t outb_err_sso_pffunc;
struct roc_cpt_lf *cpt_lf_base;
uint16_t nb_cpt_lf;
+ uint16_t outb_se_ring_cnt;
+ uint16_t outb_se_ring_base;
/* Mode provided by driver */
bool inb_inl_dev;
#include <rte_ether.h>
#include <rte_interrupts.h>
#include <rte_io.h>
+#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#define BITMASK_ULL GENMASK_ULL
#define PLT_ALIGN_CEIL RTE_ALIGN_CEIL
#define PLT_INIT RTE_INIT
+#define PLT_MAX_ETHPORTS RTE_MAX_ETHPORTS
#define PLT_TAILQ_FOREACH_SAFE RTE_TAILQ_FOREACH_SAFE
#ifndef PLT_ETHER_ADDR_LEN
#define plt_intr_disable rte_intr_disable
#define plt_thread_is_intr rte_thread_is_intr
#define plt_intr_callback_fn rte_intr_callback_fn
+#define plt_ctrl_thread_create rte_ctrl_thread_create
#define plt_intr_efd_counter_size_get rte_intr_efd_counter_size_get
#define plt_intr_efd_counter_size_set rte_intr_efd_counter_size_set
roc_hash_sha512_gen;
roc_idev_cpt_get;
roc_idev_cpt_set;
roc_idev_lmt_base_addr_get;
roc_idev_npa_maxpools_get;
roc_idev_npa_maxpools_set;
roc_nix_inl_outb_sa_base_get;
roc_nix_inl_outb_sso_pffunc_get;
roc_nix_inl_outb_is_enabled;
+ roc_nix_inl_outb_ring_base_get;
+ roc_nix_inl_outb_soft_exp_poll_switch;
roc_nix_inl_sa_sync;
roc_nix_inl_ctx_write;
roc_nix_inl_dev_pffunc_get;
void cn10k_eth_sec_ops_override(void);
/* SSO Work callback */
-void cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args);
+void cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args,
+ uint32_t soft_exp_event);
#define LMT_OFF(lmt_addr, lmt_num, offset) \
(void *)((uintptr_t)(lmt_addr) + \
#include <cn10k_ethdev.h>
#include <cnxk_security.h>
+#include <roc_priv.h>
static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
{ /* AES GCM */
}
void
-cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
+cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
struct rte_eth_event_ipsec_desc desc;
struct cn10k_sec_sess_priv sess_priv;
}
/* Fall through */
default:
- plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
- gw[0], gw[1]);
+ if (soft_exp_event & 0x1) {
+ sa = (struct roc_ot_ipsec_outb_sa *)args;
+ priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
+ desc.metadata = (uint64_t)priv->userdata;
+ desc.subtype = RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY;
+ eth_dev = &rte_eth_devices[soft_exp_event >> 8];
+ rte_eth_dev_callback_process(eth_dev,
+ RTE_ETH_EVENT_IPSEC, &desc);
+ } else {
+ plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
+ gw[0], gw[1]);
+ }
return;
}
free(iv_str);
}
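+/* Point the SA's error control at the soft expiry ring that serves
+ * this SA index and stash SA address bits [59:51] in ctx_id so the
+ * poll thread can rebuild the full pointer from a ring entry.
+ */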
+static int
+cn10k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix,
+ struct roc_ot_ipsec_outb_sa *sa, void *sa_cptr,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ uint32_t sa_idx)
+{
+ uint64_t *ring_base, ring_addr;
+
+ if (ipsec_xfrm->life.bytes_soft_limit ||
+ ipsec_xfrm->life.packets_soft_limit) {
+ ring_base = roc_nix_inl_outb_ring_base_get(roc_nix);
+ if (ring_base == NULL)
+ return -ENOTSUP;
+
+ ring_addr = ring_base[sa_idx >>
+ ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
+ sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
+ sa->ctx.err_ctl.s.address = ring_addr >> 3;
+ sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
+ }
+
+ return 0;
+}
+
static int
cn10k_eth_sec_session_create(void *device,
struct rte_security_session_conf *conf,
if (iv_str)
outb_dbg_iv_update(outb_sa_dptr, iv_str);
+ /* Fill outbound sa misc params */
+ rc = cn10k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr,
+ outb_sa, ipsec, sa_idx);
+ if (rc) {
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to init outb sa misc params, rc=%d",
+ rc);
+ rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+ goto mempool_put;
+ }
+
/* Save userdata */
outb_priv->userdata = conf->userdata;
outb_priv->sa_idx = sa_idx;
/* Disable Rx via NPC */
roc_nix_npc_rx_ena_dis(&dev->nix, false);
+ roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
+
/* Stop rx queues and free up pkts pending */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rc = dev_ops->rx_queue_stop(eth_dev, i);
cnxk_nix_toggle_flag_link_cfg(dev, false);
+ roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, true);
+
return 0;
rx_disable:
inl_dev->attach_cptlf = true;
/* WQE skip is one for DPDK */
inl_dev->wqe_skip = true;
+ inl_dev->set_soft_exp_poll = true;
rc = roc_nix_inl_dev_init(inl_dev);
if (rc) {
plt_err("Failed to init nix inl device, rc=%d(%s)", rc,