uint64_t aura_cfg;
};
-struct __attribute__((__packed__)) gen_req {
+struct __rte_packed gen_req {
uint32_t value;
};
-struct __attribute__((__packed__)) idn_req {
+struct __rte_packed idn_req {
uint8_t domain_id;
};
-struct __attribute__((__packed__)) gen_resp {
+struct __rte_packed gen_resp {
uint16_t domain_id;
uint16_t vfid;
};
-struct __attribute__((__packed__)) dcfg_resp {
+struct __rte_packed dcfg_resp {
uint8_t sso_count;
uint8_t ssow_count;
uint8_t fpa_count;
static struct octeontx_fpadev fpadev;
-int octeontx_logtype_fpavf;
-int octeontx_logtype_fpavf_mbox;
-
-RTE_INIT(otx_pool_init_log)
-{
- octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
- if (octeontx_logtype_fpavf >= 0)
- rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(octeontx_logtype_fpavf, pmd.mempool.octeontx, NOTICE);
/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
{
+ uint16_t global_domain = octeontx_get_global_domain();
struct fpavf_res *res = NULL;
- uint16_t gpool;
unsigned int sz128;
+ int i;
sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);
- for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {
+ for (i = 0; i < FPA_VF_MAX; i++) {
- /* Skip VF that is not mapped Or _inuse */
+ /* Skip VFs that are unmapped, already in use or owned by another domain */
- if ((fpadev.pool[gpool].bar0 == NULL) ||
- (fpadev.pool[gpool].is_inuse == true))
+ if ((fpadev.pool[i].bar0 == NULL) ||
+ (fpadev.pool[i].is_inuse == true) ||
+ (fpadev.pool[i].domain_id != global_domain))
continue;
- res = &fpadev.pool[gpool];
+ res = &fpadev.pool[i];
RTE_ASSERT(res->domain_id != (uint16_t)~0);
RTE_ASSERT(res->vf_id != (uint16_t)~0);
if (res->sz128 == 0) {
res->sz128 = sz128;
+ fpavf_log_dbg("gpool %d blk_sz %d\n", res->vf_id,
+ sz128);
- fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
- return gpool;
+ return res->vf_id;
}
}
return -ENOSPC;
}
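+/* Map a gpool id to its VF resource entry within the current global domain. */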
+static __rte_always_inline struct fpavf_res *
+octeontx_get_fpavf(uint16_t gpool)
+{
+ uint16_t global_domain = octeontx_get_global_domain();
+ int i;
+
+ for (i = 0; i < FPA_VF_MAX; i++) {
+ if (fpadev.pool[i].domain_id != global_domain)
+ continue;
+ if (fpadev.pool[i].vf_id != gpool)
+ continue;
+
+ return &fpadev.pool[i];
+ }
+
+ return NULL;
+}
+
/* lock is taken by caller */
static __rte_always_inline uintptr_t
octeontx_fpa_gpool2handle(uint16_t gpool)
{
struct fpavf_res *res = NULL;
RTE_ASSERT(gpool < FPA_VF_MAX);
+ res = octeontx_get_fpavf(gpool);
+ if (res == NULL)
+ return 0;
- res = &fpadev.pool[gpool];
return (uintptr_t)res->bar0 | gpool;
}
continue;
/* validate gpool */
- if (gpool != i)
+ if (gpool != fpadev.pool[i].vf_id)
return false;
res = &fpadev.pool[i];
struct octeontx_mbox_fpa_cfg cfg;
int ret = -1;
- fpa = &fpadev.pool[gpool];
+ fpa = octeontx_get_fpavf(gpool);
+ if (fpa == NULL)
+ return -EINVAL;
+
memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
FPA_LN_SIZE;
POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
POOL_ENA;
- cfg.aid = FPA_AURA_IDX(gpool);
+ cfg.aid = 0;
cfg.pool_cfg = reg;
cfg.pool_stack_base = phys_addr;
cfg.pool_stack_end = phys_addr + memsz;
struct fpavf_res *fpa = NULL;
int ret = -1;
- fpa = &fpadev.pool[gpool_index];
+ fpa = octeontx_get_fpavf(gpool_index);
+ if (fpa == NULL)
+ return -EINVAL;
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_CONFIGSET;
hdr.vfid = gpool_index;
hdr.res_code = 0;
memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
- cfg.aid = FPA_AURA_IDX(gpool_index);
+ cfg.aid = 0;
ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
goto err;
}
- cfg.aid = FPA_AURA_IDX(gpool_index);
+ cfg.aid = 0;
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_DETACHAURA;
hdr.vfid = gpool_index;
static __rte_always_inline int
octeontx_fpavf_free(unsigned int gpool)
{
+ struct fpavf_res *res = octeontx_get_fpavf(gpool);
int ret = 0;
if (gpool >= FPA_MAX_POOL) {
}
/* Pool is free */
- fpadev.pool[gpool].is_inuse = false;
+ if (res != NULL)
+ res->is_inuse = false;
err:
return ret;
static __rte_always_inline int
octeontx_gpool_free(uint16_t gpool)
{
- if (fpadev.pool[gpool].sz128 != 0) {
- fpadev.pool[gpool].sz128 = 0;
+ struct fpavf_res *res = octeontx_get_fpavf(gpool);
+
+ if (res && res->sz128 != 0) {
+ res->sz128 = 0;
return 0;
}
return -EINVAL;
/* get the gpool */
gpool = octeontx_fpa_bufpool_gpool(handle);
- res = &fpadev.pool[gpool];
- return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
+ res = octeontx_get_fpavf(gpool);
+ return res ? FPA_CACHE_LINE_2_OBJSZ(res->sz128) : 0;
}
int
RTE_SET_USED(node_id);
RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
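+ /* Bring up the octeontx mailbox before any pool configuration requests are sent */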
+ octeontx_mbox_init();
object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
if (object_size > FPA_MAX_OBJ_SIZE) {
errno = EINVAL;
uint16_t domain_id;
uint16_t vf_id;
uint64_t stack_ln_ptr;
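+ /* Number of FPA VFs identified so far; also the next free slot in fpadev.pool[] */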
+ static uint16_t vf_idx;
val = fpavf_read64((void *)((uintptr_t)bar0 +
FPA_VF_VHAURA_CNT_THRESHOLD(0)));
stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
FPA_VF_VHPOOL_THRESHOLD(0)));
- if (vf_id >= FPA_VF_MAX) {
+ if (vf_idx >= FPA_VF_MAX) {
fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
- return -1;
- }
-
- if (fpadev.pool[vf_id].is_inuse) {
- fpavf_log_err("vf_id %d is_inuse\n", vf_id);
- return -1;
+ return -E2BIG;
}
- fpadev.pool[vf_id].domain_id = domain_id;
- fpadev.pool[vf_id].vf_id = vf_id;
- fpadev.pool[vf_id].bar0 = bar0;
- fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;
+ fpadev.pool[vf_idx].domain_id = domain_id;
+ fpadev.pool[vf_idx].vf_id = vf_id;
+ fpadev.pool[vf_idx].bar0 = bar0;
+ fpadev.pool[vf_idx].stack_ln_ptr = stack_ln_ptr;
/* SUCCESS */
- return vf_id;
+ return vf_idx++;
}
/* FPAVF pcie device aka mempool probe */