Sync mailbox data structures to version 1.1.3.
Add mailbox version verification and defer initializing octeontx
devices if the mailbox version mismatches.
Update the OCTEON TX limitations with the maximum mempool size used.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Reviewed-by: Jerin Jacob <jerinj@marvell.com>
When timvf is used as an Event timer adapter, the event schedule type
``RTE_SCHED_TYPE_PARALLEL`` is not supported.
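+
+For example, timers armed through the adapter can request the atomic schedule
+type instead. A minimal sketch using the generic event timer API (queue id,
+priority and tick values below are placeholders):
+
+.. code-block:: c
+
+   #include <rte_event_timer_adapter.h>
+
+   /* timvf does not support RTE_SCHED_TYPE_PARALLEL for the expiry event,
+    * so request RTE_SCHED_TYPE_ATOMIC (or RTE_SCHED_TYPE_ORDERED) instead.
+    */
+   const struct rte_event_timer tim = {
+           .ev.op = RTE_EVENT_OP_NEW,
+           .ev.queue_id = 0,
+           .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+           .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+           .ev.event_type = RTE_EVENT_TYPE_TIMER,
+           .state = RTE_EVENT_TIMER_NOT_ARMED,
+           .timeout_ticks = 100,
+   };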
+
+Max mempool size
+~~~~~~~~~~~~~~~~
+
+The maximum mempool size when using the OCTEON TX Eventdev (SSO) should be
+limited to 128K. When running dpdk-test-eventdev on OCTEON TX, the application
+can limit the number of mbufs by using the option ``--pool_sz 131072``.
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
member of ``struct rte_eth_conf`` is set to a value lower than 32k, frames
up to 32k bytes can still reach the host interface.
+
+Maximum mempool size
+~~~~~~~~~~~~~~~~~~~~
+
+The maximum mempool size supplied to Rx queue setup should be less than 128K.
+When running testpmd on OCTEON TX, the application can limit the number of
+mbufs by using the option ``--total-num-mbufs=131072``.
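+
+For example, an application can size the pool within this limit and attach it
+to an Rx queue as in the sketch below (pool name, cache size and descriptor
+count are placeholders):
+
+.. code-block:: c
+
+   #include <rte_errno.h>
+   #include <rte_ethdev.h>
+   #include <rte_lcore.h>
+   #include <rte_mbuf.h>
+
+   /* Create an Rx mbuf pool within the 128K (131072) element limit and
+    * attach it to Rx queue 0 of the given port.
+    */
+   static int
+   setup_rx_pool(uint16_t port_id)
+   {
+           struct rte_mempool *mp;
+
+           mp = rte_pktmbuf_pool_create("rx_pool", 131072, 256, 0,
+                                        RTE_MBUF_DEFAULT_BUF_SIZE,
+                                        rte_socket_id());
+           if (mp == NULL)
+                   return -rte_errno;
+
+           return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
+                                         NULL, mp);
+   }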
struct mbox {
int init_once;
+ uint8_t ready;
uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
uint16_t tag_own; /* Last tag which was written to own channel */
};
};
+/* MBOX interface version message */
+struct mbox_intf_ver {
+ uint32_t platform:12;
+ uint32_t major:10;
+ uint32_t minor:10;
+};
+
int octeontx_logtype_mbox;
RTE_INIT(otx_init_log)
return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
}
+
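+/* Ask the kernel resource manager (RM) to start the application domain. */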
+static int
+octeontx_start_domain(void)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ int result = -EINVAL;
+
+ hdr.coproc = NO_COPROC;
+ hdr.msg = RM_START_APP;
+
+ result = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (result != 0) {
+ mbox_log_err("Could not start domain. Err=%d. FuncErr=%d\n",
+ result, hdr.res_code);
+ result = -EINVAL;
+ }
+
+ return result;
+}
+
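+/* Send the DPDK mailbox interface version to the RM, return the kernel's
+ * version through intf_ver and fail if the two do not match.
+ */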
+static int
+octeontx_check_mbox_version(struct mbox_intf_ver app_intf_ver,
+ struct mbox_intf_ver *intf_ver)
+{
+ struct mbox_intf_ver kernel_intf_ver = {0};
+ struct octeontx_mbox_hdr hdr = {0};
+ int result = 0;
+
+ hdr.coproc = NO_COPROC;
+ hdr.msg = RM_INTERFACE_VERSION;
+
+ result = octeontx_mbox_send(&hdr, &app_intf_ver, sizeof(app_intf_ver),
+ &kernel_intf_ver, sizeof(kernel_intf_ver));
+ if (result != sizeof(kernel_intf_ver)) {
+ mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d\n",
+ result, hdr.res_code);
+ result = -EINVAL;
+ }
+
+ if (intf_ver)
+ *intf_ver = kernel_intf_ver;
+
+ if (app_intf_ver.platform != kernel_intf_ver.platform ||
+ app_intf_ver.major != kernel_intf_ver.major ||
+ app_intf_ver.minor != kernel_intf_ver.minor)
+ result = -EINVAL;
+
+ return result;
+}
+
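+/* One-time mailbox bring-up: start the application domain, verify that the
+ * kernel mailbox interface version matches MBOX_INTERFACE_VERSION (1.1.3)
+ * and mark the mailbox ready on success.
+ */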
+int
+octeontx_mbox_init(void)
+{
+ const struct mbox_intf_ver MBOX_INTERFACE_VERSION = {
+ .platform = 0x01,
+ .major = 0x01,
+ .minor = 0x03
+ };
+ struct mbox_intf_ver rm_intf_ver = {0};
+ struct mbox *m = &octeontx_mbox;
+ int ret;
+
+ if (m->ready)
+ return 0;
+
+ ret = octeontx_start_domain();
+ if (ret < 0) {
+ m->init_once = 0;
+ return ret;
+ }
+
+ ret = octeontx_check_mbox_version(MBOX_INTERFACE_VERSION,
+ &rm_intf_ver);
+ if (ret < 0) {
+ mbox_log_err("MBOX version: Kernel(%d.%d.%d) != DPDK(%d.%d.%d)",
+ rm_intf_ver.platform, rm_intf_ver.major,
+ rm_intf_ver.minor, MBOX_INTERFACE_VERSION.platform,
+ MBOX_INTERFACE_VERSION.major,
+ MBOX_INTERFACE_VERSION.minor);
+ m->init_once = 0;
+ return -EINVAL;
+ }
+
+ m->ready = 1;
+ rte_mb();
+
+ return 0;
+}
#define SSOW_BAR4_LEN (64 * 1024)
#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+#define NO_COPROC 0x0
+#define RM_START_APP 0x1
+#define RM_INTERFACE_VERSION 0x2
+
#define MBOX_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\
"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
uint16_t vfid; /* VF index or pf resource index local to the domain */
uint8_t coproc; /* Coprocessor id */
uint8_t msg; /* Message id */
+ uint8_t oob; /* out of band data */
uint8_t res_code; /* Functional layer response code */
};
+int octeontx_mbox_init(void);
int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
int octeontx_mbox_set_reg(uint8_t *reg);
int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
global:
octeontx_logtype_mbox;
+ octeontx_mbox_init;
octeontx_mbox_send;
octeontx_mbox_set_ram_mbox_base;
octeontx_mbox_set_reg;
}
struct ssovf_mbox_grp_pri {
+ uint8_t vhgrp_id;
uint8_t wgt_left; /* Read only */
uint8_t weight;
uint8_t affinity;
hdr.msg = SSO_GRP_SET_PRIORITY;
hdr.vfid = queue;
+ grp.vhgrp_id = queue;
grp.weight = 0xff;
grp.affinity = 0xff;
grp.priority = prio / 32; /* Normalize to 0 to 7 */
pki_qos.mmask.f_grptag_ok = 1;
pki_qos.mmask.f_grptag_bad = 1;
- pki_qos.tag_type = queue_conf->ev.sched_type;
+ pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
pki_qos.qos_entry.port_add = 0;
pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
return 0;
}
+ octeontx_mbox_init();
ret = ssovf_info(&oinfo);
if (ret) {
ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
#define SSO_MAX_VHGRP (64)
#define SSO_MAX_VHWS (32)
-#define SSO_VHGRP_AQ_THR (0x1E0ULL)
-
struct ssovf_res {
uint16_t domain;
uint16_t vfid;
RTE_SET_USED(node_id);
RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
+ octeontx_mbox_init();
object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
if (object_size > FPA_MAX_OBJ_SIZE) {
errno = EINVAL;
#define MBOX_BGX_PORT_SET_BP 11
#define MBOX_BGX_PORT_SET_BCAST 12
#define MBOX_BGX_PORT_SET_MCAST 13
+#define MBOX_BGX_PORT_SET_MTU 14
/* BGX port configuration parameters: */
typedef struct octeontx_mbox_bgx_port_conf {
typedef struct octeontx_mbox_bgx_port_status {
uint8_t link_up;
uint8_t bp;
+ uint8_t duplex;
+ uint32_t speed;
} octeontx_mbox_bgx_port_status_t;
/* BGX port statistics: */
#define MBOX_PKI_PORT_RESET_STATS 18
#define MBOX_PKI_GET_PORT_CONFIG 19
#define MBOX_PKI_GET_PORT_QOS_CONFIG 20
+#define MBOX_PKI_PORT_ALLOC_QPG 21
+#define MBOX_PKI_PORT_FREE_QPG 22
+#define MBOX_PKI_SET_PORT_CONFIG 23
#define MBOX_PKI_MAX_QOS_ENTRY 64
struct {
uint8_t fcs_pres:1;
uint8_t fcs_skip:1;
+ uint8_t inst_skip:1;
uint8_t parse_mode:1;
uint8_t mpls_parse:1;
uint8_t inst_hdr_parse:1;
} mmask;
uint8_t fcs_pres;
uint8_t fcs_skip;
+ uint8_t inst_skip;
uint8_t parse_mode;
uint8_t mpls_parse;
uint8_t inst_hdr_parse;
uint16_t gaura;
uint8_t grptag_ok;
uint8_t grptag_bad;
+ uint8_t ena_red;
+ uint8_t ena_drop;
+ uint8_t tag_type;
};
/* pki flow/style enable qos */
struct mbox_pki_qos_entry qos_entry[MBOX_PKI_MAX_QOS_ENTRY];
} mbox_pki_qos_cfg_t;
-/* pki flow/style enable qos */
+/* pki flow/style modify qos */
typedef struct mbox_pki_port_modify_qos_entry {
uint8_t port_type;
uint16_t index;
uint8_t f_grptag_bad:1;
uint8_t f_tag_type:1;
} mmask;
- uint8_t tag_type;
struct mbox_pki_qos_entry qos_entry;
} mbox_pki_mod_qos_t;
-/* pki flow/style enable qos */
+/* pki flow/style delete qos */
typedef struct mbox_pki_port_delete_qos_entry {
uint8_t port_type;
uint16_t index;
uint8_t grptag_bad;
uint8_t ena_red;
uint8_t ena_drop;
+ uint8_t tag_type;
};
#define PKO_MAX_QOS_ENTRY 64
uint8_t f_grptag_bad:1;
uint8_t f_tag_type:1;
} mmask;
- uint8_t tag_type;
struct pki_qos_entry qos_entry;
} pki_mod_qos_t;