uint32_t flags2;
#define BNXT_FLAGS2_PTP_TIMESYNC_ENABLED BIT(0)
#define BNXT_FLAGS2_PTP_ALARM_SCHEDULED BIT(1)
+#define BNXT_FLAGS2_ACCUM_STATS_EN BIT(2)
#define BNXT_P5_PTP_TIMESYNC_ENABLED(bp) \
((bp)->flags2 & BNXT_FLAGS2_PTP_TIMESYNC_ENABLED)
+#define BNXT_ACCUM_STATS_EN(bp) \
+ ((bp)->flags2 & BNXT_FLAGS2_ACCUM_STATS_EN)
uint16_t chip_num;
#define CHIP_NUM_58818 0xd818
bnxt_ulp_create_vfr_default_rules(struct rte_eth_dev *vfr_ethdev);
int32_t
bnxt_ulp_delete_vfr_default_rules(struct bnxt_representor *vfr);
+void bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type,
+ uint8_t *mac, uint8_t *parent_mac);
uint16_t bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type);
+uint16_t bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type);
struct bnxt *bnxt_get_bp(uint16_t port);
uint16_t bnxt_get_svif(uint16_t port_id, bool func_svif,
enum bnxt_ulp_intf_type type);
{ .vendor_id = 0, /* sentinel */ },
};
+#define BNXT_DEVARG_ACCUM_STATS "accum-stats"
#define BNXT_DEVARG_FLOW_XSTAT "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR "representor"
static const char *const bnxt_dev_args[] = {
BNXT_DEVARG_REPRESENTOR,
+ BNXT_DEVARG_ACCUM_STATS,
BNXT_DEVARG_FLOW_XSTAT,
BNXT_DEVARG_MAX_NUM_KFLOWS,
BNXT_DEVARG_REP_BASED_PF,
NULL
};
+/*
+ * accum-stats == false to disable flow counter accumulation
+ * accum-stats == true to enable flow counter accumulation
+ */
+#define BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats) ((accum_stats) > 1)
+
/*
* flow_xstat == false to disable the feature
* flow_xstat == true to enable the feature
return func_svif ? bp->func_svif : bp->port_svif;
}
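+/* Return the port's MAC and, for a trusted VF, also the parent function's MAC. */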
+void
+bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type,
+ uint8_t *mac, uint8_t *parent_mac)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF &&
+ type != BNXT_ULP_INTF_TYPE_PF)
+ return;
+
+ eth_dev = &rte_eth_devices[port];
+ bp = eth_dev->data->dev_private;
+ memcpy(mac, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+ if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF)
+ memcpy(parent_mac, bp->parent->mac_addr, RTE_ETHER_ADDR_LEN);
+}
+
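+/* Return the parent function's vnic id; only valid for trusted VF ports. */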
+uint16_t
+bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF)
+ return 0;
+
+ eth_dev = &rte_eth_devices[port];
+ bp = eth_dev->data->dev_private;
+
+ return bp->parent->vnic;
+}
uint16_t
bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
{
return 0;
}
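+/*
+ * Parser for the "accum-stats" devarg; accepts only 0 or 1 and sets or
+ * clears BNXT_FLAGS2_ACCUM_STATS_EN accordingly.
+ */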
+static int
+bnxt_parse_devarg_accum_stats(__rte_unused const char *key,
+ const char *value, void *opaque_arg)
+{
+ struct bnxt *bp = opaque_arg;
+ unsigned long accum_stats;
+ char *end = NULL;
+
+ if (!value || !opaque_arg) {
+ PMD_DRV_LOG(ERR,
+ "Invalid parameter passed to accum-stats devargs.\n");
+ return -EINVAL;
+ }
+
+ accum_stats = strtoul(value, &end, 10);
+ if (end == NULL || *end != '\0' ||
+ (accum_stats == ULONG_MAX && errno == ERANGE)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid parameter passed to accum-stats devargs.\n");
+ return -EINVAL;
+ }
+
+ if (BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid value passed to accum-stats devargs.\n");
+ return -EINVAL;
+ }
+
+ if (accum_stats) {
+ bp->flags2 |= BNXT_FLAGS2_ACCUM_STATS_EN;
+ PMD_DRV_LOG(INFO, "Host-based accum-stats feature enabled.\n");
+ } else {
+ bp->flags2 &= ~BNXT_FLAGS2_ACCUM_STATS_EN;
+ PMD_DRV_LOG(INFO, "Host-based accum-stats feature disabled.\n");
+ }
+
+ return 0;
+}
+
static int
bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
const char *value, void *opaque_arg)
if (ret)
goto err;
+ /*
+ * Handler for "accum-stats" devarg.
+ * Invoked as for ex: "-a 0000:00:0d.0,accum-stats=1"
+ */
+ rte_kvargs_process(kvlist, BNXT_DEVARG_ACCUM_STATS,
+ bnxt_parse_devarg_accum_stats, bp);
/*
* Handler for "max_num_kflows" devarg.
* Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32"
return rc;
}
+ params.bp = bp;
rc = tf_open_session(&bp->tfp, &params);
if (rc) {
BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
}
/* create the default rules */
bnxt_ulp_create_df_rules(bp);
+
+ if (BNXT_ACCUM_STATS_EN(bp))
+ bp->ulp_ctx->cfg_data->accum_stats = true;
+
BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
bp->eth_dev->data->port_id);
return rc;
#define BNXT_ULP_TUN_ENTRY_INVALID -1
#define BNXT_ULP_MAX_TUN_CACHE_ENTRIES 16
struct bnxt_tun_cache_entry tun_tbl[BNXT_ULP_MAX_TUN_CACHE_ENTRIES];
+ bool accum_stats;
};
struct bnxt_ulp_context {
/* copy the device port id and direction for further processing */
ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_INCOMING_IF,
dev->data->port_id);
+ ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_DEV_PORT_ID,
+ dev->data->port_id);
ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_SVIF_FLAG,
BNXT_ULP_INVALID_SVIF_VAL);
/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
- sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
- sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
+ /* Some DPDK applications may accumulate the flow counters while some
+ * may not. When the application accumulates the counters, the PMD need
+ * not do the accumulation itself, and vice versa, so that the reported
+ * flow counters are correct.
+ */
+ if (ctxt->cfg_data->accum_stats) {
+ sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
+ sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
+ } else {
+ sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
+ sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
+ }
/* Update the parent counters if it is child flow */
if (sw_acc_tbl_entry->parent_flow_id) {
pthread_mutex_unlock(&ulp_fc_info->fc_lock);
} else if (params.resource_sub_type ==
BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
- /* Get the stats from the parent child table */
- ulp_flow_db_parent_flow_count_get(ctxt,
- flow_id,
- &count->hits,
- &count->bytes);
+ /* Get stats from the parent child table */
+ ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
+ &count->hits, &count->bytes,
+ count->reset);
count->hits_set = 1;
count->bytes_set = 1;
} else {
uint32_t a_idx = idx / ULP_INDEX_BITMAP_SIZE;
if (flag) {
- if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
+ if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR || flow_type ==
+ BNXT_ULP_FDB_TYPE_RID)
ULP_INDEX_BITMAP_SET(f_tbl->active_reg_flows[a_idx],
idx);
- else
+ if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT || flow_type ==
+ BNXT_ULP_FDB_TYPE_RID)
ULP_INDEX_BITMAP_SET(f_tbl->active_dflt_flows[a_idx],
idx);
} else {
- if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
+ if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR || flow_type ==
+ BNXT_ULP_FDB_TYPE_RID)
ULP_INDEX_BITMAP_RESET(f_tbl->active_reg_flows[a_idx],
idx);
- else
+ if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT || flow_type ==
+ BNXT_ULP_FDB_TYPE_RID)
ULP_INDEX_BITMAP_RESET(f_tbl->active_dflt_flows[a_idx],
idx);
}
{
struct bnxt_ulp_flow_tbl *f_tbl = &flow_db->flow_tbl;
uint32_t a_idx = idx / ULP_INDEX_BITMAP_SIZE;
-
- if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
- return ULP_INDEX_BITMAP_GET(f_tbl->active_reg_flows[a_idx],
- idx);
- else
- return ULP_INDEX_BITMAP_GET(f_tbl->active_dflt_flows[a_idx],
- idx);
+ uint32_t reg, dflt;
+
+ reg = ULP_INDEX_BITMAP_GET(f_tbl->active_reg_flows[a_idx], idx);
+ dflt = ULP_INDEX_BITMAP_GET(f_tbl->active_dflt_flows[a_idx], idx);
+
+ switch (flow_type) {
+ case BNXT_ULP_FDB_TYPE_REGULAR:
+ return (reg && !dflt);
+ case BNXT_ULP_FDB_TYPE_DEFAULT:
+ return (!reg && dflt);
+ case BNXT_ULP_FDB_TYPE_RID:
+ return (reg && dflt);
+ default:
+ return 0;
+ }
}
static inline enum tf_dir
}
/* Store the handle as 64bit only for EM table entries */
- if (params->resource_func != BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE &&
- params->resource_func != BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE) {
+ if (params->resource_func != BNXT_ULP_RESOURCE_FUNC_EM_TABLE) {
resource_info->resource_hndl = (uint32_t)params->resource_hndl;
resource_info->resource_type = params->resource_type;
resource_info->resource_sub_type = params->resource_sub_type;
params->direction = ulp_flow_db_resource_dir_get(resource_info);
params->resource_func = ulp_flow_db_resource_func_get(resource_info);
- if (params->resource_func == BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE ||
- params->resource_func == BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE) {
+ if (params->resource_func == BNXT_ULP_RESOURCE_FUNC_EM_TABLE) {
params->resource_hndl = resource_info->resource_em_handle;
} else if (params->resource_func & ULP_FLOW_DB_RES_FUNC_NEED_LOWER) {
params->resource_hndl = resource_info->resource_hndl;
return -ENOMEM;
}
size = (flow_tbl->num_flows / sizeof(uint64_t)) + 1;
- size = ULP_BYTE_ROUND_OFF_8(size);
+ size = ULP_BYTE_ROUND_OFF_8(size);
flow_tbl->active_reg_flows = rte_zmalloc("active reg flows", size,
ULP_BUFFER_ALIGN_64_BYTE);
if (!flow_tbl->active_reg_flows) {
return -EINVAL;
}
- if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+ if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
BNXT_TF_DBG(ERR, "Invalid flow type\n");
return -EINVAL;
}
return -EINVAL;
}
- if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+ if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
BNXT_TF_DBG(ERR, "Invalid flow type\n");
return -EINVAL;
}
/* check if the flow is active or not */
if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
- BNXT_TF_DBG(ERR, "flow does not exist\n");
+ BNXT_TF_DBG(ERR, "flow does not exist %x:%x\n", flow_type, fid);
return -EINVAL;
}
return -EINVAL;
}
- if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+ if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
BNXT_TF_DBG(ERR, "Invalid flow type\n");
return -EINVAL;
}
/* check if the flow is active or not */
if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
- BNXT_TF_DBG(ERR, "flow does not exist\n");
+ BNXT_TF_DBG(ERR, "flow does not exist %x:%x\n", flow_type, fid);
return -EINVAL;
}
enum bnxt_ulp_fdb_type flow_type,
uint32_t fid)
{
- struct bnxt_ulp_flow_db *flow_db;
+ struct bnxt_tun_cache_entry *tun_tbl;
struct bnxt_ulp_flow_tbl *flow_tbl;
+ struct bnxt_ulp_flow_db *flow_db;
flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
if (!flow_db) {
return -EINVAL;
}
- if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+ if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
BNXT_TF_DBG(ERR, "Invalid flow type\n");
return -EINVAL;
}
/* check if the flow is active or not */
if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
- BNXT_TF_DBG(ERR, "flow does not exist\n");
+ BNXT_TF_DBG(ERR, "flow does not exist %x:%x\n", flow_type, fid);
return -EINVAL;
}
flow_tbl->head_index--;
BNXT_TF_DBG(ERR, "FlowDB: Head Ptr is zero\n");
return -ENOENT;
}
+
flow_tbl->flow_tbl_stack[flow_tbl->head_index] = fid;
/* Clear the flows bitmap */
if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
ulp_flow_db_func_id_set(flow_db, fid, 0);
+ tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(ulp_ctxt);
+ if (!tun_tbl)
+ return -EINVAL;
+
+ ulp_clear_tun_inner_entry(tun_tbl, fid);
+
/* all good, return success */
return 0;
}
/*
 * Get the flow database entry details
*
* ulp_ctxt [in] Ptr to ulp_context
* flow_type [in] - specify default or regular
return -EINVAL;
}
- if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+ if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
BNXT_TF_DBG(ERR, "Invalid flow type\n");
return -EINVAL;
}
uint64_t *active_flows;
struct bnxt_ulp_flow_tbl *flowtbl = &flow_db->flow_tbl;
- if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
+ if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR) {
active_flows = flowtbl->active_reg_flows;
- else
+ } else if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT) {
active_flows = flowtbl->active_dflt_flows;
+ } else {
+ BNXT_TF_DBG(ERR, "Invalid flow type %x\n", flow_type);
+ return -EINVAL;
+ }
do {
/* increment the flow id to find the next valid flow id */
return -EINVAL;
}
- if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+ if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
BNXT_TF_DBG(ERR, "Invalid flow type\n");
return -EINVAL;
}
}
} else if (resource_func ==
- BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE ||
- resource_func ==
- BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE) {
+ BNXT_ULP_RESOURCE_FUNC_EM_TABLE) {
ulp_flow_db_res_info_to_params(fid_res,
params);
return 0;
return -EINVAL;
}
- if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+ if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
BNXT_TF_DBG(ERR, "Invalid flow type\n");
return -EINVAL;
}
*/
int32_t
ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt,
- uint32_t parent_fid,
- uint64_t *packet_count,
- uint64_t *byte_count)
+ uint32_t parent_fid, uint64_t *packet_count,
+ uint64_t *byte_count, uint8_t count_reset)
{
struct bnxt_ulp_flow_db *flow_db;
struct ulp_fdb_parent_child_db *p_pdb;
p_pdb->parent_flow_tbl[idx].pkt_count;
*byte_count =
p_pdb->parent_flow_tbl[idx].byte_count;
+ if (count_reset) {
+ p_pdb->parent_flow_tbl[idx].pkt_count = 0;
+ p_pdb->parent_flow_tbl[idx].byte_count = 0;
+ }
}
return 0;
}
ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt,
uint32_t parent_fid,
uint64_t *packet_count,
- uint64_t *byte_count);
+ uint64_t *byte_count,
+ uint8_t count_reset);
/*
* reset the parent accumulation counters
#include "ulp_flow_db.h"
#include "tf_util.h"
#include "ulp_template_db_tbl.h"
+#include "ulp_port_db.h"
static uint8_t mapper_fld_ones[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
int32_t rc;
fparms.dir = res->direction;
- if (res->resource_func == BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE)
- fparms.mem = TF_MEM_EXTERNAL;
- else
- fparms.mem = TF_MEM_INTERNAL;
fparms.flow_handle = res->resource_hndl;
rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp, &fparms.tbl_scope_id);
return rc;
}
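+/*
+ * Resolve a port-table field (currently only the parent MAC) from the
+ * port database for the given port id and return a pointer to it in *val.
+ */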
+static int32_t
+ulp_mapper_field_port_db_process(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_field_info *fld,
+ uint32_t port_id,
+ uint16_t val16,
+ uint8_t **val)
+{
+ enum bnxt_ulp_port_table port_data = val16;
+
+ switch (port_data) {
+ case BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_MAC:
+ if (ulp_port_db_parent_mac_addr_get(parms->ulp_ctx, port_id,
+ val)) {
+ BNXT_TF_DBG(ERR, "Invalid port id %u\n", port_id);
+ return -EINVAL;
+ }
+ break;
+ default:
+ BNXT_TF_DBG(ERR, "Invalid port_data %s\n", fld->description);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int32_t
ulp_mapper_field_process_inc_dec(struct bnxt_ulp_mapper_field_info *fld,
struct ulp_blob *blob,
uint16_t const_val = 0;
uint32_t update_flag = 0;
uint64_t src1_val64;
+ uint32_t port_id;
/* process the field opcode */
if (fld->field_opc != BNXT_ULP_FIELD_OPC_COND_OP) {
name);
return -EINVAL;
}
+ } else if (fld->field_opc == BNXT_ULP_FIELD_OPC_PORT_TABLE) {
+ port_id = ULP_COMP_FLD_IDX_RD(parms, idx);
+ if (ulp_mapper_field_port_db_process(parms, fld,
+ port_id, const_val,
+ &val)) {
+ BNXT_TF_DBG(ERR, "%s field port table failed\n",
+ name);
+ return -EINVAL;
+ }
+ if (!ulp_blob_push(blob, val, bitlen)) {
+ BNXT_TF_DBG(ERR, "%s push to blob failed\n",
+ name);
+ return -EINVAL;
+ }
} else {
src1_val64 = ULP_COMP_FLD_IDX_RD(parms, idx);
if (ulp_mapper_field_process_inc_dec(fld, blob,
return rc;
}
/* do the transpose for the internal EM keys */
- if (tbl->resource_func == BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE)
+ if (tbl->resource_type == TF_MEM_INTERNAL)
ulp_blob_perform_byte_reverse(&key);
rc = bnxt_ulp_cntxt_tbl_scope_id_get(parms->ulp_ctx,
case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
rc = ulp_mapper_tcam_tbl_process(parms, tbl);
break;
- case BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE:
- case BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE:
+ case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
rc = ulp_mapper_em_tbl_process(parms, tbl);
break;
case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
rc = ulp_mapper_tcam_entry_free(ulp, tfp, res);
break;
- case BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE:
- case BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE:
+ case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
rc = ulp_mapper_em_entry_free(ulp, tfp, res);
break;
case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
func->ifindex = ifindex;
}
+ /* When there is no match, the default action is to send the packet to
+ * the kernel, and to send it to the kernel we need the PF's vnic id.
+ */
+ func->func_parent_vnic = bnxt_get_parent_vnic_id(port_id, intf->type);
+ bnxt_get_iface_mac(port_id, intf->type, func->func_mac,
+ func->func_parent_mac);
+
port_data = &port_db->phy_port_list[func->phy_port_id];
if (!port_data->port_valid) {
port_data->port_svif =
}
return 0;
}
+
+/*
+ * API to get the parent mac address for a given port id.
+ *
+ * ulp_ctxt [in] Ptr to ulp context
+ * port_id [in] device port id
+ * mac_addr [out] mac address
+ *
+ * Returns 0 on success or negative number on failure.
+ */
+int32_t
+ulp_port_db_parent_mac_addr_get(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t port_id, uint8_t **mac_addr)
+{
+ struct bnxt_ulp_port_db *port_db;
+ uint16_t func_id;
+
+ port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
+ if (ulp_port_db_port_func_id_get(ulp_ctxt, port_id, &func_id)) {
+ BNXT_TF_DBG(ERR, "Invalid port_id %x\n", port_id);
+ return -EINVAL;
+ }
+
+ if (!port_db->ulp_func_id_tbl[func_id].func_valid) {
+ BNXT_TF_DBG(ERR, "Invalid func_id %x\n", func_id);
+ return -ENOENT;
+ }
+ *mac_addr = port_db->ulp_func_id_tbl[func_id].func_parent_mac;
+ return 0;
+}
uint16_t func_spif;
uint16_t func_parif;
uint16_t func_vnic;
+ uint8_t func_mac[RTE_ETHER_ADDR_LEN];
+ uint16_t func_parent_vnic;
+ uint8_t func_parent_mac[RTE_ETHER_ADDR_LEN];
uint16_t phy_port_id;
uint16_t ifindex;
};
ulp_port_db_port_func_id_get(struct bnxt_ulp_context *ulp_ctxt,
uint16_t port_id, uint16_t *func_id);
+/*
+ * API to get the parent mac address for a given port id.
+ *
+ * ulp_ctxt [in] Ptr to ulp context
+ * port_id [in] device port id
+ * mac_addr [out] mac address
+ *
+ * Returns 0 on success or negative number on failure.
+ */
+int32_t
+ulp_port_db_parent_mac_addr_get(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t port_id, uint8_t **mac_addr);
#endif /* _ULP_PORT_DB_H_ */
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_tun.h"
+#include "ulp_template_db_tbl.h"
/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
parif);
}
+ if (mtype == BNXT_ULP_INTF_TYPE_PF) {
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
+ 1);
+ }
}
}
field = ulp_rte_parser_fld_copy(field,
&vlan_tag,
sizeof(vlan_tag));
+
field = ulp_rte_parser_fld_copy(field,
&vlan_spec->inner_type,
sizeof(vlan_spec->inner_type));
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
}
+ /* Some DPDK applications may set the protocol field in the IPv4 spec
+ * but not the mask, so factor the mask into the proto value
+ * calculation.
+ */
+ if (ipv4_mask)
+ proto &= ipv4_mask->hdr.next_proto_id;
+
if (proto == IPPROTO_GRE)
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
}
+ /* Some DPDK applications may set the protocol field in the IPv6 spec
+ * but not the mask, so factor the mask into the proto value
+ * calculation.
+ */
+ if (ipv6_mask)
+ proto &= ipv6_mask->hdr.proto;
+
if (proto == IPPROTO_GRE)
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
uint32_t idx = params->field_idx;
uint32_t size;
- uint16_t dport = 0, sport = 0;
+ uint16_t dport = 0;
uint32_t cnt;
cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
&udp_spec->hdr.src_port,
size);
- sport = udp_spec->hdr.src_port;
size = sizeof(udp_spec->hdr.dst_port);
field = ulp_rte_parser_fld_copy(field,
&udp_spec->hdr.dst_port,
ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
+ if (udp_mask && udp_mask->hdr.src_port)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
+ 1);
+ if (udp_mask && udp_mask->hdr.dst_port)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
+ 1);
} else {
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
+ if (udp_mask && udp_mask->hdr.src_port)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
+ 1);
+ if (udp_mask && udp_mask->hdr.dst_port)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
+ 1);
/* Update the field protocol hdr bitmap */
ulp_rte_l4_proto_type_update(params, dport);
struct ulp_rte_hdr_field *field;
struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
uint32_t idx = params->field_idx;
- uint16_t dport = 0, sport = 0;
uint32_t size;
uint32_t cnt;
* header fields
*/
if (tcp_spec) {
- sport = tcp_spec->hdr.src_port;
size = sizeof(tcp_spec->hdr.src_port);
field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
&tcp_spec->hdr.src_port,
size);
- dport = tcp_spec->hdr.dst_port;
size = sizeof(tcp_spec->hdr.dst_port);
field = ulp_rte_parser_fld_copy(field,
&tcp_spec->hdr.dst_port,
ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
+ if (tcp_mask && tcp_mask->hdr.src_port)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
+ 1);
+ if (tcp_mask && tcp_mask->hdr.dst_port)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
+ 1);
} else {
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
+ if (tcp_mask && tcp_mask->hdr.src_port)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
+ 1);
+ if (tcp_mask && tcp_mask->hdr.dst_port)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
+ 1);
}
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
return BNXT_TF_RC_SUCCESS;
ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
return BNXT_TF_RC_SUCCESS;
}
+
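+/*
+ * Parse an RTE_FLOW_ACTION_TYPE_SAMPLE action. It is accepted only as a
+ * non-nested shared action with a ratio of 1; its nested action list is
+ * parsed and, on success, the SAMPLE bit is set in the action bitmap.
+ */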
+int32_t
+ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params)
+{
+ const struct rte_flow_action_sample *sample;
+ int ret;
+
+ sample = action_item->conf;
+
+ /* if SAMPLE bit is set it means this sample action is nested within the
+ * actions of another sample action; this is not allowed
+ */
+ if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
+ BNXT_ULP_ACT_BIT_SAMPLE))
+ return BNXT_TF_RC_ERROR;
+
+ /* a sample action is only allowed as a shared action */
+ if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
+ BNXT_ULP_ACT_BIT_SHARED))
+ return BNXT_TF_RC_ERROR;
+
+ /* only a ratio of 1 i.e. 100% is supported */
+ if (sample->ratio != 1)
+ return BNXT_TF_RC_ERROR;
+
+ if (!sample->actions)
+ return BNXT_TF_RC_ERROR;
+
+ /* parse the nested actions for a sample action */
+ ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
+ if (ret == BNXT_TF_RC_SUCCESS)
+ /* Update the act_bitmap with sample */
+ ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SAMPLE);
+
+ return ret;
+}
ulp_rte_jump_act_handler(const struct rte_flow_action *action_item,
struct ulp_rte_parser_params *params);
+int32_t
+ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params);
+
+int32_t
+ulp_rte_shared_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params);
+
#endif /* _ULP_RTE_PARSER_H_ */
BNXT_ULP_CF_IDX_I_L3 = 14,
BNXT_ULP_CF_IDX_O_L4 = 15,
BNXT_ULP_CF_IDX_I_L4 = 16,
- BNXT_ULP_CF_IDX_O_L4_SPORT = 17,
- BNXT_ULP_CF_IDX_O_L4_DPORT = 18,
- BNXT_ULP_CF_IDX_I_L4_SPORT = 19,
- BNXT_ULP_CF_IDX_I_L4_DPORT = 20,
+ BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT = 17,
+ BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT = 18,
+ BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT = 19,
+ BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT = 20,
BNXT_ULP_CF_IDX_DEV_PORT_ID = 21,
BNXT_ULP_CF_IDX_DRV_FUNC_SVIF = 22,
BNXT_ULP_CF_IDX_DRV_FUNC_SPIF = 23,
BNXT_ULP_CF_IDX_ACT_PORT_TYPE = 40,
BNXT_ULP_CF_IDX_MATCH_PORT_TYPE = 41,
BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP = 42,
- BNXT_ULP_CF_IDX_VF_TO_VF = 43,
- BNXT_ULP_CF_IDX_L3_HDR_CNT = 44,
- BNXT_ULP_CF_IDX_L4_HDR_CNT = 45,
- BNXT_ULP_CF_IDX_VFR_MODE = 46,
- BNXT_ULP_CF_IDX_L3_TUN = 47,
- BNXT_ULP_CF_IDX_L3_TUN_DECAP = 48,
- BNXT_ULP_CF_IDX_FID = 49,
- BNXT_ULP_CF_IDX_HDR_SIG_ID = 50,
- BNXT_ULP_CF_IDX_FLOW_SIG_ID = 51,
- BNXT_ULP_CF_IDX_WC_MATCH = 52,
- BNXT_ULP_CF_IDX_LAST = 53
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF = 43,
+ BNXT_ULP_CF_IDX_VF_TO_VF = 44,
+ BNXT_ULP_CF_IDX_L3_HDR_CNT = 45,
+ BNXT_ULP_CF_IDX_L4_HDR_CNT = 46,
+ BNXT_ULP_CF_IDX_VFR_MODE = 47,
+ BNXT_ULP_CF_IDX_L3_TUN = 48,
+ BNXT_ULP_CF_IDX_L3_TUN_DECAP = 49,
+ BNXT_ULP_CF_IDX_FID = 50,
+ BNXT_ULP_CF_IDX_HDR_SIG_ID = 51,
+ BNXT_ULP_CF_IDX_FLOW_SIG_ID = 52,
+ BNXT_ULP_CF_IDX_WC_MATCH = 53,
+ BNXT_ULP_CF_IDX_LAST = 54
};
enum bnxt_ulp_cond_list_opc {
BNXT_ULP_FIELD_OPC_SRC1_MINUS_CONST = 2,
BNXT_ULP_FIELD_OPC_SRC1_PLUS_CONST_POST = 3,
BNXT_ULP_FIELD_OPC_SRC1_MINUS_CONST_POST = 4,
- BNXT_ULP_FIELD_OPC_LAST = 5
+ BNXT_ULP_FIELD_OPC_PORT_TABLE = 5,
+ BNXT_ULP_FIELD_OPC_LAST = 6
};
enum bnxt_ulp_field_src {
BNXT_ULP_MEM_TYPE_OPC_LAST = 3
};
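+/* Port database fields addressable through the PORT_TABLE field opcode */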
+enum bnxt_ulp_port_table {
+ BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_MAC = 0,
+ BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_VNIC = 1,
+ BNXT_ULP_PORT_TABLE_DRV_FUNC_SVIF = 2,
+ BNXT_ULP_PORT_TABLE_DRV_FUNC_SPIF = 3,
+ BNXT_ULP_PORT_TABLE_DRV_FUNC_PARIF = 4,
+ BNXT_ULP_PORT_TABLE_DRV_FUNC_VNIC = 5,
+ BNXT_ULP_PORT_TABLE_DRV_FUNC_PHY_PORT = 6,
+ BNXT_ULP_PORT_TABLE_DRV_FUNC_MAC = 7,
+ BNXT_ULP_PORT_TABLE_VF_FUNC_SVIF = 8,
+ BNXT_ULP_PORT_TABLE_VF_FUNC_SPIF = 9,
+ BNXT_ULP_PORT_TABLE_VF_FUNC_PARIF = 10,
+ BNXT_ULP_PORT_TABLE_VF_FUNC_VNIC = 11,
+ BNXT_ULP_PORT_TABLE_VF_FUNC_MAC = 12,
+ BNXT_ULP_PORT_TABLE_PHY_PORT_SVIF = 13,
+ BNXT_ULP_PORT_TABLE_PHY_PORT_SPIF = 14,
+ BNXT_ULP_PORT_TABLE_PHY_PORT_PARIF = 15,
+ BNXT_ULP_PORT_TABLE_PHY_PORT_VPORT = 16,
+ BNXT_ULP_PORT_TABLE_LAST = 17
+};
+
enum bnxt_ulp_pri_opc {
BNXT_ULP_PRI_OPC_NOT_USED = 0,
BNXT_ULP_PRI_OPC_CONST = 1,
enum bnxt_ulp_resource_func {
BNXT_ULP_RESOURCE_FUNC_INVALID = 0x00,
- BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE = 0x20,
- BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE = 0x40,
+ BNXT_ULP_RESOURCE_FUNC_EM_TABLE = 0x20,
+ BNXT_ULP_RESOURCE_FUNC_RSVD1 = 0x40,
BNXT_ULP_RESOURCE_FUNC_RSVD2 = 0x60,
BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE = 0x80,
BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE = 0x81,
.encap_num_fields = 0
},
{ /* class_tid: 1, stingray, table: em.int_0 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_INTERNAL,
.direction = TF_DIR_RX,
.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
.encap_num_fields = 0
},
{ /* class_tid: 1, stingray, table: eem.ext_0 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_EXTERNAL,
.direction = TF_DIR_RX,
.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_EXT,
.result_num_fields = 5
},
{ /* class_tid: 1, wh_plus, table: em.ipv4 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_INTERNAL,
.direction = TF_DIR_RX,
.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
.result_num_fields = 9
},
{ /* class_tid: 1, wh_plus, table: eem.ipv4 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_EXTERNAL,
.direction = TF_DIR_RX,
.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_EXT,
.result_num_fields = 9
},
{ /* class_tid: 1, wh_plus, table: em.ipv6 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_INTERNAL,
.direction = TF_DIR_RX,
.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
.result_num_fields = 9
},
{ /* class_tid: 1, wh_plus, table: eem.ipv6 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_EXTERNAL,
.direction = TF_DIR_RX,
.execute_info = {
.result_num_fields = 5
},
{ /* class_tid: 2, wh_plus, table: em.ipv4 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_INTERNAL,
.direction = TF_DIR_TX,
.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
.result_num_fields = 9
},
{ /* class_tid: 2, wh_plus, table: eem.ipv4 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_EXTERNAL,
.direction = TF_DIR_TX,
.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_EXT,
.result_num_fields = 9
},
{ /* class_tid: 2, wh_plus, table: em.ipv6 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_INTERNAL,
.direction = TF_DIR_TX,
.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
.result_num_fields = 9
},
{ /* class_tid: 2, wh_plus, table: eem.ipv6 */
- .resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
.resource_type = TF_MEM_EXTERNAL,
.direction = TF_DIR_TX,
.execute_info = {
.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
.field_src1 = BNXT_ULP_FIELD_SRC_CF,
.field_opr1 = {
- (BNXT_ULP_CF_IDX_O_L4_SPORT >> 8) & 0xff,
- BNXT_ULP_CF_IDX_O_L4_SPORT & 0xff}
+ (BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT & 0xff}
},
{
.description = "em_key_mask.6",
.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
.field_src1 = BNXT_ULP_FIELD_SRC_CF,
.field_opr1 = {
- (BNXT_ULP_CF_IDX_O_L4_DPORT >> 8) & 0xff,
- BNXT_ULP_CF_IDX_O_L4_DPORT & 0xff}
+ (BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT & 0xff}
},
{
.description = "em_key_mask.7",
.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
.field_src1 = BNXT_ULP_FIELD_SRC_CF,
.field_opr1 = {
- (BNXT_ULP_CF_IDX_O_L4_SPORT >> 8) & 0xff,
- BNXT_ULP_CF_IDX_O_L4_SPORT & 0xff}
+ (BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT & 0xff}
},
{
.description = "em_key_mask.7",
.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
.field_src1 = BNXT_ULP_FIELD_SRC_CF,
.field_opr1 = {
- (BNXT_ULP_CF_IDX_O_L4_DPORT >> 8) & 0xff,
- BNXT_ULP_CF_IDX_O_L4_DPORT & 0xff}
+ (BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT & 0xff}
},
{
.description = "em_key_mask.8",
.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
.field_src1 = BNXT_ULP_FIELD_SRC_CF,
.field_opr1 = {
- (BNXT_ULP_CF_IDX_O_L4_SPORT >> 8) & 0xff,
- BNXT_ULP_CF_IDX_O_L4_SPORT & 0xff}
+ (BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT & 0xff}
},
{
.description = "em_key_mask.6",
.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
.field_src1 = BNXT_ULP_FIELD_SRC_CF,
.field_opr1 = {
- (BNXT_ULP_CF_IDX_O_L4_DPORT >> 8) & 0xff,
- BNXT_ULP_CF_IDX_O_L4_DPORT & 0xff}
+ (BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT & 0xff}
},
{
.description = "em_key_mask.7",
.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
.field_src1 = BNXT_ULP_FIELD_SRC_CF,
.field_opr1 = {
- (BNXT_ULP_CF_IDX_O_L4_SPORT >> 8) & 0xff,
- BNXT_ULP_CF_IDX_O_L4_SPORT & 0xff}
+ (BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT & 0xff}
},
{
.description = "em_key_mask.7",
.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
.field_src1 = BNXT_ULP_FIELD_SRC_CF,
.field_opr1 = {
- (BNXT_ULP_CF_IDX_O_L4_DPORT >> 8) & 0xff,
- BNXT_ULP_CF_IDX_O_L4_DPORT & 0xff}
+ (BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT & 0xff}
},
{
.description = "em_key_mask.8",
/* Structure to be used for passing all the parser functions */
struct ulp_rte_parser_params {
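+ /* Links cached tunnel inner flows in the per-port tun_i_prms_list */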
+ STAILQ_ENTRY(ulp_rte_parser_params) next;
struct ulp_rte_hdr_bitmap hdr_bitmap;
struct ulp_rte_hdr_bitmap hdr_fp_bit;
struct ulp_rte_field_bitmap fld_bitmap;
goto err;
/* Store the tunnel dmac in the tunnel cache table and use it while
- * programming tunnel flow F2.
+ * programming tunnel inner flow.
*/
memcpy(tun_entry->t_dmac,
¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
RTE_ETHER_ADDR_LEN);
- tun_entry->valid = true;
- tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
+ tun_entry->tun_flow_info[params->port_id].state =
+ BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
tun_entry->outer_tun_flow_id = params->fid;
- /* F1 and it's related F2s are correlated based on
- * Tunnel Destination IP Address.
+ /* Tunnel outer flow and its related inner flows are correlated
+ * based on Tunnel Destination IP Address.
*/
if (tun_entry->t_dst_ip_valid)
goto done;
/* This function programs the inner tunnel flow in the hardware. */
static void
-ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry)
+ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
+ struct ulp_rte_parser_params *tun_o_params)
{
struct bnxt_ulp_mapper_create_parms mparms = { 0 };
- struct ulp_rte_parser_params *params;
+ struct ulp_per_port_flow_info *flow_info;
+ struct ulp_rte_parser_params *inner_params;
int ret;
- /* F2 doesn't have tunnel dmac, use the tunnel dmac that was
- * stored during F1 programming.
+ /* The tunnel inner flow doesn't carry the tunnel dmac; use the tunnel
+ * dmac that was stored during F1 programming.
*/
- params = &tun_entry->first_inner_tun_params;
- memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
- tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
- params->parent_fid = tun_entry->outer_tun_flow_id;
- params->fid = tun_entry->first_inner_tun_flow_id;
-
- bnxt_ulp_init_mapper_params(&mparms, params,
- BNXT_ULP_FDB_TYPE_REGULAR);
-
- ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
- if (ret)
- PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
+ flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
+ STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
+ memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
+ tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
+ inner_params->parent_fid = tun_entry->outer_tun_flow_id;
+
+ bnxt_ulp_init_mapper_params(&mparms, inner_params,
+ BNXT_ULP_FDB_TYPE_REGULAR);
+
+ ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
+ if (ret)
+ PMD_DRV_LOG(ERR,
+ "Failed to create inner tun flow, FID:%u.",
+ inner_params->fid);
+ }
}
/* This function either install outer tunnel flow & inner tunnel flow
struct bnxt_tun_cache_entry *tun_entry,
uint16_t tun_idx)
{
- enum bnxt_ulp_tun_flow_state flow_state;
int ret;
- flow_state = tun_entry->state;
ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
- if (ret)
+ if (ret == BNXT_TF_RC_ERROR) {
+ PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
return ret;
+ }
- /* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
- * F1, that means F2 is not deferred. Hence, no need to install F2.
+ /* Install any cached tunnel inner flows that came before tunnel
+ * outer flow.
*/
- if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
- ulp_install_inner_tun_flow(tun_entry);
+ ulp_install_inner_tun_flow(tun_entry, params);
- return 0;
+ return BNXT_TF_RC_FID;
}
/* This function will be called if inner tunnel flow request comes before
* outer tunnel flow request.
*/
static int32_t
-ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
+ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
struct bnxt_tun_cache_entry *tun_entry)
{
+ struct ulp_rte_parser_params *inner_tun_params;
+ struct ulp_per_port_flow_info *flow_info;
int ret;
ret = ulp_matcher_pattern_match(params, ¶ms->class_id);
if (ret != BNXT_TF_RC_SUCCESS)
return BNXT_TF_RC_ERROR;
- /* If Tunnel F2 flow comes first then we can't install it in the
- * hardware, because, F2 flow will not have L2 context information.
- * So, just cache the F2 information and program it in the context
- * of F1 flow installation.
+ /* If the tunnel inner flow comes first, we can't install it in the
+ * hardware because it will not have the L2 context information.
+ * So, just cache the tunnel inner flow information and program it
+ * in the context of the F1 flow installation.
*/
- memcpy(&tun_entry->first_inner_tun_params, params,
- sizeof(struct ulp_rte_parser_params));
-
- tun_entry->first_inner_tun_flow_id = params->fid;
- tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
+ flow_info = &tun_entry->tun_flow_info[params->port_id];
+ inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
+ sizeof(struct ulp_rte_parser_params), 0);
+ if (!inner_tun_params)
+ return BNXT_TF_RC_ERROR;
+ memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
+ STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
+ next);
+ flow_info->tun_i_cnt++;
- /* F1 and it's related F2s are correlated based on
+ /* F1 and its related tunnel inner flows are correlated based on
* Tunnel Destination IP Address. It could be already set, if
* the inner flow got offloaded first.
*/
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
- bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
- bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
+ bool inner_tun_sig, cache_inner_tun_flow;
+ bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
enum bnxt_ulp_tun_flow_state flow_state;
struct bnxt_tun_cache_entry *tun_entry;
uint32_t l3_tun, l3_tun_decap;
if (rc == BNXT_TF_RC_ERROR)
return rc;
- flow_state = tun_entry->state;
+ if (params->port_id >= RTE_MAX_ETHPORTS)
+ return BNXT_TF_RC_ERROR;
+ flow_state = tun_entry->tun_flow_info[params->port_id].state;
/* Outer tunnel flow validation */
- outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
- outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
+ outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
- outer_tun_sig);
+ outer_tun_flow);
/* Inner tunnel flow validation */
inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
- first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
+ cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
inner_tun_sig);
inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
- inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
- inner_tun_sig);
if (outer_tun_reject) {
tun_entry->outer_tun_rej_cnt++;
BNXT_TF_DBG(ERR,
"Tunnel F1 flow rejected, COUNT: %d\n",
tun_entry->outer_tun_rej_cnt);
- /* Inner tunnel flow is rejected if it comes between first inner
- * tunnel flow and outer flow requests.
- */
- } else if (inner_tun_reject) {
- tun_entry->inner_tun_rej_cnt++;
- BNXT_TF_DBG(ERR,
- "Tunnel F2 flow rejected, COUNT: %d\n",
- tun_entry->inner_tun_rej_cnt);
}
- if (outer_tun_reject || inner_tun_reject)
+ if (outer_tun_reject)
return BNXT_TF_RC_ERROR;
- else if (first_inner_tun_flow)
- return ulp_post_process_first_inner_tun_flow(params, tun_entry);
+ else if (cache_inner_tun_flow)
+ return ulp_post_process_cache_inner_tun_flow(params, tun_entry);
else if (outer_tun_flow)
return ulp_post_process_outer_tun_flow(params, tun_entry,
tun_idx);
return BNXT_TF_RC_NORMAL;
}
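+/* Initialize the per-port inner-flow lists of every tunnel cache entry. */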
+void
+ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
+{
+ struct ulp_per_port_flow_info *flow_info;
+ int i, j;
+
+ for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+ for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+ flow_info = &tun_tbl[i].tun_flow_info[j];
+ STAILQ_INIT(&flow_info->tun_i_prms_list);
+ }
+ }
+}
+
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
+ struct ulp_rte_parser_params *inner_params;
+ struct ulp_per_port_flow_info *flow_info;
+ int j;
+
+ for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+ flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
+ /* Drain the per-port list head-first so a freed node's next
+ * pointer is never dereferenced.
+ */
+ while ((inner_params =
+ STAILQ_FIRST(&flow_info->tun_i_prms_list)) != NULL) {
+ STAILQ_REMOVE_HEAD(&flow_info->tun_i_prms_list, next);
+ rte_free(inner_params);
+ }
+ }
+
memset(&tun_tbl[tun_idx], 0,
- sizeof(struct bnxt_tun_cache_entry));
+ sizeof(struct bnxt_tun_cache_entry));
+
+ for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+ flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
+ STAILQ_INIT(&flow_info->tun_i_prms_list);
+ }
+}
+
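+/*
+ * Look for the cached inner flow with the given fid in the per-port list;
+ * if found, unlink and free it. If it was the last cached inner flow and
+ * no outer flow has been offloaded on this port, clear the tunnel entry
+ * so it can be reused. Returns true when the fid was found.
+ */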
+static bool
+ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
+ struct ulp_per_port_flow_info *flow_info,
+ uint32_t fid)
+{
+ struct ulp_rte_parser_params *inner_params;
+ int j;
+
+ STAILQ_FOREACH(inner_params,
+ &flow_info->tun_i_prms_list,
+ next) {
+ if (inner_params->fid == fid) {
+ STAILQ_REMOVE(&flow_info->tun_i_prms_list,
+ inner_params,
+ ulp_rte_parser_params,
+ next);
+ rte_free(inner_params);
+ flow_info->tun_i_cnt--;
+ /* When a DPDK application offloads a duplicate
+ * tunnel inner flow on a port that it is not
+ * destined to, there won't be a tunnel outer flow
+ * associated with these duplicate tunnel inner flows.
+ * So, when the last tunnel inner flow ages out, the
+ * driver has to clear the tunnel entry; otherwise,
+ * the tunnel entry cannot be reused.
+ */
+ if (!flow_info->tun_i_cnt &&
+ flow_info->state != BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
+ memset(tun_entry, 0,
+ sizeof(struct bnxt_tun_cache_entry));
+ /* Re-init every per-port list head wiped by the memset above */
+ for (j = 0; j < RTE_MAX_ETHPORTS; j++)
+ STAILQ_INIT(&tun_entry->tun_flow_info[j].tun_i_prms_list);
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* When a DPDK application offloads the same tunnel inner flow
+ * on all the uplink ports, a tunnel inner flow entry is cached
+ * even if it is not for the right uplink port. Such tunnel
+ * inner flows will eventually age out as there won't be any
+ * traffic on these ports. When such a flow is destroyed, clean
+ * up its tunnel inner flow entry.
+ */
+void
+ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
+{
+ struct ulp_per_port_flow_info *flow_info;
+ int i, j;
+
+ for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+ if (!tun_tbl[i].t_dst_ip_valid)
+ continue;
+ for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+ flow_info = &tun_tbl[i].tun_flow_info[j];
+ if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
+ flow_info, fid) == true)
+ return;
+ }
+ }
}
#include <stdbool.h>
#include <sys/queue.h>
+#include "rte_version.h"
#include "rte_ethdev.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
-#define BNXT_OUTER_TUN_SIGNATURE(l3_tun, params) \
+#if RTE_VERSION_NUM(17, 11, 10, 16) == RTE_VERSION
+#define RTE_ETHER_ADDR_LEN ETHER_ADDR_LEN
+#endif
+
+#define BNXT_OUTER_TUN_FLOW(l3_tun, params) \
((l3_tun) && \
ULP_BITMAP_ISSET((params)->act_bitmap.bits, \
BNXT_ULP_ACT_BIT_JUMP))
!ULP_BITMAP_ISSET((params)->hdr_bitmap.bits, \
BNXT_ULP_HDR_BIT_O_ETH))
-#define BNXT_FIRST_INNER_TUN_FLOW(state, inner_tun_sig) \
+#define BNXT_CACHE_INNER_TUN_FLOW(state, inner_tun_sig) \
((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
#define BNXT_INNER_TUN_FLOW(state, inner_tun_sig) \
((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))
-#define BNXT_OUTER_TUN_FLOW(outer_tun_sig) ((outer_tun_sig))
/* It is invalid to get another outer flow offload request
* for the same tunnel, while the outer flow is already offloaded.
*/
#define BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig) \
((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))
-/* It is invalid to get another inner flow offload request
- * for the same tunnel, while the outer flow is not yet offloaded.
- */
-#define BNXT_REJECT_INNER_TUN_FLOW(state, inner_tun_sig) \
- ((state) == BNXT_ULP_FLOW_STATE_TUN_I_CACHED && (inner_tun_sig))
#define ULP_TUN_O_DMAC_HDR_FIELD_INDEX 1
#define ULP_TUN_O_IPV4_DIP_INDEX 19
* requests arrive.
*
* If inner tunnel flow offload request arrives first then the flow
- * state will change from BNXT_ULP_FLOW_STATE_NORMAL to
- * BNXT_ULP_FLOW_STATE_TUN_I_CACHED and the following outer tunnel
- * flow offload request will change the state of the flow to
- * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from BNXT_ULP_FLOW_STATE_TUN_I_CACHED.
+ * state will remain BNXT_ULP_FLOW_STATE_NORMAL.
+ * The subsequent outer tunnel flow offload request will change the
+ * state of the flow to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from
+ * BNXT_ULP_FLOW_STATE_NORMAL.
*
* If outer tunnel flow offload request arrives first then the flow state
* will change from BNXT_ULP_FLOW_STATE_NORMAL to
enum bnxt_ulp_tun_flow_state {
BNXT_ULP_FLOW_STATE_NORMAL = 0,
BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
- BNXT_ULP_FLOW_STATE_TUN_I_CACHED
+};
+
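+/* Per-port tunnel flow state: offload state, count of cached inner flows,
+ * and the list of cached inner-flow parser params.
+ */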
+struct ulp_per_port_flow_info {
+ enum bnxt_ulp_tun_flow_state state;
+ uint32_t tun_i_cnt;
+ STAILQ_HEAD(, ulp_rte_parser_params) tun_i_prms_list;
};
struct bnxt_tun_cache_entry {
- enum bnxt_ulp_tun_flow_state state;
- bool valid;
bool t_dst_ip_valid;
uint8_t t_dmac[RTE_ETHER_ADDR_LEN];
union {
uint8_t t_dst_ip6[16];
};
uint32_t outer_tun_flow_id;
- uint32_t first_inner_tun_flow_id;
uint16_t outer_tun_rej_cnt;
- uint16_t inner_tun_rej_cnt;
- struct ulp_rte_parser_params first_inner_tun_params;
+ struct ulp_per_port_flow_info tun_flow_info[RTE_MAX_ETHPORTS];
};
+void
+ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl);
+
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);
+void
+ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid);
+
#endif