flows to be directed to one or more queues associated with the VNIC id.
This implementation is supported only when TRUFLOW functionality is disabled.
+- An application can issue a VXLAN decap offload request using the rte_flow
+ API, either as a single rte_flow request or as a combination of two stages.
+ The PMD currently supports the two-stage offload design.
+ In this approach the offload request arrives as two flow offload requests,
+ Flow1 and Flow2. The match criteria for Flow1 are O_DMAC, O_SMAC, O_DST_IP
+ and O_UDP_DPORT, and its actions are COUNT, MARK and JUMP. The match
+ criteria for Flow2 are O_SRC_IP, O_DST_IP, VNI and the inner header fields.
+ The Flow1 and Flow2 offload requests can arrive in any order. If the Flow2
+ request arrives first, Flow2 cannot be offloaded because it carries no
+ O_DMAC information. In this case Flow2 is deferred until the Flow1 offload
+ request arrives. The Flow1 request carries the O_DMAC information; using
+ Flow1's O_DMAC, the driver creates an L2 context entry in the hardware as
+ part of offloading Flow1.
+ Flow2 then uses Flow1's O_DMAC to obtain the L2 context id associated with
+ this O_DMAC, together with the other flow fields that were cached when
+ Flow2 was deferred. A Flow2 request that arrives after Flow1 has been
+ offloaded is programmed directly and is not cached.
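Below is a minimal, illustrative C sketch (an editorial aside, not part of this patch) of how an application might issue the two requests via the rte_flow API; the port id, group number, mark id and the omitted spec/mask values are assumptions made only for this example.

#include <stdint.h>
#include <rte_flow.h>

/* Hedged sketch of Flow1: match the outer headers (the elided spec/mask
 * entries would carry O_DMAC, O_SMAC, O_DST_IP and O_UDP_DPORT) and apply
 * COUNT, MARK and JUMP.
 */
static struct rte_flow *
offload_flow1(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* O_DMAC, O_SMAC */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* O_DST_IP */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* O_UDP_DPORT (4789) */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_mark mark = { .id = 0xbeef };	/* illustrative */
	struct rte_flow_action_jump jump = { .group = 1 };	/* illustrative */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

/* Hedged sketch of Flow2: match O_SRC_IP/O_DST_IP, the VNI and the inner
 * headers, and request VXLAN decapsulation. It may be issued before or
 * after Flow1; the PMD defers it until Flow1 is offloaded if needed.
 */
static struct rte_flow *
offload_flow2(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* O_SRC_IP, O_DST_IP */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },	/* VNI */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner header fields */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}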
+
Note: A VNIC represents a virtual interface in the hardware. It is a resource
in the RX path of the chip and is used to set up various target actions such as
RSS, MAC filtering, etc., for the physical function in use.
* Updated HWRM structures to 1.10.1.70 version.
* Added TRUFLOW support for Stingray devices.
* Added support for representors on MAIA cores of SR.
+ * Added support for VXLAN decap offload using rte_flow.
* **Updated Cisco enic driver.**
'tf_ulp/ulp_port_db.c',
'tf_ulp/ulp_def_rules.c',
'tf_ulp/ulp_fc_mgr.c',
+ 'tf_ulp/ulp_tun.c',
'tf_ulp/ulp_template_db_wh_plus_act.c',
'tf_ulp/ulp_template_db_wh_plus_class.c',
'tf_ulp/ulp_template_db_stingray_act.c',
enum bnxt_tf_rc {
BNXT_TF_RC_PARSE_ERR = -2,
BNXT_TF_RC_ERROR = -1,
- BNXT_TF_RC_SUCCESS = 0
+ BNXT_TF_RC_SUCCESS = 0,
+ BNXT_TF_RC_NORMAL = 1,
+ BNXT_TF_RC_FID = 2,
};
/* eth IPv4 Type */
return ulp_ctx->cfg_data->flow_db;
}
+/* Function to get the tunnel cache table info from the ulp context. */
+struct bnxt_tun_cache_entry *
+bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return NULL;
+
+ return ulp_ctx->cfg_data->tun_tbl;
+}
+
/* Function to get the ulp context from eth device. */
struct bnxt_ulp_context *
bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
#include "rte_ethdev.h"
#include "ulp_template_db_enum.h"
+#include "ulp_tun.h"
+#include "bnxt_tf_common.h"
/* NAT defines to reuse existing inner L2 SMAC and DMAC */
#define BNXT_ULP_NAT_INNER_L2_HEADER_SMAC 0x2000
struct bnxt_ulp_df_rule_info df_rule_info[RTE_MAX_ETHPORTS];
struct bnxt_ulp_vfr_rule_info vfr_rule_info[RTE_MAX_ETHPORTS];
enum bnxt_ulp_flow_mem_type mem_type;
+#define BNXT_ULP_TUN_ENTRY_INVALID -1
+#define BNXT_ULP_MAX_TUN_CACHE_ENTRIES 16
+ struct bnxt_tun_cache_entry tun_tbl[BNXT_ULP_MAX_TUN_CACHE_ENTRIES];
};
struct bnxt_ulp_context {
struct bnxt_ulp_flow_db *
bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx);
+/* Function to get the tunnel cache table info from the ulp context. */
+struct bnxt_tun_cache_entry *
+bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx);
+
/* Function to get the ulp context from eth device. */
struct bnxt_ulp_context *
bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev);
void
bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx);
+int32_t
+ulp_post_process_tun_flow(struct ulp_rte_parser_params *params);
+
#endif /* _BNXT_ULP_H_ */
void
bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
struct ulp_rte_parser_params *params,
- uint32_t priority, uint32_t class_id,
- uint32_t act_tmpl, uint16_t func_id,
- uint32_t fid,
enum bnxt_ulp_fdb_type flow_type)
{
- mapper_cparms->app_priority = priority;
- mapper_cparms->dir_attr = params->dir_attr;
-
- mapper_cparms->class_tid = class_id;
- mapper_cparms->act_tid = act_tmpl;
- mapper_cparms->func_id = func_id;
- mapper_cparms->hdr_bitmap = &params->hdr_bitmap;
- mapper_cparms->hdr_field = params->hdr_field;
- mapper_cparms->comp_fld = params->comp_fld;
- mapper_cparms->act = &params->act_bitmap;
- mapper_cparms->act_prop = &params->act_prop;
- mapper_cparms->flow_type = flow_type;
- mapper_cparms->flow_id = fid;
+ mapper_cparms->flow_type = flow_type;
+ mapper_cparms->app_priority = params->priority;
+ mapper_cparms->dir_attr = params->dir_attr;
+ mapper_cparms->class_tid = params->class_id;
+ mapper_cparms->act_tid = params->act_tmpl;
+ mapper_cparms->func_id = params->func_id;
+ mapper_cparms->hdr_bitmap = &params->hdr_bitmap;
+ mapper_cparms->hdr_field = params->hdr_field;
+ mapper_cparms->comp_fld = params->comp_fld;
+ mapper_cparms->act = &params->act_bitmap;
+ mapper_cparms->act_prop = &params->act_prop;
+ mapper_cparms->flow_id = params->fid;
+ mapper_cparms->parent_flow = params->parent_flow;
+ mapper_cparms->parent_fid = params->parent_fid;
}
/* Function to create the rte flow. */
struct ulp_rte_parser_params params;
struct bnxt_ulp_context *ulp_ctx;
int rc, ret = BNXT_TF_RC_ERROR;
- uint32_t class_id, act_tmpl;
struct rte_flow *flow_id;
uint16_t func_id;
uint32_t fid;
pattern, actions,
error) == BNXT_TF_RC_ERROR) {
BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
- goto parse_err1;
+ goto flow_error;
}
ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
if (!ulp_ctx) {
BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
- goto parse_err1;
+ goto flow_error;
}
/* Initialize the parser params */
dev->data->port_id,
&func_id)) {
BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
- goto parse_err1;
+ goto flow_error;
}
/* Protect flow creation */
if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
- goto parse_err1;
+ goto flow_error;
}
/* Allocate a Flow ID for attaching all resources for the flow to.
func_id, &fid);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to allocate flow table entry\n");
- goto parse_err2;
+ goto release_lock;
}
/* Parse the rte flow pattern */
ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
if (ret != BNXT_TF_RC_SUCCESS)
- goto parse_err3;
+ goto free_fid;
/* Parse the rte flow action */
ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
if (ret != BNXT_TF_RC_SUCCESS)
- goto parse_err3;
+ goto free_fid;
+ params.fid = fid;
+ params.func_id = func_id;
+ params.priority = attr->priority;
/* Perform the rte flow post process */
ret = bnxt_ulp_rte_parser_post_process(&params);
- if (ret != BNXT_TF_RC_SUCCESS)
- goto parse_err3;
+ if (ret == BNXT_TF_RC_ERROR)
+ goto free_fid;
+ else if (ret == BNXT_TF_RC_FID)
+ goto return_fid;
- ret = ulp_matcher_pattern_match(&params, &class_id);
+ ret = ulp_matcher_pattern_match(&params, &params.class_id);
if (ret != BNXT_TF_RC_SUCCESS)
- goto parse_err3;
+ goto free_fid;
- ret = ulp_matcher_action_match(&params, &act_tmpl);
+ ret = ulp_matcher_action_match(&params, &params.act_tmpl);
if (ret != BNXT_TF_RC_SUCCESS)
- goto parse_err3;
+ goto free_fid;
- bnxt_ulp_init_mapper_params(&mapper_cparms, &params, attr->priority,
- class_id, act_tmpl, func_id, fid,
+ bnxt_ulp_init_mapper_params(&mapper_cparms, &params,
BNXT_ULP_FDB_TYPE_REGULAR);
/* Call the ulp mapper to create the flow in the hardware. */
ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms);
if (ret)
- goto parse_err3;
+ goto free_fid;
+return_fid:
bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
flow_id = (struct rte_flow *)((uintptr_t)fid);
return flow_id;
-parse_err3:
+free_fid:
ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid);
-parse_err2:
+release_lock:
bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
-parse_err1:
+flow_error:
rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to create flow.");
return NULL;
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct ulp_rte_parser_params params;
+ struct ulp_rte_parser_params params;
+ struct bnxt_ulp_context *ulp_ctx;
uint32_t class_id, act_tmpl;
int ret = BNXT_TF_RC_ERROR;
- struct bnxt_ulp_context *ulp_ctx;
if (bnxt_ulp_flow_validate_args(attr,
pattern, actions,
/* Perform the rte flow post process */
ret = bnxt_ulp_rte_parser_post_process(&params);
- if (ret != BNXT_TF_RC_SUCCESS)
+ if (ret == BNXT_TF_RC_ERROR)
goto parse_error;
+ else if (ret == BNXT_TF_RC_FID)
+ return 0;
ret = ulp_matcher_pattern_match(&params, &class_id);
struct rte_flow *flow,
struct rte_flow_error *error)
{
- int ret = 0;
struct bnxt_ulp_context *ulp_ctx;
uint32_t flow_id;
uint16_t func_id;
+ int ret;
ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
if (!ulp_ctx) {
#include "ulp_mapper.h"
#include "ulp_flow_db.h"
#include "ulp_fc_mgr.h"
+#include "ulp_tun.h"
#define ULP_FLOW_DB_RES_DIR_BIT 31
#define ULP_FLOW_DB_RES_DIR_MASK 0x80000000
}
}
+/* internal validation function for parent flow tbl */
+static struct bnxt_ulp_flow_db *
+ulp_flow_db_parent_arg_validation(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t fid)
+{
+ struct bnxt_ulp_flow_db *flow_db;
+
+ flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
+ if (!flow_db) {
+ BNXT_TF_DBG(ERR, "Invalid Arguments\n");
+ return NULL;
+ }
+
+ /* check for max flows */
+ if (fid >= flow_db->flow_tbl.num_flows || !fid) {
+ BNXT_TF_DBG(ERR, "Invalid flow index\n");
+ return NULL;
+ }
+
+ /* No support for parent child db then just exit */
+ if (!flow_db->parent_child_db.entries_count) {
+ BNXT_TF_DBG(ERR, "parent child db not supported\n");
+ return NULL;
+ }
+
+ return flow_db;
+}
+
+/*
+ * Set the tunnel index in the parent flow
+ *
+ * ulp_ctxt [in] Ptr to ulp_context
+ * parent_idx [in] The parent index of the parent flow entry
+ * tun_idx [in] The tunnel index to store in the parent flow entry
+ *
+ * returns 0 on success and negative on failure.
+ */
+static int32_t
+ulp_flow_db_parent_tun_idx_set(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t parent_idx, uint8_t tun_idx)
+{
+ struct bnxt_ulp_flow_db *flow_db;
+ struct ulp_fdb_parent_child_db *p_pdb;
+
+ flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
+ if (!flow_db) {
+ BNXT_TF_DBG(ERR, "Invalid Arguments\n");
+ return -EINVAL;
+ }
+
+ /* check for parent idx validity */
+ p_pdb = &flow_db->parent_child_db;
+ if (parent_idx >= p_pdb->entries_count ||
+ !p_pdb->parent_flow_tbl[parent_idx].parent_fid) {
+ BNXT_TF_DBG(ERR, "Invalid parent flow index %x\n", parent_idx);
+ return -EINVAL;
+ }
+
+ p_pdb->parent_flow_tbl[parent_idx].tun_idx = tun_idx;
+ return 0;
+}
+
+/*
+ * Get the tunnel index from the parent flow
+ *
+ * ulp_ctxt [in] Ptr to ulp_context
+ * parent_fid [in] The flow id of the parent flow entry
+ * tun_idx [out] Pointer to return the tunnel index of the parent flow
+ *
+ * returns 0 on success and negative on failure.
+ */
+static int32_t
+ulp_flow_db_parent_tun_idx_get(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t parent_fid, uint8_t *tun_idx)
+{
+ struct bnxt_ulp_flow_db *flow_db;
+ struct ulp_fdb_parent_child_db *p_pdb;
+ uint32_t idx;
+
+ /* validate the arguments */
+ flow_db = ulp_flow_db_parent_arg_validation(ulp_ctxt, parent_fid);
+ if (!flow_db) {
+ BNXT_TF_DBG(ERR, "parent child db validation failed\n");
+ return -EINVAL;
+ }
+
+ p_pdb = &flow_db->parent_child_db;
+ for (idx = 0; idx < p_pdb->entries_count; idx++) {
+ if (p_pdb->parent_flow_tbl[idx].parent_fid == parent_fid) {
+ *tun_idx = p_pdb->parent_flow_tbl[idx].tun_idx;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
/*
* Initialize the flow database. Memory is allocated in this
* call and assigned to the flow database.
struct bnxt_ulp_flow_tbl *flow_tbl;
struct ulp_fdb_resource_info *nxt_resource, *fid_resource;
uint32_t nxt_idx = 0;
+ struct bnxt_tun_cache_entry *tun_tbl;
+ uint8_t tun_idx = 0;
+ int rc;
flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
if (!flow_db) {
params->resource_hndl);
}
+ if (params->resource_func == BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW) {
+ tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(ulp_ctxt);
+ if (!tun_tbl)
+ return -EINVAL;
+
+ rc = ulp_flow_db_parent_tun_idx_get(ulp_ctxt, fid, &tun_idx);
+ if (rc)
+ return rc;
+
+ ulp_clear_tun_entry(tun_tbl, tun_idx);
+ }
+
/* all good, return success */
return 0;
}
return 0;
}
-/* internal validation function for parent flow tbl */
-static struct bnxt_ulp_flow_db *
-ulp_flow_db_parent_arg_validation(struct bnxt_ulp_context *ulp_ctxt,
- uint32_t fid)
-{
- struct bnxt_ulp_flow_db *flow_db;
-
- flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
- if (!flow_db) {
- BNXT_TF_DBG(ERR, "Invalid Arguments\n");
- return NULL;
- }
-
- /* check for max flows */
- if (fid >= flow_db->flow_tbl.num_flows || !fid) {
- BNXT_TF_DBG(ERR, "Invalid flow index\n");
- return NULL;
- }
-
- /* No support for parent child db then just exit */
- if (!flow_db->parent_child_db.entries_count) {
- BNXT_TF_DBG(ERR, "parent child db not supported\n");
- return NULL;
- }
-
- return flow_db;
-}
-
/*
* Allocate the entry in the parent-child database
*
struct ulp_flow_db_res_params fid_parms;
uint32_t sub_type = BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT_ACC;
struct ulp_flow_db_res_params res_params;
- int32_t fid_idx;
+ int32_t fid_idx, rc;
/* create the child flow entry in parent flow table */
fid_idx = ulp_flow_db_parent_flow_alloc(parms->ulp_ctx, parms->fid);
return -1;
}
}
+
+ rc = ulp_flow_db_parent_tun_idx_set(parms->ulp_ctx, fid_idx,
+ parms->tun_idx);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Error setting tun_idx in the parent flow\n");
+ return rc;
+ }
+
return 0;
}
uint64_t pkt_count;
uint64_t byte_count;
uint64_t *child_fid_bitset;
+ uint32_t f2_cnt;
+ uint8_t tun_idx;
};
/* Structure to maintain parent-child flow relationships */
parms.parent_flow = cparms->parent_flow;
parms.parent_fid = cparms->parent_fid;
parms.fid = cparms->flow_id;
+ parms.tun_idx = cparms->tun_idx;
/* Get the device id from the ulp context */
if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &parms.dev_id)) {
struct bnxt_ulp_device_params *device_params;
uint32_t parent_fid;
uint32_t parent_flow;
+ uint8_t tun_idx;
};
struct bnxt_ulp_mapper_create_parms {
uint32_t parent_fid;
/* if set then create a parent flow */
uint32_t parent_flow;
+ uint8_t tun_idx;
};
/* Function to initialize any dynamic mapper data. */
#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
+#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
+#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
+#include "ulp_flow_db.h"
+#include "ulp_mapper.h"
+#include "ulp_tun.h"
/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
}
}
-/*
- * Function to handle the post processing of the parsing details
- */
-int32_t
-bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
+static int32_t
+ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
- enum bnxt_ulp_direction_type dir;
enum bnxt_ulp_intf_type match_port_type, act_port_type;
+ enum bnxt_ulp_direction_type dir;
uint32_t act_port_set;
/* Get the computed details */
return 0;
}
+/*
+ * Function to handle the post processing of the parsing details
+ */
+int32_t
+bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
+{
+ ulp_post_process_normal_flow(params);
+ return ulp_post_process_tun_flow(params);
+}
+
/*
* Function to compute the flow direction based on the match port details
*/
params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
/* Update the protocol hdr bitmap */
- if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
+ if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_ETH) ||
+ ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_IPV4) ||
+ ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_IPV6) ||
+ ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_UDP) ||
+ ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_TCP)) {
ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
inner_flag = 1;
} else {
return BNXT_TF_RC_ERROR;
}
+ if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_ETH) &&
+ !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_I_ETH)) {
+ /* Since the F2 flow does not include an eth item, when the parser
+ * detects an IPv4/IPv6 item that belongs to the outer header, i.e.,
+ * o_ipv4/o_ipv6, check whether O_ETH or I_ETH is set. If neither is
+ * set, add an offset of sizeof(o_eth/oo_vlan/oi_vlan) to the index.
+ * This allows the parser post processor to update the t_dmac in
+ * hdr_field[o_eth.dmac].
+ */
+ idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
+ BNXT_ULP_PROTO_HDR_VLAN_NUM);
+ params->field_idx = idx;
+ }
+
/*
* Copy the rte_flow_item for ipv4 into hdr_field using ipv4
* header fields
return BNXT_TF_RC_ERROR;
}
+ if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_ETH) &&
+ !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_I_ETH)) {
+ /* Since the F2 flow does not include an eth item, when the parser
+ * detects an IPv4/IPv6 item that belongs to the outer header, i.e.,
+ * o_ipv4/o_ipv6, check whether O_ETH or I_ETH is set. If neither is
+ * set, add an offset of sizeof(o_eth/oo_vlan/oi_vlan) to the index.
+ * This allows the parser post processor to update the t_dmac in
+ * hdr_field[o_eth.dmac].
+ */
+ idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
+ BNXT_ULP_PROTO_HDR_VLAN_NUM);
+ params->field_idx = idx;
+ }
+
/*
* Copy the rte_flow_item for ipv6 into hdr_field using ipv6
* header fields
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
uint16_t dst_port)
{
- if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
+ if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
ULP_BITMAP_SET(param->hdr_fp_bit.bits,
BNXT_ULP_HDR_BIT_T_VXLAN);
+ ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ }
}
/* Function to handle the parsing of RTE Flow item UDP Header. */
field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
&udp_spec->hdr.src_port,
size);
+
size = sizeof(udp_spec->hdr.dst_port);
field = ulp_rte_parser_fld_copy(field,
&udp_spec->hdr.dst_port,
/* update the hdr_bitmap with vxlan */
ULP_BITMAP_SET(params->act_bitmap.bits,
BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
+ /* Update computational field with tunnel decap info */
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
return BNXT_TF_RC_SUCCESS;
}
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_mapper.h"
+#include "bnxt_tf_common.h"
/* defines to be used in the tunnel header parsing */
#define BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS 2
void
bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
struct ulp_rte_parser_params *params,
- uint32_t priority, uint32_t class_id,
- uint32_t act_tmpl, uint16_t func_id,
- uint32_t flow_id,
enum bnxt_ulp_fdb_type flow_type);
/* Function to handle the parsing of the RTE port id. */
BNXT_ULP_CF_IDX_L4_HDR_CNT = 41,
BNXT_ULP_CF_IDX_VFR_MODE = 42,
BNXT_ULP_CF_IDX_LOOPBACK_PARIF = 43,
- BNXT_ULP_CF_IDX_LAST = 44
+ BNXT_ULP_CF_IDX_L3_TUN = 44,
+ BNXT_ULP_CF_IDX_L3_TUN_DECAP = 45,
+ BNXT_ULP_CF_IDX_LAST = 46
};
enum bnxt_ulp_cond_opcode {
struct ulp_rte_act_bitmap act_bitmap;
struct ulp_rte_act_prop act_prop;
uint32_t dir_attr;
+ uint32_t priority;
+ uint32_t fid;
+ uint32_t parent_flow;
+ uint32_t parent_fid;
+ uint16_t func_id;
+ uint32_t class_id;
+ uint32_t act_tmpl;
struct bnxt_ulp_context *ulp_ctx;
};
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <rte_malloc.h>
+
+#include "ulp_tun.h"
+#include "ulp_rte_parser.h"
+#include "ulp_template_db_enum.h"
+#include "ulp_template_struct.h"
+#include "ulp_matcher.h"
+#include "ulp_mapper.h"
+#include "ulp_flow_db.h"
+
+/* This function programs the outer tunnel flow in the hardware. */
+static int32_t
+ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
+ struct bnxt_tun_cache_entry *tun_entry,
+ uint16_t tun_idx)
+{
+ struct bnxt_ulp_mapper_create_parms mparms = { 0 };
+ int ret;
+
+ /* Reset the JUMP action bit in the action bitmap as we don't
+ * offload this action.
+ */
+ ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
+
+ ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
+
+ ret = ulp_matcher_pattern_match(params, &params->class_id);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto err;
+
+ ret = ulp_matcher_action_match(params, &params->act_tmpl);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ goto err;
+
+ params->parent_flow = true;
+ bnxt_ulp_init_mapper_params(&mparms, params,
+ BNXT_ULP_FDB_TYPE_REGULAR);
+ mparms.tun_idx = tun_idx;
+
+ /* Call the ulp mapper to create the flow in the hardware. */
+ ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
+ if (ret)
+ goto err;
+
+ /* Store the tunnel dmac in the tunnel cache table and use it while
+ * programming tunnel flow F2.
+ */
+ memcpy(tun_entry->t_dmac,
+ &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
+ RTE_ETHER_ADDR_LEN);
+
+ tun_entry->valid = true;
+ tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
+ tun_entry->outer_tun_flow_id = params->fid;
+
+ /* F1 and its related F2s are correlated based on
+ * Tunnel Destination IP Address.
+ */
+ if (tun_entry->t_dst_ip_valid)
+ goto done;
+ if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
+ memcpy(&tun_entry->t_dst_ip,
+ &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
+ sizeof(rte_be32_t));
+ else
+ memcpy(tun_entry->t_dst_ip6,
+ &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
+ sizeof(tun_entry->t_dst_ip6));
+ tun_entry->t_dst_ip_valid = true;
+
+done:
+ return BNXT_TF_RC_FID;
+
+err:
+ memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
+ return BNXT_TF_RC_ERROR;
+}
+
+/* This function programs the inner tunnel flow in the hardware. */
+static void
+ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry)
+{
+ struct bnxt_ulp_mapper_create_parms mparms = { 0 };
+ struct ulp_rte_parser_params *params;
+ int ret;
+
+ /* F2 doesn't have tunnel dmac, use the tunnel dmac that was
+ * stored during F1 programming.
+ */
+ params = &tun_entry->first_inner_tun_params;
+ memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
+ tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
+ params->parent_fid = tun_entry->outer_tun_flow_id;
+ params->fid = tun_entry->first_inner_tun_flow_id;
+
+ bnxt_ulp_init_mapper_params(&mparms, params,
+ BNXT_ULP_FDB_TYPE_REGULAR);
+
+ ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
+}
+
+/* This function installs either both the outer and inner tunnel flows or
+ * just the outer tunnel flow, based on the flow state.
+ */
+static int32_t
+ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
+ struct bnxt_tun_cache_entry *tun_entry,
+ uint16_t tun_idx)
+{
+ enum bnxt_ulp_tun_flow_state flow_state;
+ int ret;
+
+ flow_state = tun_entry->state;
+ ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
+ if (ret)
+ return ret;
+
+ /* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
+ * F1, that means F2 is not deferred. Hence, no need to install F2.
+ */
+ if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
+ ulp_install_inner_tun_flow(tun_entry);
+
+ return 0;
+}
+
+/* This function will be called if the inner tunnel flow request comes
+ * before the outer tunnel flow request.
+ */
+static int32_t
+ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
+ struct bnxt_tun_cache_entry *tun_entry)
+{
+ int ret;
+
+ ret = ulp_matcher_pattern_match(params, &params->class_id);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ return BNXT_TF_RC_ERROR;
+
+ ret = ulp_matcher_action_match(params, &params->act_tmpl);
+ if (ret != BNXT_TF_RC_SUCCESS)
+ return BNXT_TF_RC_ERROR;
+
+ /* If the tunnel F2 flow comes first then we can't install it in the
+ * hardware, because the F2 flow will not have L2 context information.
+ * So, just cache the F2 information and program it in the context
+ * of F1 flow installation.
+ */
+ memcpy(&tun_entry->first_inner_tun_params, params,
+ sizeof(struct ulp_rte_parser_params));
+
+ tun_entry->first_inner_tun_flow_id = params->fid;
+ tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
+
+ /* F1 and its related F2s are correlated based on
+ * Tunnel Destination IP Address. It could already be set, if
+ * the inner flow got offloaded first.
+ */
+ if (tun_entry->t_dst_ip_valid)
+ goto done;
+ if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
+ memcpy(&tun_entry->t_dst_ip,
+ &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
+ sizeof(rte_be32_t));
+ else
+ memcpy(tun_entry->t_dst_ip6,
+ &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
+ sizeof(tun_entry->t_dst_ip6));
+ tun_entry->t_dst_ip_valid = true;
+
+done:
+ return BNXT_TF_RC_FID;
+}
+
+/* This function will be called if the inner tunnel flow request comes after
+ * the outer tunnel flow request.
+ */
+static int32_t
+ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
+ struct bnxt_tun_cache_entry *tun_entry)
+{
+ memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
+ tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
+
+ params->parent_fid = tun_entry->outer_tun_flow_id;
+
+ return BNXT_TF_RC_NORMAL;
+}
+
+static int32_t
+ulp_get_tun_entry(struct ulp_rte_parser_params *params,
+ struct bnxt_tun_cache_entry **tun_entry,
+ uint16_t *tun_idx)
+{
+ int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
+ struct bnxt_tun_cache_entry *tun_tbl;
+ bool tun_entry_found = false, free_entry_found = false;
+
+ tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
+ if (!tun_tbl)
+ return BNXT_TF_RC_ERROR;
+
+ for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+ if (!memcmp(&tun_tbl[i].t_dst_ip,
+ &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
+ sizeof(rte_be32_t)) ||
+ !memcmp(&tun_tbl[i].t_dst_ip6,
+ &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
+ 16)) {
+ tun_entry_found = true;
+ break;
+ }
+
+ if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
+ first_free_entry = i;
+ free_entry_found = true;
+ }
+ }
+
+ if (tun_entry_found) {
+ *tun_entry = &tun_tbl[i];
+ *tun_idx = i;
+ } else {
+ if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
+ return BNXT_TF_RC_ERROR;
+ *tun_entry = &tun_tbl[first_free_entry];
+ *tun_idx = first_free_entry;
+ }
+
+ return 0;
+}
+
+int32_t
+ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
+{
+ bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
+ bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
+ enum bnxt_ulp_tun_flow_state flow_state;
+ struct bnxt_tun_cache_entry *tun_entry;
+ uint32_t l3_tun, l3_tun_decap;
+ uint16_t tun_idx;
+ int rc;
+
+ /* Computational fields that indicate it's a TUNNEL DECAP flow */
+ l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
+ l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_L3_TUN_DECAP);
+ if (!l3_tun)
+ return BNXT_TF_RC_NORMAL;
+
+ rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
+ if (rc == BNXT_TF_RC_ERROR)
+ return rc;
+
+ flow_state = tun_entry->state;
+ /* Outer tunnel flow validation */
+ outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
+ outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
+ outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
+ outer_tun_sig);
+
+ /* Inner tunnel flow validation */
+ inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
+ first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
+ inner_tun_sig);
+ inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
+ inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
+ inner_tun_sig);
+
+ if (outer_tun_reject) {
+ tun_entry->outer_tun_rej_cnt++;
+ BNXT_TF_DBG(ERR,
+ "Tunnel F1 flow rejected, COUNT: %d\n",
+ tun_entry->outer_tun_rej_cnt);
+ /* Inner tunnel flow is rejected if it comes between first inner
+ * tunnel flow and outer flow requests.
+ */
+ } else if (inner_tun_reject) {
+ tun_entry->inner_tun_rej_cnt++;
+ BNXT_TF_DBG(ERR,
+ "Tunnel F2 flow rejected, COUNT: %d\n",
+ tun_entry->inner_tun_rej_cnt);
+ }
+
+ if (outer_tun_reject || inner_tun_reject)
+ return BNXT_TF_RC_ERROR;
+ else if (first_inner_tun_flow)
+ return ulp_post_process_first_inner_tun_flow(params, tun_entry);
+ else if (outer_tun_flow)
+ return ulp_post_process_outer_tun_flow(params, tun_entry,
+ tun_idx);
+ else if (inner_tun_flow)
+ return ulp_post_process_inner_tun_flow(params, tun_entry);
+ else
+ return BNXT_TF_RC_NORMAL;
+}
+
+void
+ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
+{
+ memset(&tun_tbl[tun_idx], 0,
+ sizeof(struct bnxt_tun_cache_entry));
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_TUN_H_
+#define _BNXT_TUN_H_
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include "rte_ethdev.h"
+
+#include "ulp_template_db_enum.h"
+#include "ulp_template_struct.h"
+
+#define BNXT_OUTER_TUN_SIGNATURE(l3_tun, params) \
+ ((l3_tun) && \
+ ULP_BITMAP_ISSET((params)->act_bitmap.bits, \
+ BNXT_ULP_ACTION_BIT_JUMP))
+#define BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params) \
+ ((l3_tun) && (l3_tun_decap) && \
+ !ULP_BITMAP_ISSET((params)->hdr_bitmap.bits, \
+ BNXT_ULP_HDR_BIT_O_ETH))
+
+#define BNXT_FIRST_INNER_TUN_FLOW(state, inner_tun_sig) \
+ ((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
+#define BNXT_INNER_TUN_FLOW(state, inner_tun_sig) \
+ ((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))
+#define BNXT_OUTER_TUN_FLOW(outer_tun_sig) ((outer_tun_sig))
+
+/* It is invalid to get another outer flow offload request
+ * for the same tunnel, while the outer flow is already offloaded.
+ */
+#define BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig) \
+ ((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))
+/* It is invalid to get another inner flow offload request
+ * for the same tunnel, while the outer flow is not yet offloaded.
+ */
+#define BNXT_REJECT_INNER_TUN_FLOW(state, inner_tun_sig) \
+ ((state) == BNXT_ULP_FLOW_STATE_TUN_I_CACHED && (inner_tun_sig))
+
+#define ULP_TUN_O_DMAC_HDR_FIELD_INDEX 1
+#define ULP_TUN_O_IPV4_DIP_INDEX 19
+#define ULP_TUN_O_IPV6_DIP_INDEX 17
+
+/* When a flow offload request comes the following state transitions
+ * happen based on the order in which the outer & inner flow offload
+ * requests arrive.
+ *
+ * If inner tunnel flow offload request arrives first then the flow
+ * state will change from BNXT_ULP_FLOW_STATE_NORMAL to
+ * BNXT_ULP_FLOW_STATE_TUN_I_CACHED and the following outer tunnel
+ * flow offload request will change the state of the flow to
+ * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from BNXT_ULP_FLOW_STATE_TUN_I_CACHED.
+ *
+ * If outer tunnel flow offload request arrives first then the flow state
+ * will change from BNXT_ULP_FLOW_STATE_NORMAL to
+ * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
+ *
+ * Once the flow state is in BNXT_ULP_FLOW_STATE_TUN_O_OFFLD, any inner
+ * tunnel flow offload requests after that point will be treated as a
+ * normal flow and the tunnel flow state remains in
+ * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD
+ */
+enum bnxt_ulp_tun_flow_state {
+ BNXT_ULP_FLOW_STATE_NORMAL = 0,
+ BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
+ BNXT_ULP_FLOW_STATE_TUN_I_CACHED
+};
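As an editorial illustration only (this hypothetical helper is not part of the patch), the state transitions described above can be summarized as:

/* Hypothetical helper: next state of a tunnel cache entry given its current
 * state and whether the arriving request is the outer (F1) or inner (F2)
 * tunnel flow.
 */
static inline enum bnxt_ulp_tun_flow_state
bnxt_tun_next_state(enum bnxt_ulp_tun_flow_state cur, bool outer_flow)
{
	if (outer_flow)
		return BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
	if (cur == BNXT_ULP_FLOW_STATE_NORMAL)
		return BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
	/* Inner flows that arrive after TUN_O_OFFLD are treated as normal
	 * flows, so the state does not change.
	 */
	return cur;
}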
+
+struct bnxt_tun_cache_entry {
+ enum bnxt_ulp_tun_flow_state state;
+ bool valid;
+ bool t_dst_ip_valid;
+ uint8_t t_dmac[RTE_ETHER_ADDR_LEN];
+ union {
+ rte_be32_t t_dst_ip;
+ uint8_t t_dst_ip6[16];
+ };
+ uint32_t outer_tun_flow_id;
+ uint32_t first_inner_tun_flow_id;
+ uint16_t outer_tun_rej_cnt;
+ uint16_t inner_tun_rej_cnt;
+ struct ulp_rte_parser_params first_inner_tun_params;
+};
+
+void
+ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);
+
+#endif