memcpy(buffer, &temp_val, sizeof(uint32_t));
}
+/*
+ * Utility function to skip the void items. Advances *item past any VOID
+ * items and returns 1 if a non-void item remains, or 0 if *item is NULL.
+ */
+static inline int32_t
+ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
+{
+ if (!*item)
+ return 0;
+ if (increment)
+ (*item)++;
+ while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
+ (*item)++;
+ if (*item)
+ return 1;
+ return 0;
+}
+
/*
* Function to handle the parsing of RTE Flows and placing
* the RTE flow items into the ulp structures.
return BNXT_TF_RC_SUCCESS;
}
+/*
+ * Function to handle the parsing of RTE Flows and placing
+ * the RTE flow actions into the ulp structures.
+ */
+int32_t
+bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
+ struct ulp_rte_act_bitmap *act_bitmap,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action *action_item = actions;
+ struct bnxt_ulp_rte_act_info *hdr_info;
+
+ /* Parse all the actions in the action list */
+ while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
+ /* get the action information from the ulp_act_info table */
+ hdr_info = &ulp_act_info[action_item->type];
+ if (hdr_info->act_type ==
+ BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
+ BNXT_TF_DBG(ERR,
+ "Truflow parser does not support act %u\n",
+ action_item->type);
+ return BNXT_TF_RC_ERROR;
+ } else if (hdr_info->act_type ==
+ BNXT_ULP_ACT_TYPE_SUPPORTED) {
+ /* call the registered callback handler */
+ if (hdr_info->proto_act_func) {
+ if (hdr_info->proto_act_func(action_item,
+ act_bitmap,
+ act_prop) !=
+ BNXT_TF_RC_SUCCESS) {
+ return BNXT_TF_RC_ERROR;
+ }
+ }
+ }
+ action_item++;
+ }
+ return BNXT_TF_RC_SUCCESS;
+}
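+/*
+ * Note: the action handlers below only set bits in act_bitmap and copy
+ * fields into act_prop; callers are assumed to zero both structures before
+ * calling bnxt_ulp_rte_parser_act_parse() and to treat any return other
+ * than BNXT_TF_RC_SUCCESS as a parse failure.
+ */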
+
/* Function to handle the parsing of RTE Flow item PF Header. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap,
{
return BNXT_TF_RC_SUCCESS;
}
+
+/* Function to handle the parsing of RTE Flow action void Header. */
+int32_t
+ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
+ struct ulp_rte_act_bitmap *act __rte_unused,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action Mark Header. */
+int32_t
+ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action_mark *mark;
+ uint32_t mark_id = 0;
+
+ mark = action_item->conf;
+ if (mark) {
+ mark_id = tfp_cpu_to_be_32(mark->id);
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
+ &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
+
+ /* Update the act_bitmap with mark */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
+ return BNXT_TF_RC_SUCCESS;
+ }
+ BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+}
+
+/* Function to handle the parsing of RTE Flow action RSS Header. */
+int32_t
+ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ const struct rte_flow_action_rss *rss;
+
+ rss = action_item->conf;
+ if (rss) {
+ /* Update the act_bitmap with rss */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_RSS);
+ return BNXT_TF_RC_SUCCESS;
+ }
+ BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+}
+
+/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
+int32_t
+ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *ap)
+{
+ const struct rte_flow_action_vxlan_encap *vxlan_encap;
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ struct rte_flow_item_vxlan vxlan_spec;
+ uint32_t vlan_num = 0, vlan_size = 0;
+ uint32_t ip_size = 0, ip_type = 0;
+ uint32_t vxlan_size = 0;
+ uint8_t *buff;
+ /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
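+ /* 0x45 = IPv4 with IHL 5, TTL 0x40 = 64, protocol 0x11 = UDP */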
+ const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x40, 0x11};
+
+ vxlan_encap = action_item->conf;
+ if (!vxlan_encap) {
+ BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ item = vxlan_encap->definition;
+ if (!item) {
+ BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ if (!ulp_rte_item_skip_void(&item, 0))
+ return BNXT_TF_RC_ERROR;
+
+ /* must have ethernet header */
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ eth_spec = item->spec;
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
+ ulp_encap_buffer_copy(buff,
+ eth_spec->dst.addr_bytes,
+ BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
+
+ /* Goto the next item */
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+
+ /* May have vlan header */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_num++;
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
+ ulp_encap_buffer_copy(buff,
+ item->spec,
+ sizeof(struct rte_flow_item_vlan));
+
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* may have two vlan headers */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_num++;
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
+ sizeof(struct rte_flow_item_vlan)],
+ item->spec,
+ sizeof(struct rte_flow_item_vlan));
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+ }
+ /* Update the vlan count and size if one or more vlan headers are present */
+ if (vlan_num) {
+ vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
+ vlan_num = tfp_cpu_to_be_32(vlan_num);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
+ &vlan_num,
+ sizeof(uint32_t));
+ vlan_size = tfp_cpu_to_be_32(vlan_size);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
+ &vlan_size,
+ sizeof(uint32_t));
+ }
+
+ /* L3 must be IPv4 or IPv6 */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ ipv4_spec = item->spec;
+ ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
+
+ /* copy the ipv4 details */
+ if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
+ ulp_encap_buffer_copy(buff,
+ def_ipv4_hdr,
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
+ BNXT_ULP_ENCAP_IPV4_ID_PROTO);
+ } else {
+ const uint8_t *tmp_buff;
+
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
+ ulp_encap_buffer_copy(buff,
+ &ipv4_spec->hdr.version_ihl,
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
+ tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
+ ulp_encap_buffer_copy(buff,
+ tmp_buff,
+ BNXT_ULP_ENCAP_IPV4_ID_PROTO);
+ }
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
+ BNXT_ULP_ENCAP_IPV4_ID_PROTO];
+ ulp_encap_buffer_copy(buff,
+ (const uint8_t *)&ipv4_spec->hdr.dst_addr,
+ BNXT_ULP_ENCAP_IPV4_DEST_IP);
+
+ /* Update the ip size details */
+ ip_size = tfp_cpu_to_be_32(ip_size);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
+ &ip_size, sizeof(uint32_t));
+
+ /* update the ip type */
+ ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
+ &ip_type, sizeof(uint32_t));
+
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ ipv6_spec = item->spec;
+ ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
+
+ /* copy the ipv6 details */
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
+ ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
+
+ /* Update the ip size details */
+ ip_size = tfp_cpu_to_be_32(ip_size);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
+ &ip_size, sizeof(uint32_t));
+
+ /* update the ip type */
+ ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
+ &ip_type, sizeof(uint32_t));
+
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+ } else {
+ BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* L4 is UDP */
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+ BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ /* copy the udp details */
+ ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
+ item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
+
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+
+ /* Finally VXLAN */
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ vxlan_size = sizeof(struct rte_flow_item_vxlan);
+ /* copy the vxlan details */
+ memcpy(&vxlan_spec, item->spec, vxlan_size);
+ vxlan_spec.flags = 0x08;
+ ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
+ (const uint8_t *)&vxlan_spec,
+ vxlan_size);
+ vxlan_size = tfp_cpu_to_be_32(vxlan_size);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
+ &vxlan_size, sizeof(uint32_t));
+
+ /* Update the act_bitmap with vxlan encap */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
+int32_t
+ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
+ __rte_unused,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ /* Update the act_bitmap with vxlan decap */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action drop Header. */
+int32_t
+ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ /* Update the act_bitmap with drop */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_DROP);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action count. */
+int32_t
+ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ const struct rte_flow_action_count *act_count;
+
+ act_count = action_item->conf;
+ if (act_count) {
+ if (act_count->shared) {
+ BNXT_TF_DBG(ERR,
+ "Parse Error:Shared count not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
+ &act_count->id,
+ BNXT_ULP_ACT_PROP_SZ_COUNT);
+ }
+
+ /* Update the act_bitmap with count */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_COUNT);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action PF. */
+int32_t
+ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ uint8_t *svif_buf;
+ uint8_t *vnic_buffer;
+ uint32_t svif;
+
+ /* Update the act_bitmap with vnic bit */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
+
+ /* copy the PF of the current device into VNIC Property */
+ svif_buf = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
+ ulp_util_field_int_read(svif_buf, &svif);
+ vnic_buffer = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
+ ulp_util_field_int_write(vnic_buffer, svif);
+
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action VF. */
+int32_t
+ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action_vf *vf_action;
+
+ vf_action = action_item->conf;
+ if (vf_action) {
+ if (vf_action->original) {
+ BNXT_TF_DBG(ERR,
+ "Parse Error:VF Original not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ /* TBD: Update the computed VNIC using VF conversion */
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
+ &vf_action->id,
+ BNXT_ULP_ACT_PROP_SZ_VNIC);
+ }
+
+ /* Update the act_bitmap with vnic */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action port_id. */
+int32_t
+ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action_port_id *port_id;
+
+ port_id = act_item->conf;
+ if (port_id) {
+ if (port_id->original) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:Portid Original not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ /* TBD: Update the computed VNIC using port conversion */
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
+ &port_id->id,
+ BNXT_ULP_ACT_PROP_SZ_VNIC);
+ }
+
+ /* Update the act_bitmap with vnic */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action phy_port. */
+int32_t
+ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action_phy_port *phy_port;
+
+ phy_port = action_item->conf;
+ if (phy_port) {
+ if (phy_port->original) {
+ BNXT_TF_DBG(ERR,
+ "Parse Err:Port Original not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
+ &phy_port->index,
+ BNXT_ULP_ACT_PROP_SZ_VPORT);
+ }
+
+ /* Update the act_bitmap with vport */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VPORT);
+ return BNXT_TF_RC_SUCCESS;
+}
#include "ulp_template_db.h"
#include "ulp_template_struct.h"
+/* defines to be used in the tunnel header parsing */
+#define BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS 2
+#define BNXT_ULP_ENCAP_IPV4_ID_PROTO 6
+#define BNXT_ULP_ENCAP_IPV4_DEST_IP 4
+#define BNXT_ULP_ENCAP_IPV4_SIZE 12
+#define BNXT_ULP_ENCAP_IPV6_SIZE 8
+#define BNXT_ULP_ENCAP_UDP_SIZE 4
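+/*
+ * Note: BNXT_ULP_ENCAP_IPV4_SIZE is the sum of the fields copied by the
+ * vxlan encap handler: VER_HLEN_TOS (2) + ID_PROTO (6) + DEST_IP (4) = 12.
+ */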
+
/*
* Function to handle the parsing of RTE Flows and placing
* the RTE flow items into the ulp structures.
struct ulp_rte_hdr_bitmap *hdr_bitmap,
struct ulp_rte_hdr_field *hdr_field);
+/*
+ * Function to handle the parsing of RTE Flows and placing
+ * the RTE flow actions into the ulp structures.
+ */
+int32_t
+bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
+ struct ulp_rte_act_bitmap *act_bitmap,
+ struct ulp_rte_act_prop *act_prop);
+
/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
uint32_t *field_idx,
uint32_t *vlan_idx);
-/* Function to handle the parsing of RTE Flow item port id Header. */
+/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
struct ulp_rte_hdr_bitmap *hdr_bitmap,
uint32_t *field_idx,
uint32_t *vlan_idx);
+/* Function to handle the parsing of RTE Flow action void Header. */
+int32_t
+ulp_rte_void_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action RSS Header. */
+int32_t
+ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action Mark Header. */
+int32_t
+ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
+int32_t
+ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
+int32_t
+ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action drop Header. */
+int32_t
+ulp_rte_drop_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action count. */
+int32_t
+ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action PF. */
+int32_t
+ulp_rte_pf_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action VF. */
+int32_t
+ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
+/* Function to handle the parsing of RTE Flow action port_id. */
+int32_t
+ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_p);
+
+/* Function to handle the parsing of RTE Flow action phy_port. */
+int32_t
+ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop);
+
#endif /* _ULP_RTE_PARSER_H_ */
BNXT_ULP_ACT_PROP_SZ_LAST
};
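+/*
+ * Action parse table indexed by RTE_FLOW_ACTION_TYPE_*. Entries marked
+ * BNXT_ULP_ACT_TYPE_NOT_SUPPORTED cause bnxt_ulp_rte_parser_act_parse()
+ * to reject the flow; supported entries dispatch to their proto_act_func.
+ */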
+struct bnxt_ulp_rte_act_info ulp_act_info[] = {
+ [RTE_FLOW_ACTION_TYPE_END] = {
+ .act_type = BNXT_ULP_ACT_TYPE_END,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_VOID] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_void_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_PASSTHRU] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_JUMP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_MARK] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_mark_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_FLAG] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_QUEUE] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_DROP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_drop_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_COUNT] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_count_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_RSS] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_rss_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_PF] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_pf_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_VF] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_vf_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_PHY_PORT] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_phy_port_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_PORT_ID] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_port_id_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_METER] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SECURITY] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_SET_MPLS_TTL] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_DEC_MPLS_TTL] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_SET_NW_TTL] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_OUT] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_IN] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_POP_MPLS] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_vxlan_encap_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_vxlan_decap_act_handler
+ },
+ [RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_RAW_ENCAP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_RAW_DECAP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_IPV4_DST] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_IPV6_DST] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_TP_SRC] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_TP_DST] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_MAC_SWAP] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_DEC_TTL] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_TTL] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_MAC_SRC] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_SET_MAC_DST] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_INC_TCP_ACK] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ },
+ [RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK] = {
+ .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
+ .proto_act_func = NULL
+ }
+};
+
struct bnxt_ulp_device_params ulp_device_params[] = {
[BNXT_ULP_DEVICE_ID_WH_PLUS] = {
.global_fid_enable = BNXT_ULP_SYM_YES,