+
+/* No-op parser for the RTE Flow VOID action. */
+int32_t
+ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
+ struct ulp_rte_act_bitmap *act __rte_unused,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ /* VOID actions carry no configuration; there is nothing to record. */
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action Mark Header. */
+int32_t
+ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action_mark *mark;
+ uint32_t mark_id;
+
+ mark = action_item->conf;
+ if (!mark) {
+ BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* Store the mark id in big-endian form in the action property. */
+ mark_id = tfp_cpu_to_be_32(mark->id);
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
+ &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
+
+ /* Update the act_bitmap with mark */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action RSS Header. */
+int32_t
+ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ const struct rte_flow_action_rss *rss;
+
+ rss = action_item->conf;
+ if (!rss) {
+ BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* Update the act_bitmap with rss */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_RSS);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/*
+ * Function to handle the parsing of RTE Flow action vxlan_encap Header.
+ *
+ * Walks the item list in vxlan_encap->definition, which must be laid out
+ * as ETH [VLAN [VLAN]] (IPV4|IPV6) UDP VXLAN, and copies each header into
+ * the encap action properties in 'ap'.  Returns BNXT_TF_RC_SUCCESS, or
+ * BNXT_TF_RC_ERROR on any missing or out-of-order header.
+ */
+int32_t
+ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *ap)
+{
+ const struct rte_flow_action_vxlan_encap *vxlan_encap;
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ struct rte_flow_item_vxlan vxlan_spec;
+ uint32_t vlan_num = 0, vlan_size = 0;
+ uint32_t ip_size = 0, ip_type = 0;
+ uint32_t vxlan_size = 0;
+ uint8_t *buff;
+ /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
+ const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x40, 0x11};
+
+ vxlan_encap = action_item->conf;
+ if (!vxlan_encap) {
+ BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ item = vxlan_encap->definition;
+ if (!item) {
+ BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* Skip any leading VOID items before the mandatory ethernet header */
+ if (!ulp_rte_item_skip_void(&item, 0))
+ return BNXT_TF_RC_ERROR;
+
+ /* must have ethernet header */
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ eth_spec = item->spec;
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
+ ulp_encap_buffer_copy(buff,
+ eth_spec->dst.addr_bytes,
+ BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
+
+ /* Goto the next item */
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+
+ /* May have vlan header */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_num++;
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
+ ulp_encap_buffer_copy(buff,
+ item->spec,
+ sizeof(struct rte_flow_item_vlan));
+
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* may have two vlan headers */
+ /*
+ * NOTE(review): the second tag is stored with plain memcpy while the
+ * first tag above uses ulp_encap_buffer_copy; confirm the asymmetry
+ * is intentional.
+ */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_num++;
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
+ sizeof(struct rte_flow_item_vlan)],
+ item->spec,
+ sizeof(struct rte_flow_item_vlan));
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+ }
+ /* Update the vlan count and size if one or more tags were seen */
+ if (vlan_num) {
+ vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
+ vlan_num = tfp_cpu_to_be_32(vlan_num);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
+ &vlan_num,
+ sizeof(uint32_t));
+ vlan_size = tfp_cpu_to_be_32(vlan_size);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
+ &vlan_size,
+ sizeof(uint32_t));
+ }
+
+ /* L3 must be IPv4, IPv6 */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ ipv4_spec = item->spec;
+ ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
+
+ /* copy the ipv4 details */
+ /* If the spec's leading fields are all zero, fall back to the
+ * default IPv4 header template above.
+ */
+ if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
+ ulp_encap_buffer_copy(buff,
+ def_ipv4_hdr,
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
+ BNXT_ULP_ENCAP_IPV4_ID_PROTO);
+ } else {
+ const uint8_t *tmp_buff;
+
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
+ ulp_encap_buffer_copy(buff,
+ &ipv4_spec->hdr.version_ihl,
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
+ tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
+ ulp_encap_buffer_copy(buff,
+ tmp_buff,
+ BNXT_ULP_ENCAP_IPV4_ID_PROTO);
+ }
+ /* Destination address goes after the ver/len/tos and id/proto
+ * sections regardless of which branch filled them.
+ */
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
+ BNXT_ULP_ENCAP_IPV4_ID_PROTO];
+ ulp_encap_buffer_copy(buff,
+ (const uint8_t *)&ipv4_spec->hdr.dst_addr,
+ BNXT_ULP_ENCAP_IPV4_DEST_IP);
+
+ /* Update the ip size details */
+ ip_size = tfp_cpu_to_be_32(ip_size);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
+ &ip_size, sizeof(uint32_t));
+
+ /* update the ip type */
+ ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
+ &ip_type, sizeof(uint32_t));
+
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ ipv6_spec = item->spec;
+ ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
+
+ /* copy the ipv6 details */
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
+ ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
+
+ /* Update the ip size details */
+ ip_size = tfp_cpu_to_be_32(ip_size);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
+ &ip_size, sizeof(uint32_t));
+
+ /* update the ip type */
+ ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
+ &ip_type, sizeof(uint32_t));
+
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+ } else {
+ BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
+ return BNXT_TF_RC_ERROR;
+ }
+
+ /* L4 is UDP */
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+ BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ /* copy the udp details */
+ ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
+ item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
+
+ if (!ulp_rte_item_skip_void(&item, 1))
+ return BNXT_TF_RC_ERROR;
+
+ /* Finally VXLAN */
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
+ return BNXT_TF_RC_ERROR;
+ }
+ vxlan_size = sizeof(struct rte_flow_item_vxlan);
+ /* copy the vxlan details */
+ memcpy(&vxlan_spec, item->spec, vxlan_size);
+ /* 0x08 sets the VNI-valid (I) flag in the VXLAN header (RFC 7348) */
+ vxlan_spec.flags = 0x08;
+ ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
+ (const uint8_t *)&vxlan_spec,
+ vxlan_size);
+ vxlan_size = tfp_cpu_to_be_32(vxlan_size);
+ memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
+ &vxlan_size, sizeof(uint32_t));
+
+ /* update the act_bitmap with vxlan encap */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
+int32_t
+ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
+ __rte_unused,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ /* Update the act_bitmap with vxlan decap */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action drop Header. */
+int32_t
+ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop __rte_unused)
+{
+ /* Drop carries no configuration; just flag it in the action bitmap. */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_DROP);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action count. */
+int32_t
+ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+
+{
+ const struct rte_flow_action_count *act_count;
+
+ act_count = action_item->conf;
+ if (act_count) {
+ /* Shared counters are not supported by this parser. */
+ if (act_count->shared) {
+ BNXT_TF_DBG(ERR,
+ "Parse Error:Shared count not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ /* Record the counter id in the action property table. */
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
+ &act_count->id,
+ BNXT_ULP_ACT_PROP_SZ_COUNT);
+ }
+
+ /* Update the act_bitmap with count */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_COUNT);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action PF. */
+int32_t
+ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ uint8_t *vnic_prop;
+ uint32_t svif;
+
+ /* Update the act_bitmap with the vnic bit */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
+
+ /*
+ * Read the svif stored in the VNIC property and write it back through
+ * the integer field helpers.
+ * NOTE(review): source and destination are the same property index,
+ * so this is a read/write round-trip — confirm it is intentional.
+ */
+ vnic_prop = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
+ ulp_util_field_int_read(vnic_prop, &svif);
+ ulp_util_field_int_write(vnic_prop, svif);
+
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action VF. */
+int32_t
+ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action_vf *vf_action;
+
+ vf_action = action_item->conf;
+ if (vf_action) {
+ /* Redirect to the original VF is not supported. */
+ if (vf_action->original) {
+ BNXT_TF_DBG(ERR,
+ "Parse Error:VF Original not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ /* TBD: Update the computed VNIC using VF conversion */
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
+ &vf_action->id,
+ BNXT_ULP_ACT_PROP_SZ_VNIC);
+ }
+
+ /* Update the act_bitmap with vnic */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action port_id. */
+int32_t
+ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action_port_id *port_id;
+
+ port_id = act_item->conf;
+ if (port_id) {
+ /* Redirect to the original port is not supported. */
+ if (port_id->original) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:Portid Original not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ /* TBD: Update the computed VNIC using port conversion */
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
+ &port_id->id,
+ BNXT_ULP_ACT_PROP_SZ_VNIC);
+ }
+
+ /* Update the act_bitmap with vnic */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
+ return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action phy_port. */
+int32_t
+ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_act_bitmap *act,
+ struct ulp_rte_act_prop *act_prop)
+{
+ const struct rte_flow_action_phy_port *phy_port;
+
+ phy_port = action_item->conf;
+ if (phy_port) {
+ /* Redirect to the original port is not supported. */
+ if (phy_port->original) {
+ BNXT_TF_DBG(ERR,
+ "Parse Err:Port Original not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ /* Record the physical port index in the vport property. */
+ memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
+ &phy_port->index,
+ BNXT_ULP_ACT_PROP_SZ_VPORT);
+ }
+
+ /* Update the act_bitmap with vport */
+ ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VPORT);
+ return BNXT_TF_RC_SUCCESS;
+}