From: Kishore Padmanabha
Date: Fri, 11 Sep 2020 01:55:56 +0000 (-0700)
Subject: net/bnxt: enable VXLAN IPv6 encapsulation
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=9238ac2a5cad854e67a35877e183a15d22e476bd;p=dpdk.git

net/bnxt: enable VXLAN IPv6 encapsulation

Add support for VXLAN IPv6 tunnel encapsulation. The IPv6 traffic class
and flow label wildcard matches are ignored so that the offload can
still be supported for some applications.

Signed-off-by: Kishore Padmanabha
Reviewed-by: Mike Baucom
Reviewed-by: Ajit Khaparde
---

diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 1e4aa8da48..eae8884bd2 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -149,6 +149,7 @@ ulp_ctx_session_open(struct bnxt *bp,
 
         /* SP */
         resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
+        resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 511;
 
         rc = tf_open_session(&bp->tfp, &params);
         if (rc) {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index c0339e6ab7..770fec55c2 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -78,6 +78,16 @@ ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
         *idx = *idx + 1;
 }
 
+/* Utility function to ignore field mask items */
+static void
+ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
+                         uint32_t *idx,
+                         const void *buffer __rte_unused,
+                         uint32_t size __rte_unused)
+{
+        *idx = *idx + 1;
+}
+
 /*
  * Function to handle the parsing of RTE Flows and placing
  * the RTE flow items into the ulp structures.
@@ -741,7 +751,8 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
          * wild card match and it is not supported. This is a work
          * around and shall be addressed in the future.
          */
-        idx += 1;
+        ulp_rte_prsr_mask_ignore(params, &idx, &priority,
+                                 sizeof(priority));
 
         ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
                                sizeof(vlan_tag));
@@ -920,7 +931,10 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
          * match and it is not supported. This is a work around and
          * shall be addressed in the future.
          */
-        idx += 1;
+        ulp_rte_prsr_mask_ignore(params, &idx,
+                                 &ipv4_mask->hdr.type_of_service,
+                                 sizeof(ipv4_mask->hdr.type_of_service)
+                                 );
 
         ulp_rte_prsr_mask_copy(params, &idx,
                                &ipv4_mask->hdr.total_length,
@@ -1041,17 +1055,17 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
                 ulp_rte_prsr_mask_copy(params, &idx,
                                        &vtcf_mask,
                                        size);
-
+                /*
+                 * The TC and flow label fields are ignored since OVS is
+                 * setting them for match and it is not supported.
+                 * This is a work around and
+                 * shall be addressed in the future.
+                 */
                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
-                ulp_rte_prsr_mask_copy(params, &idx,
-                                       &vtcf_mask,
-                                       size);
-
+                ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
                 vtcf_mask =
                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
-                ulp_rte_prsr_mask_copy(params, &idx,
-                                       &vtcf_mask,
-                                       size);
+                ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
 
                 ulp_rte_prsr_mask_copy(params, &idx,
                                        &ipv6_mask->hdr.payload_len,
@@ -1414,8 +1428,12 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
                                         0x00, 0x40, 0x11};
+        /* IPv6 header per byte - vtc_flow (4), payload_len (2), nexthdr, ttl */
+        const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
+                                        0x00, 0x11, 0xf6};
         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
         struct ulp_rte_act_prop *ap = &params->act_prop;
+        const uint8_t *tmp_buff;
 
         vxlan_encap = action_item->conf;
         if (!vxlan_encap) {
@@ -1441,12 +1459,14 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
         ulp_encap_buffer_copy(buff,
                               eth_spec->dst.addr_bytes,
-                              BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
+                              BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
+                              ULP_BUFFER_ALIGN_8_BYTE);
 
         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
         ulp_encap_buffer_copy(buff,
                               eth_spec->src.addr_bytes,
-                              BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
+                              BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
+                              ULP_BUFFER_ALIGN_8_BYTE);
 
         /* Goto the next item */
         if (!ulp_rte_item_skip_void(&item, 1))
@@ -1458,7 +1478,8 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
                 ulp_encap_buffer_copy(buff,
                                       item->spec,
-                                      sizeof(struct rte_flow_item_vlan));
+                                      sizeof(struct rte_flow_item_vlan),
+                                      ULP_BUFFER_ALIGN_8_BYTE);
 
                 if (!ulp_rte_item_skip_void(&item, 1))
                         return BNXT_TF_RC_ERROR;
@@ -1499,32 +1520,41 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
                         ulp_encap_buffer_copy(buff,
                                               def_ipv4_hdr,
                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
-                                              BNXT_ULP_ENCAP_IPV4_ID_PROTO);
+                                              BNXT_ULP_ENCAP_IPV4_ID_PROTO,
+                                              ULP_BUFFER_ALIGN_8_BYTE);
                 } else {
-                        const uint8_t *tmp_buff;
-
+                        /* Total length being ignored in the ip hdr. */
                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
                         ulp_encap_buffer_copy(buff,
                                               tmp_buff,
-                                              BNXT_ULP_ENCAP_IPV4_ID_PROTO);
+                                              BNXT_ULP_ENCAP_IPV4_ID_PROTO,
+                                              ULP_BUFFER_ALIGN_8_BYTE);
 
                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
                                                 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
                         ulp_encap_buffer_copy(buff,
                                               &ipv4_spec->hdr.version_ihl,
-                                              BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
+                                              BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
+                                              ULP_BUFFER_ALIGN_8_BYTE);
                 }
+
+                /* Update the dst ip address in ip encap buffer */
                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
                                         BNXT_ULP_ENCAP_IPV4_ID_PROTO];
                 ulp_encap_buffer_copy(buff,
                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
-                                      BNXT_ULP_ENCAP_IPV4_DEST_IP);
+                                      sizeof(ipv4_spec->hdr.dst_addr),
+                                      ULP_BUFFER_ALIGN_8_BYTE);
 
-                buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
+                /* Update the src ip address */
+                buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
+                                        BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
+                                        sizeof(ipv4_spec->hdr.src_addr)];
                 ulp_encap_buffer_copy(buff,
                                       (const uint8_t *)&ipv4_spec->hdr.src_addr,
-                                      BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
+                                      sizeof(ipv4_spec->hdr.src_addr),
+                                      ULP_BUFFER_ALIGN_8_BYTE);
 
                 /* Update the ip size details */
                 ip_size = tfp_cpu_to_be_32(ip_size);
@@ -1546,9 +1576,46 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
                 ipv6_spec = item->spec;
                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
 
-                /* copy the ipv4 details */
-                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
-                       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
+                /* copy the ipv6 details */
+                tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
+                if (ulp_buffer_is_empty(tmp_buff,
+                                        BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
+                        buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
+                        ulp_encap_buffer_copy(buff,
+                                              def_ipv6_hdr,
+                                              sizeof(def_ipv6_hdr),
+                                              ULP_BUFFER_ALIGN_8_BYTE);
+                } else {
+                        /* The payload length being ignored in the ip hdr. */
+                        buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
+                        tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
+                        ulp_encap_buffer_copy(buff,
+                                              tmp_buff,
+                                              BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
+                                              ULP_BUFFER_ALIGN_8_BYTE);
+                        buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+                                                BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
+                                                BNXT_ULP_ENCAP_IPV6_DO];
+                        tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
+                        ulp_encap_buffer_copy(buff,
+                                              tmp_buff,
+                                              BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
+                                              ULP_BUFFER_ALIGN_8_BYTE);
+                }
+                /* Update the dst ip address in ip encap buffer */
+                buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+                                        sizeof(def_ipv6_hdr)];
+                ulp_encap_buffer_copy(buff,
+                                      (const uint8_t *)ipv6_spec->hdr.dst_addr,
+                                      sizeof(ipv6_spec->hdr.dst_addr),
+                                      ULP_BUFFER_ALIGN_8_BYTE);
+
+                /* Update the src ip address */
+                buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
+                ulp_encap_buffer_copy(buff,
+                                      (const uint8_t *)ipv6_spec->hdr.src_addr,
+                                      sizeof(ipv6_spec->hdr.src_addr),
+                                      ULP_BUFFER_ALIGN_16_BYTE);
 
                 /* Update the ip size details */
                 ip_size = tfp_cpu_to_be_32(ip_size);
@@ -1578,7 +1645,8 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
         }
         /* copy the udp details */
         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
-                              item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
+                              item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
+                              ULP_BUFFER_ALIGN_8_BYTE);
 
         if (!ulp_rte_item_skip_void(&item, 1))
                 return BNXT_TF_RC_ERROR;
@@ -1592,9 +1660,17 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
         /* copy the vxlan details */
         memcpy(&vxlan_spec, item->spec, vxlan_size);
         vxlan_spec.flags = 0x08;
-        ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
-                              (const uint8_t *)&vxlan_spec,
-                              vxlan_size);
+        buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
+        if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
+                ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
+                                      vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
+        } else {
+                ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
+                                      vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
+                ulp_encap_buffer_copy(buff + (vxlan_size / 2),
+                                      (const uint8_t *)&vxlan_spec.vni,
+                                      vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
+        }
         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
                &vxlan_size, sizeof(uint32_t));
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
index 7b6b57e0e3..41f3df998e 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
@@ -17,7 +17,10 @@
 #define BNXT_ULP_ENCAP_IPV4_ID_PROTO 6
 #define BNXT_ULP_ENCAP_IPV4_DEST_IP 4
 #define BNXT_ULP_ENCAP_IPV4_SIZE 12
-#define BNXT_ULP_ENCAP_IPV6_SIZE 8
+#define BNXT_ULP_ENCAP_IPV6_VTC_FLOW 4
+#define BNXT_ULP_ENCAP_IPV6_PROTO_TTL 2
+#define BNXT_ULP_ENCAP_IPV6_DO 2
+#define BNXT_ULP_ENCAP_IPV6_SIZE 24
 #define BNXT_ULP_ENCAP_UDP_SIZE 4
 #define BNXT_ULP_INVALID_SVIF_VAL -1U
 
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index de96afe8cf..cab3445a23 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -1036,7 +1036,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_act_tbl_list[] = {
         },
         {
         .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE,
-        .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV4,
+        .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV6,
         .resource_sub_type =
                 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_NORMAL,
         .cond_opcode = BNXT_ULP_COND_OPCODE_COMP_FIELD_IS_SET,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_utils.c b/drivers/net/bnxt/tf_ulp/ulp_utils.c
index a923da86ec..24474e2e27 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_utils.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_utils.c
@@ -546,8 +546,8 @@ ulp_blob_encap_swap_idx_set(struct ulp_blob *blob)
 void
 ulp_blob_perform_encap_swap(struct ulp_blob *blob)
 {
-        uint32_t i, idx = 0, end_idx = 0;
-        uint8_t temp_val_1, temp_val_2;
+        uint32_t i, idx = 0, end_idx = 0, roundoff;
+        uint8_t temp_val_1, temp_val_2;
 
         /* validate the arguments */
         if (!blob) {
@@ -556,7 +556,11 @@ ulp_blob_perform_encap_swap(struct ulp_blob *blob)
         }
         idx = ULP_BITS_2_BYTE_NR(blob->encap_swap_idx);
         end_idx = ULP_BITS_2_BYTE(blob->write_idx);
-
+        roundoff = ULP_BYTE_2_BITS(ULP_BITS_2_BYTE(end_idx));
+        if (roundoff > end_idx) {
+                blob->write_idx += ULP_BYTE_2_BITS(roundoff - end_idx);
+                end_idx = roundoff;
+        }
         while (idx <= end_idx) {
                 for (i = 0; i < 4; i = i + 2) {
                         temp_val_1 = blob->data[idx + i];
@@ -631,20 +635,35 @@ ulp_operand_read(uint8_t *operand,
  * dst [out] The destination buffer
  * src [in] The source buffer dst
  * size[in] size of the buffer.
+ * align[in] The alignment is either 8 or 16.
  */
 void
 ulp_encap_buffer_copy(uint8_t *dst,
                       const uint8_t *src,
-                      uint16_t size)
+                      uint16_t size,
+                      uint16_t align)
 {
-        uint16_t idx = 0;
-
-        /* copy 2 bytes at a time. Write MSB to LSB */
-        while ((idx + sizeof(uint16_t)) <= size) {
-                memcpy(&dst[idx], &src[size - idx - sizeof(uint16_t)],
-                       sizeof(uint16_t));
-                idx += sizeof(uint16_t);
-        }
+        uint16_t idx, tmp_size = 0;
+
+        do {
+                dst += tmp_size;
+                src += tmp_size;
+                idx = 0;
+                if (size > align) {
+                        tmp_size = align;
+                        size -= align;
+                } else {
+                        tmp_size = size;
+                        size = 0;
+                }
+                /* copy 2 bytes at a time. Write MSB to LSB */
+                while ((idx + sizeof(uint16_t)) <= tmp_size) {
+                        memcpy(&dst[idx],
+                               &src[tmp_size - idx - sizeof(uint16_t)],
+                               sizeof(uint16_t));
+                        idx += sizeof(uint16_t);
+                }
+        } while (size);
 }
 
 /*
diff --git a/drivers/net/bnxt/tf_ulp/ulp_utils.h b/drivers/net/bnxt/tf_ulp/ulp_utils.h
index 22dfb17324..c054a77a90 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_utils.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_utils.h
@@ -9,6 +9,9 @@
 #include "bnxt.h"
 #include "ulp_template_db_enum.h"
 
+#define ULP_BUFFER_ALIGN_8_BYTE 8
+#define ULP_BUFFER_ALIGN_16_BYTE 16
+
 /*
  * Macros for bitmap sets and gets
  * These macros can be used if the val are power of 2.
@@ -315,11 +318,13 @@ ulp_operand_read(uint8_t *operand,
  * dst [out] The destination buffer
  * src [in] The source buffer dst
  * size[in] size of the buffer.
+ * align[in] The alignment is either 8 or 16.
  */
 void
 ulp_encap_buffer_copy(uint8_t *dst,
                       const uint8_t *src,
-                      uint16_t size,
+                      uint16_t size,
+                      uint16_t align);
 
 /*
  * Check the buffer is empty
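
Note on the reworked copy: ulp_encap_buffer_copy() now splits the source into
"align"-sized chunks (8 or 16 bytes) and, within each chunk, writes the 16-bit
words in reverse order (MSB to LSB). The minimal standalone sketch below is not
part of the patch; encap_copy_sketch() and main() are illustrative names only,
and it just mirrors the chunking logic so the resulting byte order can be
checked on the host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same chunked 16-bit word reversal as the patched ulp_encap_buffer_copy(). */
static void
encap_copy_sketch(uint8_t *dst, const uint8_t *src, uint16_t size,
                  uint16_t align)
{
        uint16_t idx, chunk = 0;

        do {
                dst += chunk;
                src += chunk;
                /* take at most "align" bytes per pass */
                chunk = (size > align) ? align : size;
                size -= chunk;
                /* copy 2 bytes at a time, last word of the chunk first */
                for (idx = 0; idx + sizeof(uint16_t) <= chunk;
                     idx += sizeof(uint16_t))
                        memcpy(&dst[idx],
                               &src[chunk - idx - sizeof(uint16_t)],
                               sizeof(uint16_t));
        } while (size);
}

int main(void)
{
        /* 16-byte source copied with 8-byte alignment: each 8-byte half
         * comes out with its four 16-bit words reversed.
         */
        const uint8_t src[16] = { 0, 1, 2, 3, 4, 5, 6, 7,
                                  8, 9, 10, 11, 12, 13, 14, 15 };
        uint8_t dst[16] = { 0 };
        unsigned int i;

        encap_copy_sketch(dst, src, sizeof(src), 8);
        for (i = 0; i < sizeof(dst); i++)
                printf("%u ", (unsigned int)dst[i]);
        printf("\n"); /* prints: 6 7 4 5 2 3 0 1 14 15 12 13 10 11 8 9 */
        return 0;
}

This is also why the new IPv6 handling in ulp_rte_vxlan_encap_act_handler()
passes ULP_BUFFER_ALIGN_16_BYTE for the 16-byte source address (one chunk,
reversed as a whole), while the destination address and the 8-byte header
fields are copied with ULP_BUFFER_ALIGN_8_BYTE and therefore reversed in
8-byte units.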