The PRM exposes the "icmp_header_data" field in IPv4 ICMP.
Update the ICMP mask parameter with the ICMP identifier and sequence
number fields:
when the ICMP sequence number spec and mask are given, the low 16 bits
of icmp_header_data are set;
when the ICMP identifier spec and mask are given, the high 16 bits are
set.
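For illustration, the packing can be sketched as follows (a hypothetical
standalone helper, not part of this patch):

	#include <stdint.h>
	#include <rte_byteorder.h>

	/* identifier -> high 16 bits, sequence number -> low 16 bits */
	static uint32_t
	icmp_pack_header_data(rte_be16_t ident, rte_be16_t seq_nb)
	{
		return ((uint32_t)rte_be_to_cpu_16(ident) << 16) |
		       rte_be_to_cpu_16(seq_nb);
	}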
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
- The input buffer, providing the removal size, is not validated.
- The buffer size must match the length of the headers to be removed.
-- ICMP/ICMP6 code/type matching, IP-in-IP and MPLS flow matching are all
+- ICMP(code/type/identifier/sequence number) / ICMP6(code/type) matching,
+  IP-in-IP and MPLS flow matching are all
mutually exclusive features which cannot be supported together
(see :ref:`mlx5_firmware_config`).
FLEX_PARSER_PROFILE_ENABLE=1
-- enable ICMP/ICMP6 code/type fields matching::
+- enable ICMP(code/type/identifier/sequence number) / ICMP6(code/type)
+  fields matching::
FLEX_PARSER_PROFILE_ENABLE=2
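With profile 2 enabled, the new fields are reachable through the regular
rte_flow ICMP item. A minimal sketch (illustrative values, not part of
this patch)::

   #include <rte_byteorder.h>
   #include <rte_flow.h>

   /* Match ICMP packets with identifier 0x1234 and sequence 0x5678. */
   static const struct rte_flow_item_icmp icmp_spec = {
           .hdr = {
                   .icmp_ident = RTE_BE16(0x1234),
                   .icmp_seq_nb = RTE_BE16(0x5678),
           },
   };
   static const struct rte_flow_item_icmp icmp_mask = {
           .hdr = {
                   .icmp_ident = RTE_BE16(0xffff),
                   .icmp_seq_nb = RTE_BE16(0xffff),
           },
   };
   static const struct rte_flow_item pattern[] = {
           { .type = RTE_FLOW_ITEM_TYPE_ETH },
           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
           { .type = RTE_FLOW_ITEM_TYPE_ICMP,
             .spec = &icmp_spec, .mask = &icmp_mask },
           { .type = RTE_FLOW_ITEM_TYPE_END },
   };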
* Added flag action.
* Added raw encap/decap actions.
* Added VXLAN encap/decap actions.
- * Added ICMP and ICMP6 matching items.
+ * Added ICMP(code/type/identifier/sequence number) and ICMP6(code/type)
+   matching items.
* Added option to set port mask for insertion/deletion:
``--portmask=N``
where N represents the hexadecimal bitmask of ports used.
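For example (illustrative), ``--portmask=0x3`` selects ports 0 and 1.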
struct rte_flow_error *error)
{
const struct rte_flow_item_icmp *mask = item->mask;
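+	/* Full set of IPv4 ICMP fields the NIC can match on. */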
+ const struct rte_flow_item_icmp nic_mask = {
+ .hdr.icmp_type = 0xff,
+ .hdr.icmp_code = 0xff,
+ .hdr.icmp_ident = RTE_BE16(0xffff),
+ .hdr.icmp_seq_nb = RTE_BE16(0xffff),
+ };
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
if (!mask)
- mask = &rte_flow_item_icmp_mask;
+ mask = &nic_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_icmp_mask,
+ (const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_icmp), error);
if (ret < 0)
return ret;
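For context, the default mask being replaced covers only the type and
code fields, as defined in rte_flow.h:

	static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
		.hdr = {
			.icmp_type = 0xff,
			.icmp_code = 0xff,
		},
	};

Keeping it as the acceptance mask would make mlx5_flow_item_acceptable()
reject any rule setting identifier or sequence number bits, hence the
wider driver-local nic_mask above.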
{
const struct rte_flow_item_icmp *icmp_m = item->mask;
const struct rte_flow_item_icmp *icmp_v = item->spec;
+ uint32_t icmp_header_data_m = 0;
+ uint32_t icmp_header_data_v = 0;
void *headers_m;
void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
		 icmp_m->hdr.icmp_code);
MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
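+	/*
+	 * PRM icmp_header_data layout: identifier in the high 16 bits,
+	 * sequence number in the low 16 bits; program the match only
+	 * when the mask selects at least one of those bits.
+	 */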
+ icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
+ icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
+ if (icmp_header_data_m) {
+ icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
+ icmp_header_data_v |=
+ rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
+ MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
+ icmp_header_data_m);
+ MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
+ icmp_header_data_v & icmp_header_data_m);
+ }
}
/**