+static void
+flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
+ const struct rte_flow_item_integrity *value,
+ void *headers_m, void *headers_v)
+{
+ if (mask->l4_ok) {
+ /* application l4_ok filter aggregates all hardware l4 filters,
+ * therefore hw l4_checksum_ok must be implicitly added here.
+ */
+ struct rte_flow_item_integrity local_item;
+
+ local_item.l4_csum_ok = 1;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
+ local_item.l4_csum_ok);
+ if (value->l4_ok) {
+ /* application l4_ok = 1 matches on both hw flags
+ * l4_ok and l4_checksum_ok set to 1.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l4_checksum_ok, local_item.l4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
+ mask->l4_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
+ value->l4_ok);
+ } else {
+ /* application l4_ok = 0 matches on hw flag
+ * l4_checksum_ok = 0 only.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l4_checksum_ok, 0);
+ }
+ } else if (mask->l4_csum_ok) {
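+ /* application matches on the l4 checksum result only,
+ * translate it directly to the hw l4_checksum_ok flag.
+ */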
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
+ mask->l4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+ value->l4_csum_ok);
+ }
+}
+
+static void
+flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
+ const struct rte_flow_item_integrity *value,
+ void *headers_m, void *headers_v,
+ bool is_ipv4)
+{
+ if (mask->l3_ok) {
+ /* application l3_ok filter aggregates all hardware l3 filters,
+ * therefore hw ipv4_checksum_ok must be implicitly added here.
+ */
+ struct rte_flow_item_integrity local_item;
+
+ local_item.ipv4_csum_ok = !!is_ipv4;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+ local_item.ipv4_csum_ok);
+ if (value->l3_ok) {
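+ /* application l3_ok = 1 matches on hw flag l3_ok = 1
+ * and, for ipv4, on ipv4_checksum_ok = 1 as well.
+ */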
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ipv4_checksum_ok, local_item.ipv4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
+ mask->l3_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
+ value->l3_ok);
+ } else {
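+ /* application l3_ok = 0 matches on hw flag
+ * ipv4_checksum_ok = 0 only.
+ */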
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ipv4_checksum_ok, 0);
+ }
+ } else if (mask->ipv4_csum_ok) {
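+ /* application matches on the ipv4 checksum result only. */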
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+ mask->ipv4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
+ value->ipv4_csum_ok);
+ }
+}
+
+static void
+flow_dv_translate_item_integrity(void *matcher, void *key,
+ const struct rte_flow_item *head_item,
+ const struct rte_flow_item *integrity_item)
+{
+ const struct rte_flow_item_integrity *mask = integrity_item->mask;
+ const struct rte_flow_item_integrity *value = integrity_item->spec;
+ const struct rte_flow_item *tunnel_item, *end_item, *item;
+ void *headers_m;
+ void *headers_v;
+ uint32_t l3_protocol;
+
+ if (!value)
+ return;
+ if (!mask)
+ mask = &rte_flow_item_integrity_mask;
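+ /* level > 1 applies the item to the inner packet headers,
+ * level 0 or 1 to the outermost ones.
+ */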
+ if (value->level > 1) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ tunnel_item = mlx5_flow_find_tunnel_item(head_item);
+ if (value->level > 1) {
+ /* tunnel item was verified during the item validation */
+ item = tunnel_item;
+ end_item = mlx5_find_end_item(tunnel_item);
+ } else {
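+ /* outer integrity: scan the pattern from its head up to the
+ * tunnel item, if any, otherwise up to the pattern end.
+ */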
+ item = head_item;
+ end_item = tunnel_item ? tunnel_item :
+ mlx5_find_end_item(integrity_item);
+ }
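+ /* locate the l3 protocol only when l3_ok is requested, to tell
+ * whether the ipv4_checksum_ok flag applies.
+ */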
+ l3_protocol = mask->l3_ok ?
+ mlx5_flow_locate_proto_l3(&item, end_item) : 0;
+ flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
+ l3_protocol == RTE_ETHER_TYPE_IPV4);
+ flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
+}
+
+/**
+ * Prepares DV flow counter with aging configuration.
+ * Gets it by index when it exists, creates a new one when it doesn't.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in, out] flow
+ * Pointer to the sub flow.
+ * @param[in] count
+ * Pointer to the counter action configuration.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to the counter, NULL otherwise.
+ */
+static struct mlx5_flow_counter *
+flow_dv_prepare_counter(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow *flow,
+ const struct rte_flow_action_count *count,
+ const struct rte_flow_action_age *age,
+ struct rte_flow_error *error)
+{
+ if (!flow->counter) {
+ flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
+ count, age);
+ if (!flow->counter) {
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "cannot create counter object.");
+ return NULL;
+ }
+ }
+ return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
+}
+