[dpdk.git] / drivers / net / mlx5 / mlx5_flow_flex.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2021 NVIDIA Corporation & Affiliates
3  */
4 #include <rte_malloc.h>
5 #include <mlx5_devx_cmds.h>
6 #include <mlx5_malloc.h>
7 #include "mlx5.h"
8 #include "mlx5_flow.h"
9
10 static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
11               "Flex item maximal number exceeds uint32_t bit width");
12
13 /**
14  *  Routine called once on port initialization to initialize the
15  *  flex item related infrastructure.
16  *
17  * @param dev
18  *   Ethernet device to perform flex item initialization
19  *
20  * @return
21  *   0 on success, a negative errno value otherwise and rte_errno is set.
22  */
23 int
24 mlx5_flex_item_port_init(struct rte_eth_dev *dev)
25 {
26         struct mlx5_priv *priv = dev->data->dev_private;
27
28         rte_spinlock_init(&priv->flex_item_sl);
29         MLX5_ASSERT(!priv->flex_item_map);
30         return 0;
31 }
32
33 /**
34  *  Routine called once on port close to perform flex item
35  *  related infrastructure cleanup.
36  *
37  * @param dev
38  *   Ethernet device to perform cleanup
39  */
40 void
41 mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
42 {
43         struct mlx5_priv *priv = dev->data->dev_private;
44         uint32_t i;
45
46         for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
47                 if (priv->flex_item_map & (1 << i)) {
48                         struct mlx5_flex_item *flex = &priv->flex_item[i];
49
50                         claim_zero(mlx5_list_unregister
51                                         (priv->sh->flex_parsers_dv,
52                                          &flex->devx_fp->entry));
53                         flex->devx_fp = NULL;
54                         flex->refcnt = 0;
55                         priv->flex_item_map &= ~(1 << i);
56                 }
57         }
58 }
59
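/*
 * Convert a flex item pointer into its index within the per-port
 * priv->flex_item[] array. Returns -1 if the pointer does not reference
 * a properly aligned, currently allocated entry.
 */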
60 static int
61 mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
62 {
63         uintptr_t start = (uintptr_t)&priv->flex_item[0];
64         uintptr_t entry = (uintptr_t)item;
65         uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
66
67         if (entry < start ||
68             idx >= MLX5_PORT_FLEX_ITEM_NUM ||
69             (entry - start) % sizeof(struct mlx5_flex_item) ||
70             !(priv->flex_item_map & (1u << idx)))
71                 return -1;
72         return (int)idx;
73 }
74
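/*
 * Allocate a free flex item entry from the per-port array under the
 * flex_item_sl spinlock. Returns NULL if all entries are in use.
 */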
75 static struct mlx5_flex_item *
76 mlx5_flex_alloc(struct mlx5_priv *priv)
77 {
78         struct mlx5_flex_item *item = NULL;
79
80         rte_spinlock_lock(&priv->flex_item_sl);
81         if (~priv->flex_item_map) {
82                 uint32_t idx = rte_bsf32(~priv->flex_item_map);
83
84                 if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
85                         item = &priv->flex_item[idx];
86                         MLX5_ASSERT(!item->refcnt);
87                         MLX5_ASSERT(!item->devx_fp);
88                         item->devx_fp = NULL;
89                         __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
90                         priv->flex_item_map |= 1u << idx;
91                 }
92         }
93         rte_spinlock_unlock(&priv->flex_item_sl);
94         return item;
95 }
96
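/*
 * Return a flex item entry to the per-port pool, clearing its
 * allocation bit under the flex_item_sl spinlock.
 */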
97 static void
98 mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
99 {
100         int idx = mlx5_flex_index(priv, item);
101
102         MLX5_ASSERT(idx >= 0 &&
103                     idx < MLX5_PORT_FLEX_ITEM_NUM &&
104                     (priv->flex_item_map & (1u << idx)));
105         if (idx >= 0) {
106                 rte_spinlock_lock(&priv->flex_item_sl);
107                 MLX5_ASSERT(!item->refcnt);
108                 MLX5_ASSERT(!item->devx_fp);
109                 item->devx_fp = NULL;
110                 __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
111                 priv->flex_item_map &= ~(1u << idx);
112                 rte_spinlock_unlock(&priv->flex_item_sl);
113         }
114 }
115
116 /*
117  * Calculate largest mask value for a given shift.
118  *
119  *   shift      mask
120  * ------- ---------------
121  *    0     b111100  0x3C
122  *    1     b111110  0x3E
123  *    2     b111111  0x3F
124  *    3     b011111  0x1F
125  *    4     b001111  0x0F
126  *    5     b000111  0x07
127  */
128 static uint8_t
129 mlx5_flex_hdr_len_mask(uint8_t shift,
130                        const struct mlx5_hca_flex_attr *attr)
131 {
132         uint32_t base_mask;
133         int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;
134
135         base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
136         return diff == 0 ? base_mask :
137                diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
138 }
139
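/*
 * Translate the header length field of the flex item configuration into
 * DevX parse graph node settings, validating the requested mode, base,
 * shift and mask against the HCA flex parser capabilities.
 */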
140 static int
141 mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
142                            const struct rte_flow_item_flex_conf *conf,
143                            struct mlx5_flex_parser_devx *devx,
144                            struct rte_flow_error *error)
145 {
146         const struct rte_flow_item_flex_field *field = &conf->next_header;
147         struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
148         uint32_t len_width, mask;
149
150         if (field->field_base % CHAR_BIT)
151                 return rte_flow_error_set
152                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
153                          "not byte aligned header length field");
154         switch (field->field_mode) {
155         case FIELD_MODE_DUMMY:
156                 return rte_flow_error_set
157                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
158                          "invalid header length field mode (DUMMY)");
159         case FIELD_MODE_FIXED:
160                 if (!(attr->header_length_mode &
161                     RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
162                         return rte_flow_error_set
163                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
164                                  "unsupported header length field mode (FIXED)");
165                 if (attr->header_length_mask_width < field->field_size)
166                         return rte_flow_error_set
167                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
168                                  "header length field width exceeds limit");
169                 if (field->offset_shift < 0 ||
170                     field->offset_shift > attr->header_length_mask_width)
171                         return rte_flow_error_set
172                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
173                                  "invalid header length field shift (FIXED)");
174                 if (field->field_base < 0)
175                         return rte_flow_error_set
176                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
177                                  "negative header length field base (FIXED)");
178                 node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
179                 break;
180         case FIELD_MODE_OFFSET:
181                 if (!(attr->header_length_mode &
182                     RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
183                         return rte_flow_error_set
184                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
185                                  "unsupported header length field mode (OFFSET)");
186                 node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
187                 if (field->offset_mask == 0 ||
188                     !rte_is_power_of_2(field->offset_mask + 1))
189                         return rte_flow_error_set
190                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
191                                  "invalid length field offset mask (OFFSET)");
192                 len_width = rte_fls_u32(field->offset_mask);
193                 if (len_width > attr->header_length_mask_width)
194                         return rte_flow_error_set
195                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
196                                  "length field offset mask too wide (OFFSET)");
197                 mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
198                 if (mask < field->offset_mask)
199                         return rte_flow_error_set
200                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
201                                  "length field shift too big (OFFSET)");
202                 node->header_length_field_mask = RTE_MIN(mask,
203                                                          field->offset_mask);
204                 break;
205         case FIELD_MODE_BITMASK:
206                 if (!(attr->header_length_mode &
207                     RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
208                         return rte_flow_error_set
209                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
210                                  "unsupported header length field mode (BITMASK)");
211                 if (attr->header_length_mask_width < field->field_size)
212                         return rte_flow_error_set
213                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
214                                  "header length field width exceeds limit");
215                 node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
216                 mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
217                 if (mask < field->offset_mask)
218                         return rte_flow_error_set
219                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
220                                  "length field shift too big (BITMASK)");
221                 node->header_length_field_mask = RTE_MIN(mask,
222                                                          field->offset_mask);
223                 break;
224         default:
225                 return rte_flow_error_set
226                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
227                          "unknown header length field mode");
228         }
229         if (field->field_base / CHAR_BIT >= 0 &&
230             field->field_base / CHAR_BIT > attr->max_base_header_length)
231                 return rte_flow_error_set
232                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
233                          "header length field base exceeds limit");
234         node->header_length_base_value = field->field_base / CHAR_BIT;
235         if (field->field_mode == FIELD_MODE_OFFSET ||
236             field->field_mode == FIELD_MODE_BITMASK) {
237                 if (field->offset_shift > 15 || field->offset_shift < 0)
238                         return rte_flow_error_set
239                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
240                                  "header length field shift exceeds limit");
241                 node->header_length_field_shift = field->offset_shift;
242                 node->header_length_field_offset = field->offset_base;
243         }
244         return 0;
245 }
246
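/*
 * Translate the next protocol field of the flex item configuration.
 * Only the FIXED mode is supported; DUMMY is accepted when no output
 * links are configured.
 */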
247 static int
248 mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
249                          const struct rte_flow_item_flex_conf *conf,
250                          struct mlx5_flex_parser_devx *devx,
251                          struct rte_flow_error *error)
252 {
253         const struct rte_flow_item_flex_field *field = &conf->next_protocol;
254         struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
255
256         switch (field->field_mode) {
257         case FIELD_MODE_DUMMY:
258                 if (conf->nb_outputs)
259                         return rte_flow_error_set
260                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
261                                  "next protocol field is required (DUMMY)");
262                 return 0;
263         case FIELD_MODE_FIXED:
264                 break;
265         case FIELD_MODE_OFFSET:
266                 return rte_flow_error_set
267                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
268                          "unsupported next protocol field mode (OFFSET)");
269                 break;
270         case FIELD_MODE_BITMASK:
271                 return rte_flow_error_set
272                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
273                          "unsupported next protocol field mode (BITMASK)");
274         default:
275                 return rte_flow_error_set
276                         (error, EINVAL,
277                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
278                          "unknown next protocol field mode");
279         }
280         MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
281         if (!conf->nb_outputs)
282                 return rte_flow_error_set
283                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
284                          "out link(s) are required if next field is present");
285         if (attr->max_next_header_offset < field->field_base)
286                 return rte_flow_error_set
287                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
288                          "next protocol field base exceeds limit");
289         if (field->offset_shift)
290                 return rte_flow_error_set
291                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
292                          "unsupported next protocol field shift");
293         node->next_header_field_offset = field->field_base;
294         node->next_header_field_size = field->field_size;
295         return 0;
296 }
297
298 /* Helper structure to handle field bit intervals. */
299 struct mlx5_flex_field_cover {
300         uint16_t num;
301         int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
302         int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
303         uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
304 };
305
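/* Insert a new bit interval at position 'num', shifting the tail entries up. */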
306 static void
307 mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
308                        uint16_t num, int32_t start, int32_t end)
309 {
310         MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
311         MLX5_ASSERT(num <= cover->num);
312         if (num < cover->num) {
313                 memmove(&cover->start[num + 1], &cover->start[num],
314                         (cover->num - num) * sizeof(int32_t));
315                 memmove(&cover->end[num + 1],   &cover->end[num],
316                         (cover->num - num) * sizeof(int32_t));
317         }
318         cover->start[num] = start;
319         cover->end[num] = end;
320         cover->num++;
321 }
322
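/*
 * Merge the interval at position 'num' with the following overlapping
 * intervals, removing the entries that were absorbed.
 */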
323 static void
324 mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
325 {
326         uint32_t i, del = 0;
327         int32_t end;
328
329         MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
330         MLX5_ASSERT(num < (cover->num - 1));
331         end = cover->end[num];
332         for (i = num + 1; i < cover->num; i++) {
333                 if (end < cover->start[i])
334                         break;
335                 del++;
336                 if (end <= cover->end[i]) {
337                         cover->end[num] = cover->end[i];
338                         break;
339                 }
340         }
341         if (del) {
342                 MLX5_ASSERT(del < (cover->num - 1u - num));
343                 cover->num -= del;
344                 MLX5_ASSERT(cover->num > num);
345                 if ((cover->num - num) > 1) {
346                         memmove(&cover->start[num + 1],
347                                 &cover->start[num + 1 + del],
348                                 (cover->num - num - 1) * sizeof(int32_t));
349                         memmove(&cover->end[num + 1],
350                                 &cover->end[num + 1 + del],
351                                 (cover->num - num - 1) * sizeof(int32_t));
352                 }
353         }
354 }
355
356 /*
357  * Validate the sample field and update the interval array
358  * if parameters match the 'match' field.
359  * Returns:
360  *    < 0  - error
361  *    == 0 - no match, interval array not updated
362  *    > 0  - match, interval array updated
363  */
364 static int
365 mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
366                        struct rte_flow_item_flex_field *field,
367                        struct rte_flow_item_flex_field *match,
368                        struct mlx5_hca_flex_attr *attr,
369                        struct rte_flow_error *error)
370 {
371         int32_t start, end;
372         uint32_t i;
373
374         switch (field->field_mode) {
375         case FIELD_MODE_DUMMY:
376                 return 0;
377         case FIELD_MODE_FIXED:
378                 if (!(attr->sample_offset_mode &
379                     RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
380                         return rte_flow_error_set
381                                 (error, EINVAL,
382                                  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
383                                  "unsupported sample field mode (FIXED)");
384                 if (field->offset_shift)
385                         return rte_flow_error_set
386                                 (error, EINVAL,
387                                  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
388                                  "invalid sample field shift (FIXED)");
389                 if (field->field_base < 0)
390                         return rte_flow_error_set
391                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
392                                  "invalid sample field base (FIXED)");
393                 if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
394                         return rte_flow_error_set
395                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
396                                  "sample field base exceeds limit (FIXED)");
397                 break;
398         case FIELD_MODE_OFFSET:
399                 if (!(attr->sample_offset_mode &
400                     RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
401                         return rte_flow_error_set
402                                 (error, EINVAL,
403                                  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
404                                  "unsupported sample field mode (OFFSET)");
405                 if (field->field_base / CHAR_BIT >= 0 &&
406                     field->field_base / CHAR_BIT > attr->max_sample_base_offset)
407                         return rte_flow_error_set
408                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
409                                 "sample field base exceeds limit");
410                 break;
411         case FIELD_MODE_BITMASK:
412                 if (!(attr->sample_offset_mode &
413                     RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
414                         return rte_flow_error_set
415                                 (error, EINVAL,
416                                  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
417                                  "unsupported sample field mode (BITMASK)");
418                 if (field->field_base / CHAR_BIT >= 0 &&
419                     field->field_base / CHAR_BIT > attr->max_sample_base_offset)
420                         return rte_flow_error_set
421                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
422                                 "sample field base exceeds limit");
423                 break;
424         default:
425                 return rte_flow_error_set
426                         (error, EINVAL,
427                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
428                          "unknown data sample field mode");
429         }
430         if (!match) {
431                 if (!field->field_size)
432                         return rte_flow_error_set
433                                 (error, EINVAL,
434                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
435                                 "zero sample field width");
436                 if (field->field_id)
437                         DRV_LOG(DEBUG, "sample field id hint ignored");
438         } else {
439                 if (field->field_mode != match->field_mode ||
440                     field->offset_base | match->offset_base ||
441                     field->offset_mask | match->offset_mask ||
442                     field->offset_shift | match->offset_shift)
443                         return 0;
444         }
445         start = field->field_base;
446         end = start + field->field_size;
447         /* Add the new or similar field to interval array. */
448         if (!cover->num) {
449                 cover->start[cover->num] = start;
450                 cover->end[cover->num] = end;
451                 cover->num = 1;
452                 return 1;
453         }
454         for (i = 0; i < cover->num; i++) {
455                 if (start > cover->end[i]) {
456                         if (i >= (cover->num - 1u)) {
457                                 mlx5_flex_insert_field(cover, cover->num,
458                                                        start, end);
459                                 break;
460                         }
461                         continue;
462                 }
463                 if (end < cover->start[i]) {
464                         mlx5_flex_insert_field(cover, i, start, end);
465                         break;
466                 }
467                 if (start < cover->start[i])
468                         cover->start[i] = start;
469                 if (end > cover->end[i]) {
470                         cover->end[i] = end;
471                         if (i < (cover->num - 1u))
472                                 mlx5_flex_merge_field(cover, i);
473                 }
474                 break;
475         }
476         return 1;
477 }
478
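/*
 * Fill a DevX sample attribute template from the sample field mode
 * and the flex item tunnel mode.
 */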
479 static void
480 mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
481                         struct rte_flow_item_flex_field *field,
482                         enum rte_flow_item_flex_tunnel_mode tunnel_mode)
483 {
484         memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
485         na->flow_match_sample_en = 1;
486         switch (field->field_mode) {
487         case FIELD_MODE_FIXED:
488                 na->flow_match_sample_offset_mode =
489                         MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
490                 break;
491         case FIELD_MODE_OFFSET:
492                 na->flow_match_sample_offset_mode =
493                         MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
494                 na->flow_match_sample_field_offset = field->offset_base;
495                 na->flow_match_sample_field_offset_mask = field->offset_mask;
496                 na->flow_match_sample_field_offset_shift = field->offset_shift;
497                 break;
498         case FIELD_MODE_BITMASK:
499                 na->flow_match_sample_offset_mode =
500                         MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
501                 na->flow_match_sample_field_offset = field->offset_base;
502                 na->flow_match_sample_field_offset_mask = field->offset_mask;
503                 na->flow_match_sample_field_offset_shift = field->offset_shift;
504                 break;
505         default:
506                 MLX5_ASSERT(false);
507                 break;
508         }
509         switch (tunnel_mode) {
510         case FLEX_TUNNEL_MODE_SINGLE:
511                 /* Fallthrough */
512         case FLEX_TUNNEL_MODE_TUNNEL:
513                 na->flow_match_sample_tunnel_mode =
514                         MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
515                 break;
516         case FLEX_TUNNEL_MODE_MULTI:
517                 /* Fallthrough */
518         case FLEX_TUNNEL_MODE_OUTER:
519                 na->flow_match_sample_tunnel_mode =
520                         MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
521                 break;
522         case FLEX_TUNNEL_MODE_INNER:
523                 na->flow_match_sample_tunnel_mode =
524                         MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
525                 break;
526         default:
527                 MLX5_ASSERT(false);
528                 break;
529         }
530 }
531
532 /* Map specified field to set/subset of allocated sample registers. */
533 static int
534 mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
535                      struct mlx5_flex_parser_devx *parser,
536                      struct mlx5_flex_item *item,
537                      struct rte_flow_error *error)
538 {
539         struct mlx5_devx_match_sample_attr node;
540         int32_t start = field->field_base;
541         int32_t end = start + field->field_size;
542         struct mlx5_flex_pattern_field *trans;
543         uint32_t i, done_bits = 0;
544
545         if (field->field_mode == FIELD_MODE_DUMMY) {
546                 done_bits = field->field_size;
547                 while (done_bits) {
548                         uint32_t part = RTE_MIN(done_bits,
549                                                 sizeof(uint32_t) * CHAR_BIT);
550                         if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
551                                 return rte_flow_error_set
552                                         (error,
553                                          EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
554                                          "too many flex item pattern translations");
555                         trans = &item->map[item->mapnum];
556                         trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
557                         trans->shift = 0;
558                         trans->width = part;
559                         item->mapnum++;
560                         done_bits -= part;
561                 }
562                 return 0;
563         }
564         mlx5_flex_config_sample(&node, field, item->tunnel_mode);
565         for (i = 0; i < parser->num_samples; i++) {
566                 struct mlx5_devx_match_sample_attr *sample =
567                         &parser->devx_conf.sample[i];
568                 int32_t reg_start, reg_end;
569                 int32_t cov_start, cov_end;
570
571                 MLX5_ASSERT(sample->flow_match_sample_en);
572                 if (!sample->flow_match_sample_en)
573                         break;
574                 node.flow_match_sample_field_base_offset =
575                         sample->flow_match_sample_field_base_offset;
576                 if (memcmp(&node, sample, sizeof(node)))
577                         continue;
578                 reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
579                 reg_start *= CHAR_BIT;
580                 reg_end = reg_start + 32;
581                 if (end <= reg_start || start >= reg_end)
582                         continue;
583                 cov_start = RTE_MAX(reg_start, start);
584                 cov_end = RTE_MIN(reg_end, end);
585                 MLX5_ASSERT(cov_end > cov_start);
586                 done_bits += cov_end - cov_start;
587                 if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
588                         return rte_flow_error_set
589                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
590                                  "too many flex item pattern translations");
591                 trans = &item->map[item->mapnum];
592                 item->mapnum++;
593                 trans->reg_id = i;
594                 trans->shift = cov_start - reg_start;
595                 trans->width = cov_end - cov_start;
596         }
597         if (done_bits != field->field_size) {
598                 MLX5_ASSERT(false);
599                 return rte_flow_error_set
600                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
601                          "failed to map field to sample register");
602         }
603         return 0;
604 }
605
606 /* Allocate sample registers for the specified field type and interval array. */
607 static int
608 mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
609                        struct mlx5_flex_parser_devx *parser,
610                        struct mlx5_flex_item *item,
611                        struct rte_flow_item_flex_field *field,
612                        struct mlx5_hca_flex_attr *attr,
613                        struct rte_flow_error *error)
614 {
615         struct mlx5_devx_match_sample_attr node;
616         uint32_t idx = 0;
617
618         mlx5_flex_config_sample(&node, field, item->tunnel_mode);
619         while (idx < cover->num) {
620                 int32_t start, end;
621
622                 /*
623                  * Sample base offsets are in bytes, should be aligned
624                  * to 32-bit as required by firmware for samples.
625                  */
626                 start = RTE_ALIGN_FLOOR(cover->start[idx],
627                                         sizeof(uint32_t) * CHAR_BIT);
628                 node.flow_match_sample_field_base_offset =
629                                                 (start / CHAR_BIT) & 0xFF;
630                 /* Allocate sample register. */
631                 if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
632                     parser->num_samples >= attr->max_num_sample ||
633                     parser->num_samples >= attr->max_num_prog_sample)
634                         return rte_flow_error_set
635                                 (error, EINVAL,
636                                  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
637                                  "no sample registers to handle all flex item fields");
638                 parser->devx_conf.sample[parser->num_samples] = node;
639                 parser->num_samples++;
640                 /* Remove or update covered intervals. */
641                 end = start + 32;
642                 while (idx < cover->num) {
643                         if (end >= cover->end[idx]) {
644                                 idx++;
645                                 continue;
646                         }
647                         if (end > cover->start[idx])
648                                 cover->start[idx] = end;
649                         break;
650                 }
651         }
652         return 0;
653 }
654
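/*
 * Translate the sample field array of the flex item configuration:
 * group similar fields, allocate the covering sample registers and
 * build the pattern translation map used on flow creation.
 */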
655 static int
656 mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
657                            const struct rte_flow_item_flex_conf *conf,
658                            struct mlx5_flex_parser_devx *parser,
659                            struct mlx5_flex_item *item,
660                            struct rte_flow_error *error)
661 {
662         struct mlx5_flex_field_cover cover;
663         uint32_t i, j;
664         int ret;
665
666         switch (conf->tunnel) {
667         case FLEX_TUNNEL_MODE_SINGLE:
668                 /* Fallthrough */
669         case FLEX_TUNNEL_MODE_OUTER:
670                 /* Fallthrough */
671         case FLEX_TUNNEL_MODE_INNER:
672                 /* Fallthrough */
673         case FLEX_TUNNEL_MODE_MULTI:
674                 /* Fallthrough */
675         case FLEX_TUNNEL_MODE_TUNNEL:
676                 break;
677         default:
678                 return rte_flow_error_set
679                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
680                          "unrecognized tunnel mode");
681         }
682         item->tunnel_mode = conf->tunnel;
683         if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
684                 return rte_flow_error_set
685                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
686                          "sample field number exceeds limit");
687         /*
688          * The application can specify fields smaller or bigger than the
689          * 32 bits covered by a single sample register, and it can specify
690          * field offsets in any order.
691          *
692          * Gather all similar fields together, build an array of bit intervals
693          * in ascending order and try to cover them with the smallest set of sample
694          * registers.
695          */
696         memset(&cover, 0, sizeof(cover));
697         for (i = 0; i < conf->nb_samples; i++) {
698                 struct rte_flow_item_flex_field *fl = conf->sample_data + i;
699
700                 /* Check whether field was covered in the previous iteration. */
701                 if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
702                         continue;
703                 if (fl->field_mode == FIELD_MODE_DUMMY)
704                         continue;
705                 /* Build an interval array for the field and similar ones */
706                 cover.num = 0;
707                 /* Add the first field to array unconditionally. */
708                 ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
709                 if (ret < 0)
710                         return ret;
711                 MLX5_ASSERT(ret > 0);
712                 cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
713                 for (j = i + 1; j < conf->nb_samples; j++) {
714                         struct rte_flow_item_flex_field *ft;
715
716                         /* Add field to array if its type matches. */
717                         ft = conf->sample_data + j;
718                         ret = mlx5_flex_cover_sample(&cover, ft, fl,
719                                                      attr, error);
720                         if (ret < 0)
721                                 return ret;
722                         if (!ret)
723                                 continue;
724                         cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
725                 }
726                 /* Allocate sample registers to cover array of intervals. */
727                 ret = mlx5_flex_alloc_sample(&cover, parser, item,
728                                              fl, attr, error);
729                 if (ret)
730                         return ret;
731         }
732         /* Build the item pattern translating data on flow creation. */
733         item->mapnum = 0;
734         memset(&item->map, 0, sizeof(item->map));
735         for (i = 0; i < conf->nb_samples; i++) {
736                 struct rte_flow_item_flex_field *fl = conf->sample_data + i;
737
738                 ret = mlx5_flex_map_sample(fl, parser, item, error);
739                 if (ret) {
740                         MLX5_ASSERT(false);
741                         return ret;
742                 }
743         }
744         if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
745                 /*
746                  * In FLEX_TUNNEL_MODE_MULTI tunnel mode PMD creates 2 sets
747                  * of samples. The first set is for outer and the second set
748                  * for inner flex flow item. Outer and inner samples differ
749                  * only in tunnel_mode.
750                  */
751                 if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
752                         return rte_flow_error_set
753                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
754                                  "no sample registers for inner");
755                 rte_memcpy(parser->devx_conf.sample + parser->num_samples,
756                            parser->devx_conf.sample,
757                            parser->num_samples *
758                                         sizeof(parser->devx_conf.sample[0]));
759                 for (i = 0; i < parser->num_samples; i++) {
760                         struct mlx5_devx_match_sample_attr *sm = i +
761                                 parser->devx_conf.sample + parser->num_samples;
762
763                         sm->flow_match_sample_tunnel_mode =
764                                                 MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
765                 }
766                 parser->num_samples *= 2;
767         }
768         return 0;
769 }
770
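/*
 * Map an rte_flow item type to the matching parse graph arc node type,
 * for input (in != 0) or output arcs. Returns -EINVAL for unsupported
 * item types.
 */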
771 static int
772 mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
773 {
774         switch (type) {
775         case RTE_FLOW_ITEM_TYPE_ETH:
776                 return MLX5_GRAPH_ARC_NODE_MAC;
777         case RTE_FLOW_ITEM_TYPE_IPV4:
778                 return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
779         case RTE_FLOW_ITEM_TYPE_IPV6:
780                 return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
781         case RTE_FLOW_ITEM_TYPE_UDP:
782                 return MLX5_GRAPH_ARC_NODE_UDP;
783         case RTE_FLOW_ITEM_TYPE_TCP:
784                 return MLX5_GRAPH_ARC_NODE_TCP;
785         case RTE_FLOW_ITEM_TYPE_MPLS:
786                 return MLX5_GRAPH_ARC_NODE_MPLS;
787         case RTE_FLOW_ITEM_TYPE_GRE:
788                 return MLX5_GRAPH_ARC_NODE_GRE;
789         case RTE_FLOW_ITEM_TYPE_GENEVE:
790                 return MLX5_GRAPH_ARC_NODE_GENEVE;
791         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
792                 return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
793         default:
794                 return -EINVAL;
795         }
796 }
797
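/*
 * Extract the IN arc condition value (Ethernet type) from an ETH item.
 * The item mask must select the ether_type field only.
 */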
798 static int
799 mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
800                      struct rte_flow_error *error)
801 {
802         const struct rte_flow_item_eth *spec = item->spec;
803         const struct rte_flow_item_eth *mask = item->mask;
804         struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
805
806         if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
807                 return rte_flow_error_set
808                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
809                          "invalid eth item mask");
810         }
811         return rte_be_to_cpu_16(spec->hdr.ether_type);
812 }
813
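/*
 * Extract the IN arc condition value (UDP destination port) from a UDP
 * item. The item mask must select the dst_port field only.
 */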
814 static int
815 mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
816                      struct rte_flow_error *error)
817 {
818         const struct rte_flow_item_udp *spec = item->spec;
819         const struct rte_flow_item_udp *mask = item->mask;
820         struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
821
822         if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
823                 return rte_flow_error_set
824                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
825                          "invalid udp item mask");
826         }
827         return rte_be_to_cpu_16(spec->hdr.dst_port);
828 }
829
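/*
 * Translate input links of the flex item configuration into parse graph
 * IN arcs, deriving the arc condition value from the link item spec.
 */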
830 static int
831 mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
832                            const struct rte_flow_item_flex_conf *conf,
833                            struct mlx5_flex_parser_devx *devx,
834                            struct mlx5_flex_item *item,
835                            struct rte_flow_error *error)
836 {
837         struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
838         uint32_t i;
839
840         RTE_SET_USED(item);
841         if (conf->nb_inputs > attr->max_num_arc_in)
842                 return rte_flow_error_set
843                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
844                          "too many input links");
845         for (i = 0; i < conf->nb_inputs; i++) {
846                 struct mlx5_devx_graph_arc_attr *arc = node->in + i;
847                 struct rte_flow_item_flex_link *link = conf->input_link + i;
848                 const struct rte_flow_item *rte_item = &link->item;
849                 int arc_type;
850                 int ret;
851
852                 if (!rte_item->spec || !rte_item->mask || rte_item->last)
853                         return rte_flow_error_set
854                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
855                                  "invalid flex item IN arc format");
856                 arc_type = mlx5_flex_arc_type(rte_item->type, true);
857                 if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
858                         return rte_flow_error_set
859                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
860                                  "unsupported flex item IN arc type");
861                 arc->arc_parse_graph_node = arc_type;
862                 arc->start_inner_tunnel = 0;
863                 /*
864                  * Configure arc IN condition value. The value location depends
865                  * on protocol. Current FW version supports ETH & UDP for IN
866                  * arcs only, and locations for these protocols are defined.
867                  * Add more protocols when available.
868                  */
869                 switch (rte_item->type) {
870                 case RTE_FLOW_ITEM_TYPE_ETH:
871                         ret = mlx5_flex_arc_in_eth(rte_item, error);
872                         break;
873                 case RTE_FLOW_ITEM_TYPE_UDP:
874                         ret = mlx5_flex_arc_in_udp(rte_item, error);
875                         break;
876                 default:
877                         MLX5_ASSERT(false);
878                         return rte_flow_error_set
879                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
880                                  "unsupported flex item IN arc type");
881                 }
882                 if (ret < 0)
883                         return ret;
884                 arc->compare_condition_value = (uint16_t)ret;
885         }
886         return 0;
887 }
888
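/*
 * Translate output links of the flex item configuration into parse graph
 * OUT arcs. The condition value is taken from the link 'next' field and
 * inner tunnel parsing is started in FLEX_TUNNEL_MODE_TUNNEL mode.
 */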
889 static int
890 mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
891                             const struct rte_flow_item_flex_conf *conf,
892                             struct mlx5_flex_parser_devx *devx,
893                             struct mlx5_flex_item *item,
894                             struct rte_flow_error *error)
895 {
896         struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
897         bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
898         uint32_t i;
899
900         RTE_SET_USED(item);
901         if (conf->nb_outputs > attr->max_num_arc_out)
902                 return rte_flow_error_set
903                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
904                          "too many output links");
905         for (i = 0; i < conf->nb_outputs; i++) {
906                 struct mlx5_devx_graph_arc_attr *arc = node->out + i;
907                 struct rte_flow_item_flex_link *link = conf->output_link + i;
908                 const struct rte_flow_item *rte_item = &link->item;
909                 int arc_type;
910
911                 if (rte_item->spec || rte_item->mask || rte_item->last)
912                         return rte_flow_error_set
913                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
914                                  "flex node: invalid OUT arc format");
915                 arc_type = mlx5_flex_arc_type(rte_item->type, false);
916                 if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
917                         return rte_flow_error_set
918                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
919                                  "unsupported flex item OUT arc type");
920                 arc->arc_parse_graph_node = arc_type;
921                 arc->start_inner_tunnel = !!is_tunnel;
922                 arc->compare_condition_value = link->next;
923         }
924         return 0;
925 }
926
927 /* Translate RTE flex item API configuration into flex parser settings. */
928 static int
929 mlx5_flex_translate_conf(struct rte_eth_dev *dev,
930                          const struct rte_flow_item_flex_conf *conf,
931                          struct mlx5_flex_parser_devx *devx,
932                          struct mlx5_flex_item *item,
933                          struct rte_flow_error *error)
934 {
935         struct mlx5_priv *priv = dev->data->dev_private;
936         struct mlx5_hca_flex_attr *attr = &priv->config.hca_attr.flex;
937         int ret;
938
939         ret = mlx5_flex_translate_length(attr, conf, devx, error);
940         if (ret)
941                 return ret;
942         ret = mlx5_flex_translate_next(attr, conf, devx, error);
943         if (ret)
944                 return ret;
945         ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
946         if (ret)
947                 return ret;
948         ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
949         if (ret)
950                 return ret;
951         ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
952         if (ret)
953                 return ret;
954         return 0;
955 }
956
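/*
 * A minimal application-level usage sketch, assuming the experimental
 * rte_flow_flex_item_create() API from rte_flow.h; all values below are
 * illustrative only:
 *
 *	struct rte_flow_item_flex_field len = {
 *		.field_mode = FIELD_MODE_FIXED,
 *		.field_base = 8 * CHAR_BIT,	// fixed 8-byte header
 *	};
 *	struct rte_flow_item_flex_conf conf = {
 *		.tunnel = FLEX_TUNNEL_MODE_SINGLE,
 *		.next_header = len,
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow_item_flex_handle *h =
 *		rte_flow_flex_item_create(port_id, &conf, &err);
 *
 * For the DV flow engine such a request is serviced by
 * flow_dv_item_create() below.
 */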
957 /**
958  * Create the flex item with specified configuration over the Ethernet device.
959  *
960  * @param dev
961  *   Ethernet device to create flex item on.
962  * @param[in] conf
963  *   Flex item configuration.
964  * @param[out] error
965  *   Perform verbose error reporting if not NULL. PMDs initialize this
966  *   structure in case of error only.
967  *
968  * @return
969  *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
970  */
971 struct rte_flow_item_flex_handle *
972 flow_dv_item_create(struct rte_eth_dev *dev,
973                     const struct rte_flow_item_flex_conf *conf,
974                     struct rte_flow_error *error)
975 {
976         struct mlx5_priv *priv = dev->data->dev_private;
977         struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
978         struct mlx5_flex_item *flex;
979         struct mlx5_list_entry *ent;
980
981         MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
982         flex = mlx5_flex_alloc(priv);
983         if (!flex) {
984                 rte_flow_error_set(error, ENOMEM,
985                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
986                                    "too many flex items created on the port");
987                 return NULL;
988         }
989         if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
990                 goto error;
991         ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
992         if (!ent) {
993                 rte_flow_error_set(error, ENOMEM,
994                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
995                                    "flex item creation failure");
996                 goto error;
997         }
998         flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
999         /* Mark initialized flex item valid. */
1000         __atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
1001         return (struct rte_flow_item_flex_handle *)flex;
1002
1003 error:
1004         mlx5_flex_free(priv, flex);
1005         return NULL;
1006 }
1007
1008 /**
1009  * Release the flex item on the specified Ethernet device.
1010  *
1011  * @param dev
1012  *   Ethernet device to destroy flex item on.
1013  * @param[in] handle
1014  *   Handle of the item existing on the specified device.
1015  * @param[out] error
1016  *   Perform verbose error reporting if not NULL. PMDs initialize this
1017  *   structure in case of error only.
1018  *
1019  * @return
1020  *   0 on success, a negative errno value otherwise and rte_errno is set.
1021  */
1022 int
1023 flow_dv_item_release(struct rte_eth_dev *dev,
1024                      const struct rte_flow_item_flex_handle *handle,
1025                      struct rte_flow_error *error)
1026 {
1027         struct mlx5_priv *priv = dev->data->dev_private;
1028         struct mlx5_flex_item *flex =
1029                 (struct mlx5_flex_item *)(uintptr_t)handle;
1030         uint32_t old_refcnt = 1;
1031         int rc;
1032
1033         MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1034         rte_spinlock_lock(&priv->flex_item_sl);
1035         if (mlx5_flex_index(priv, flex) < 0) {
1036                 rte_spinlock_unlock(&priv->flex_item_sl);
1037                 return rte_flow_error_set(error, EINVAL,
1038                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1039                                           "invalid flex item handle value");
1040         }
1041         if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
1042                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
1043                 rte_spinlock_unlock(&priv->flex_item_sl);
1044                 return rte_flow_error_set(error, EBUSY,
1045                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1046                                           "flex item has flow references");
1047         }
1048         /* Flex item is marked as invalid; we can leave the locked section. */
1049         rte_spinlock_unlock(&priv->flex_item_sl);
1050         MLX5_ASSERT(flex->devx_fp);
1051         rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
1052                                   &flex->devx_fp->entry);
1053         flex->devx_fp = NULL;
1054         mlx5_flex_free(priv, flex);
1055         if (rc < 0)
1056                 return rte_flow_error_set(error, EBUSY,
1057                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1058                                           "flex item release failure");
1059         return 0;
1060 }
1061
1062 /* DevX flex parser list callbacks. */
1063 struct mlx5_list_entry *
1064 mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
1065 {
1066         struct mlx5_dev_ctx_shared *sh = list_ctx;
1067         struct mlx5_flex_parser_devx *fp, *conf = ctx;
1068         int ret;
1069
1070         fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
1071                          0, SOCKET_ID_ANY);
1072         if (!fp)
1073                 return NULL;
1074         /* Copy the requested configurations. */
1075         fp->num_samples = conf->num_samples;
1076         memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
1077         /* Create DevX flex parser. */
1078         fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
1079                                                         &fp->devx_conf);
1080         if (!fp->devx_obj)
1081                 goto error;
1082         /* Query the firmware assigned sample ids. */
1083         ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
1084                                                 fp->sample_ids,
1085                                                 fp->num_samples);
1086         if (ret)
1087                 goto error;
1088         DRV_LOG(DEBUG, "DevX flex parser %p created, samples num: %u",
1089                 (const void *)fp, fp->num_samples);
1090         return &fp->entry;
1091 error:
1092         if (fp->devx_obj)
1093                 mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
1094         if (fp)
1095                 mlx5_free(fp);
1096         return NULL;
1097 }
1098
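/*
 * List match callback: returns 0 when the DevX configuration of the
 * existing entry equals the requested one, nonzero otherwise.
 */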
1099 int
1100 mlx5_flex_parser_match_cb(void *list_ctx,
1101                           struct mlx5_list_entry *iter, void *ctx)
1102 {
1103         struct mlx5_flex_parser_devx *fp =
1104                 container_of(iter, struct mlx5_flex_parser_devx, entry);
1105         struct mlx5_flex_parser_devx *org =
1106                 container_of(ctx, struct mlx5_flex_parser_devx, entry);
1107
1108         RTE_SET_USED(list_ctx);
1109         return !iter || !ctx || memcmp(&fp->devx_conf,
1110                                        &org->devx_conf,
1111                                        sizeof(fp->devx_conf));
1112 }
1113
1114 void
1115 mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
1116 {
1117         struct mlx5_flex_parser_devx *fp =
1118                 container_of(entry, struct mlx5_flex_parser_devx, entry);
1119
1120         RTE_SET_USED(list_ctx);
1121         MLX5_ASSERT(fp->devx_obj);
1122         claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
1123         DRV_LOG(DEBUG, "DevX flex parser %p destroyed", (const void *)fp);
1124         mlx5_free(entry);
1125 }
1126
1127 struct mlx5_list_entry *
1128 mlx5_flex_parser_clone_cb(void *list_ctx,
1129                           struct mlx5_list_entry *entry, void *ctx)
1130 {
1131         struct mlx5_flex_parser_devx *fp;
1132
1133         RTE_SET_USED(list_ctx);
1134         RTE_SET_USED(entry);
1135         fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
1136                          0, SOCKET_ID_ANY);
1137         if (!fp)
1138                 return NULL;
1139         memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
1140         return &fp->entry;
1141 }
1142
1143 void
1144 mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
1145 {
1146         struct mlx5_flex_parser_devx *fp =
1147                 container_of(entry, struct mlx5_flex_parser_devx, entry);
1148         RTE_SET_USED(list_ctx);
1149         mlx5_free(fp);
1150 }