/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */
#include <rte_malloc.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"

static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
              "Flex item maximal number exceeds uint32_t bit width");

/**
 * Routine called once on port initialization to initialize
 * the flex item related infrastructure.
 *
 * @param dev
 *   Ethernet device to perform flex item initialization.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_item_port_init(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        rte_spinlock_init(&priv->flex_item_sl);
        MLX5_ASSERT(!priv->flex_item_map);
        return 0;
}

/**
 * Routine called once on port close to perform flex item
 * related infrastructure cleanup.
 *
 * @param dev
 *   Ethernet device to perform cleanup.
 */
void
mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t i;

        for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
                if (priv->flex_item_map & (1u << i)) {
                        struct mlx5_flex_item *flex = &priv->flex_item[i];

                        claim_zero(mlx5_list_unregister
                                        (priv->sh->flex_parsers_dv,
                                         &flex->devx_fp->entry));
                        flex->devx_fp = NULL;
                        flex->refcnt = 0;
                        priv->flex_item_map &= ~(1u << i);
                }
        }
}

static int
mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
        uintptr_t start = (uintptr_t)&priv->flex_item[0];
        uintptr_t entry = (uintptr_t)item;
        uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);

        if (entry < start ||
            idx >= MLX5_PORT_FLEX_ITEM_NUM ||
            (entry - start) % sizeof(struct mlx5_flex_item) ||
            !(priv->flex_item_map & (1u << idx)))
                return -1;
        return (int)idx;
}

static struct mlx5_flex_item *
mlx5_flex_alloc(struct mlx5_priv *priv)
{
        struct mlx5_flex_item *item = NULL;

        rte_spinlock_lock(&priv->flex_item_sl);
        if (~priv->flex_item_map) {
                uint32_t idx = rte_bsf32(~priv->flex_item_map);

                if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
                        item = &priv->flex_item[idx];
                        MLX5_ASSERT(!item->refcnt);
                        MLX5_ASSERT(!item->devx_fp);
                        item->devx_fp = NULL;
                        __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
                        priv->flex_item_map |= 1u << idx;
                }
        }
        rte_spinlock_unlock(&priv->flex_item_sl);
        return item;
}

static void
mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
        int idx = mlx5_flex_index(priv, item);

        MLX5_ASSERT(idx >= 0 &&
                    idx < MLX5_PORT_FLEX_ITEM_NUM &&
                    (priv->flex_item_map & (1u << idx)));
        if (idx >= 0) {
                rte_spinlock_lock(&priv->flex_item_sl);
                MLX5_ASSERT(!item->refcnt);
                MLX5_ASSERT(!item->devx_fp);
                item->devx_fp = NULL;
                __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
                priv->flex_item_map &= ~(1u << idx);
                rte_spinlock_unlock(&priv->flex_item_sl);
        }
}

static uint32_t
mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
                       uint32_t pos, uint32_t width, uint32_t shift)
{
        const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
        uint32_t val, vbits;

        /* Process the bitfield start byte. */
        MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
        MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
        if (item->length <= pos / CHAR_BIT)
                return 0;
        val = *ptr++ >> (pos % CHAR_BIT);
        vbits = CHAR_BIT - pos % CHAR_BIT;
        pos = (pos + vbits) / CHAR_BIT;
        vbits = RTE_MIN(vbits, width);
        val &= RTE_BIT32(vbits) - 1;
        while (vbits < width && pos < item->length) {
                uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
                uint32_t tmp = *ptr++;

                pos++;
                tmp &= RTE_BIT32(part) - 1;
                val |= tmp << vbits;
                vbits += part;
        }
        return rte_bswap32(val << shift);
}
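
/*
 * Worked example (editorial illustration, hypothetical pattern bytes):
 * with pattern = {0xAB, 0xCD}, pos = 4, width = 8, shift = 0 the first
 * byte contributes 0xAB >> 4 = 0x0A (4 valid bits), the second byte
 * contributes 0xCD & 0x0F = 0x0D placed at bit 4, so val = 0xDA and
 * the function returns rte_bswap32(0xDA) = 0xDA000000, i.e. the
 * accumulated host-order value converted by the byte swap.
 */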

#define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
        do { \
                uint32_t tmp, out = (def); \
                tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
                               prog_sample_field_value_##x); \
                tmp = (tmp & ~out) | (val); \
                MLX5_SET(fte_match_set_misc4, misc4_v, \
                         prog_sample_field_value_##x, tmp); \
                tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
                               prog_sample_field_value_##x); \
                tmp = (tmp & ~out) | (msk); \
                MLX5_SET(fte_match_set_misc4, misc4_m, \
                         prog_sample_field_value_##x, tmp); \
                tmp = tmp ? (sid) : 0; \
                MLX5_SET(fte_match_set_misc4, misc4_v, \
                         prog_sample_field_id_##x, tmp); \
                MLX5_SET(fte_match_set_misc4, misc4_m, \
                         prog_sample_field_id_##x, tmp); \
        } while (0)

__rte_always_inline static void
mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
                           uint32_t def, uint32_t mask, uint32_t value,
                           uint32_t sample_id, uint32_t id)
{
        switch (id) {
        case 0:
                SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
                break;
        case 1:
                SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
                break;
        case 2:
                SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
                break;
        case 3:
                SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
                break;
        case 4:
                SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
                break;
        case 5:
                SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
                break;
        case 6:
                SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
                break;
        case 7:
                SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
                break;
        default:
                MLX5_ASSERT(false);
                break;
        }
#undef SET_FP_MATCH_SAMPLE_ID
}

/**
 * Translate item pattern into matcher fields according to translation
 * array.
 *
 * @param dev
 *   Ethernet device to translate flex item on.
 * @param[in, out] matcher
 *   Flow matcher to configure.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] is_inner
 *   Inner Flex Item (follows after tunnel header).
 */
void
mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
                              void *matcher, void *key,
                              const struct rte_flow_item *item,
                              bool is_inner)
{
        const struct rte_flow_item_flex *spec, *mask;
        void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                     misc_parameters_4);
        void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
        struct mlx5_flex_item *tp;
        uint32_t i, pos = 0;

        RTE_SET_USED(dev);
        MLX5_ASSERT(item->spec && item->mask);
        spec = item->spec;
        mask = item->mask;
        tp = (struct mlx5_flex_item *)spec->handle;
        MLX5_ASSERT(mlx5_flex_index(dev->data->dev_private, tp) >= 0);
        for (i = 0; i < tp->mapnum; i++) {
                struct mlx5_flex_pattern_field *map = tp->map + i;
                uint32_t id = map->reg_id;
                uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
                uint32_t val, msk;

                /* Skip placeholders for DUMMY fields. */
                if (id == MLX5_INVALID_SAMPLE_REG_ID) {
                        pos += map->width;
                        continue;
                }
                val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
                msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
                MLX5_ASSERT(map->width);
                MLX5_ASSERT(id < tp->devx_fp->num_samples);
                if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
                        uint32_t num_samples = tp->devx_fp->num_samples / 2;

                        MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
                        MLX5_ASSERT(id < num_samples);
                        id += num_samples;
                }
                mlx5_flex_set_match_sample(misc4_m, misc4_v,
                                           def, msk & def, val & msk & def,
                                           tp->devx_fp->sample_ids[id], id);
                pos += map->width;
        }
}

/**
 * Convert flex item handle (from the RTE flow) to flex item index on port.
 * Optionally can increment flex item object reference count.
 *
 * @param dev
 *   Ethernet device to acquire flex item on.
 * @param[in] handle
 *   Flow item handle from item spec.
 * @param[in] acquire
 *   If set - increment reference counter.
 *
 * @return
 *   >=0 - index on success, a negative errno value otherwise
 *         and rte_errno is set.
 */
int
mlx5_flex_acquire_index(struct rte_eth_dev *dev,
                        struct rte_flow_item_flex_handle *handle,
                        bool acquire)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
        int ret = mlx5_flex_index(priv, flex);

        if (ret < 0) {
                errno = EINVAL;
                rte_errno = EINVAL;
                return ret;
        }
        if (acquire)
                __atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
        return ret;
}

/**
 * Release flex item index on port - decrements the reference counter
 * of the item at the given index.
 *
 * @param dev
 *   Ethernet device to release flex item on.
 * @param[in] index
 *   Flow item index.
 *
 * @return
 *   0 - on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_release_index(struct rte_eth_dev *dev,
                        int index)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flex_item *flex;

        if (index < 0 || index >= MLX5_PORT_FLEX_ITEM_NUM ||
            !(priv->flex_item_map & (1u << index))) {
                errno = EINVAL;
                rte_errno = EINVAL;
                return -EINVAL;
        }
        flex = priv->flex_item + index;
        if (flex->refcnt <= 1) {
                MLX5_ASSERT(false);
                errno = EINVAL;
                rte_errno = EINVAL;
                return -EINVAL;
        }
        __atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
        return 0;
}

/*
 * Calculate largest mask value for a given shift.
 *
 *   shift      mask
 * ------- ---------------
 *    0     b111100  0x3C
 *    1     b111110  0x3E
 *    2     b111111  0x3F
 *    3     b011111  0x1F
 *    4     b001111  0x0F
 *    5     b000111  0x07
 */
static uint8_t
mlx5_flex_hdr_len_mask(uint8_t shift,
                       const struct mlx5_hca_flex_attr *attr)
{
        uint32_t base_mask;
        int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;

        base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
        return diff == 0 ? base_mask :
               diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
}
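
/*
 * Example (illustrative; assumes the base mask is 0x3F and
 * MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD == 2, matching the table
 * above): shift == 0 gives diff == -2, so the mask is
 * (0x3F << 2) & 0x3F = 0x3C; shift == 4 gives diff == 2, so the mask
 * is 0x3F >> 2 = 0x0F.
 */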

static int
mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
                           const struct rte_flow_item_flex_conf *conf,
                           struct mlx5_flex_parser_devx *devx,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_flex_field *field = &conf->next_header;
        struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
        uint32_t len_width, mask;

        if (field->field_base % CHAR_BIT)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "not byte aligned header length field");
        switch (field->field_mode) {
        case FIELD_MODE_DUMMY:
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "invalid header length field mode (DUMMY)");
        case FIELD_MODE_FIXED:
                if (!(attr->header_length_mode &
                    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported header length field mode (FIXED)");
                if (attr->header_length_mask_width < field->field_size)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "header length field width exceeds limit");
                if (field->offset_shift < 0 ||
                    field->offset_shift > attr->header_length_mask_width)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "invalid header length field shift (FIXED)");
                if (field->field_base < 0)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "negative header length field base (FIXED)");
                node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
                break;
        case FIELD_MODE_OFFSET:
                if (!(attr->header_length_mode &
                    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported header length field mode (OFFSET)");
                node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
                if (field->offset_mask == 0 ||
                    !rte_is_power_of_2(field->offset_mask + 1))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "invalid length field offset mask (OFFSET)");
                len_width = rte_fls_u32(field->offset_mask);
                if (len_width > attr->header_length_mask_width)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "length field offset mask too wide (OFFSET)");
                mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
                if (mask < field->offset_mask)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "length field shift too big (OFFSET)");
                node->header_length_field_mask = RTE_MIN(mask,
                                                         field->offset_mask);
                break;
        case FIELD_MODE_BITMASK:
                if (!(attr->header_length_mode &
                    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported header length field mode (BITMASK)");
                if (attr->header_length_mask_width < field->field_size)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "header length field width exceeds limit");
                node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
                mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
                if (mask < field->offset_mask)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "length field shift too big (BITMASK)");
                node->header_length_field_mask = RTE_MIN(mask,
                                                         field->offset_mask);
                break;
        default:
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "unknown header length field mode");
        }
        if (field->field_base / CHAR_BIT >= 0 &&
            field->field_base / CHAR_BIT > attr->max_base_header_length)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "header length field base exceeds limit");
        node->header_length_base_value = field->field_base / CHAR_BIT;
        if (field->field_mode == FIELD_MODE_OFFSET ||
            field->field_mode == FIELD_MODE_BITMASK) {
                if (field->offset_shift > 15 || field->offset_shift < 0)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "header length field shift exceeds limit");
                node->header_length_field_shift = field->offset_shift;
                node->header_length_field_offset = field->offset_base;
        }
        return 0;
}
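
/*
 * Configuration sketch (editorial illustration with hypothetical
 * values; the real limits come from the HCA flex attributes):
 *
 *      struct rte_flow_item_flex_field next_header = {
 *              .field_mode = FIELD_MODE_OFFSET,
 *              .field_base = 0,     // length field starts at bit 0
 *              .field_size = 4,     // 4-bit length field
 *              .offset_mask = 0xF,  // must be 2^n - 1
 *              .offset_shift = 2,   // length expressed in 4-byte words
 *      };
 *
 * With these values the OFFSET branch above selects
 * MLX5_GRAPH_NODE_LEN_FIELD and derives header_length_field_mask
 * from offset_mask and mlx5_flex_hdr_len_mask().
 */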

static int
mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
                         const struct rte_flow_item_flex_conf *conf,
                         struct mlx5_flex_parser_devx *devx,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item_flex_field *field = &conf->next_protocol;
        struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;

        switch (field->field_mode) {
        case FIELD_MODE_DUMMY:
                if (conf->nb_outputs)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "next protocol field is required (DUMMY)");
                return 0;
        case FIELD_MODE_FIXED:
                break;
        case FIELD_MODE_OFFSET:
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "unsupported next protocol field mode (OFFSET)");
        case FIELD_MODE_BITMASK:
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "unsupported next protocol field mode (BITMASK)");
        default:
                return rte_flow_error_set
                        (error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "unknown next protocol field mode");
        }
        MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
        if (!conf->nb_outputs)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "output link(s) required if next protocol field present");
        if (attr->max_next_header_offset < field->field_base)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "next protocol field base exceeds limit");
        if (field->offset_shift)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "unsupported next protocol field shift");
        node->next_header_field_offset = field->field_base;
        node->next_header_field_size = field->field_size;
        return 0;
}

/* Helper structure to handle field bit intervals. */
struct mlx5_flex_field_cover {
        uint16_t num;
        int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
        int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
        uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
};

static void
mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
                       uint16_t num, int32_t start, int32_t end)
{
        MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
        MLX5_ASSERT(num <= cover->num);
        if (num < cover->num) {
                memmove(&cover->start[num + 1], &cover->start[num],
                        (cover->num - num) * sizeof(int32_t));
                memmove(&cover->end[num + 1], &cover->end[num],
                        (cover->num - num) * sizeof(int32_t));
        }
        cover->start[num] = start;
        cover->end[num] = end;
        cover->num++;
}

static void
mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
{
        uint32_t i, del = 0;
        int32_t end;

        MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
        MLX5_ASSERT(num < (cover->num - 1));
        end = cover->end[num];
        for (i = num + 1; i < cover->num; i++) {
                if (end < cover->start[i])
                        break;
                del++;
                if (end <= cover->end[i]) {
                        cover->end[num] = cover->end[i];
                        break;
                }
        }
        if (del) {
                MLX5_ASSERT(del < (cover->num - 1u - num));
                cover->num -= del;
                MLX5_ASSERT(cover->num > num);
                if ((cover->num - num) > 1) {
                        memmove(&cover->start[num + 1],
                                &cover->start[num + 1 + del],
                                (cover->num - num - 1) * sizeof(int32_t));
                        memmove(&cover->end[num + 1],
                                &cover->end[num + 1 + del],
                                (cover->num - num - 1) * sizeof(int32_t));
                }
        }
}
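
/*
 * Merge example (illustration only): with intervals
 * {[0,16) [8,32) [40,48)} and num == 0, [8,32) is absorbed, leaving
 * {[0,32) [40,48)}; [40,48) stays intact because it starts beyond the
 * merged end.
 */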

/*
 * Validate the sample field and update interval array
 * if parameters match with the "match" field.
 * Returns:
 *    < 0  - error
 *    == 0 - no match, interval array not updated
 *    > 0  - match, interval array updated
 */
static int
mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
                       struct rte_flow_item_flex_field *field,
                       struct rte_flow_item_flex_field *match,
                       struct mlx5_hca_flex_attr *attr,
                       struct rte_flow_error *error)
{
        int32_t start, end;
        uint32_t i;

        switch (field->field_mode) {
        case FIELD_MODE_DUMMY:
                return 0;
        case FIELD_MODE_FIXED:
                if (!(attr->sample_offset_mode &
                    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
                        return rte_flow_error_set
                                (error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported sample field mode (FIXED)");
                if (field->offset_shift)
                        return rte_flow_error_set
                                (error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "invalid sample field shift (FIXED)");
                if (field->field_base < 0)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "invalid sample field base (FIXED)");
                if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "sample field base exceeds limit (FIXED)");
                break;
        case FIELD_MODE_OFFSET:
                if (!(attr->sample_offset_mode &
                    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
                        return rte_flow_error_set
                                (error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported sample field mode (OFFSET)");
                if (field->field_base / CHAR_BIT >= 0 &&
                    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "sample field base exceeds limit");
                break;
        case FIELD_MODE_BITMASK:
                if (!(attr->sample_offset_mode &
                    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
                        return rte_flow_error_set
                                (error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported sample field mode (BITMASK)");
                if (field->field_base / CHAR_BIT >= 0 &&
                    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "sample field base exceeds limit");
                break;
        default:
                return rte_flow_error_set
                        (error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "unknown data sample field mode");
        }
        if (!match) {
                if (!field->field_size)
                        return rte_flow_error_set
                                (error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "zero sample field width");
                if (field->field_id)
                        DRV_LOG(DEBUG, "sample field id hint ignored");
        } else {
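                /*
                 * Note: the bitwise OR below rejects any pair where
                 * either field carries a nonzero offset setup, so only
                 * fields with an all-zero offset configuration (as in
                 * FIXED mode) are merged into one interval array.
                 */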
                if (field->field_mode != match->field_mode ||
                    field->offset_base | match->offset_base ||
                    field->offset_mask | match->offset_mask ||
                    field->offset_shift | match->offset_shift)
                        return 0;
        }
        start = field->field_base;
        end = start + field->field_size;
        /* Add the new or similar field to interval array. */
        if (!cover->num) {
                cover->start[cover->num] = start;
                cover->end[cover->num] = end;
                cover->num = 1;
                return 1;
        }
        for (i = 0; i < cover->num; i++) {
                if (start > cover->end[i]) {
                        if (i >= (cover->num - 1u)) {
                                mlx5_flex_insert_field(cover, cover->num,
                                                       start, end);
                                break;
                        }
                        continue;
                }
                if (end < cover->start[i]) {
                        mlx5_flex_insert_field(cover, i, start, end);
                        break;
                }
                if (start < cover->start[i])
                        cover->start[i] = start;
                if (end > cover->end[i]) {
                        cover->end[i] = end;
                        if (i < (cover->num - 1u))
                                mlx5_flex_merge_field(cover, i);
                }
                break;
        }
        return 1;
}

static void
mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
                        struct rte_flow_item_flex_field *field,
                        enum rte_flow_item_flex_tunnel_mode tunnel_mode)
{
        memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
        na->flow_match_sample_en = 1;
        switch (field->field_mode) {
        case FIELD_MODE_FIXED:
                na->flow_match_sample_offset_mode =
                        MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
                break;
        case FIELD_MODE_OFFSET:
                na->flow_match_sample_offset_mode =
                        MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
                na->flow_match_sample_field_offset = field->offset_base;
                na->flow_match_sample_field_offset_mask = field->offset_mask;
                na->flow_match_sample_field_offset_shift = field->offset_shift;
                break;
        case FIELD_MODE_BITMASK:
                na->flow_match_sample_offset_mode =
                        MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
                na->flow_match_sample_field_offset = field->offset_base;
                na->flow_match_sample_field_offset_mask = field->offset_mask;
                na->flow_match_sample_field_offset_shift = field->offset_shift;
                break;
        default:
                MLX5_ASSERT(false);
                break;
        }
        switch (tunnel_mode) {
        case FLEX_TUNNEL_MODE_SINGLE:
                /* Fallthrough */
        case FLEX_TUNNEL_MODE_TUNNEL:
                na->flow_match_sample_tunnel_mode =
                        MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
                break;
        case FLEX_TUNNEL_MODE_MULTI:
                /* Fallthrough */
        case FLEX_TUNNEL_MODE_OUTER:
                na->flow_match_sample_tunnel_mode =
                        MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
                break;
        case FLEX_TUNNEL_MODE_INNER:
                na->flow_match_sample_tunnel_mode =
                        MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
                break;
        default:
                MLX5_ASSERT(false);
                break;
        }
}

/* Map specified field to set/subset of allocated sample registers. */
static int
mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
                     struct mlx5_flex_parser_devx *parser,
                     struct mlx5_flex_item *item,
                     struct rte_flow_error *error)
{
        struct mlx5_devx_match_sample_attr node;
        int32_t start = field->field_base;
        int32_t end = start + field->field_size;
        struct mlx5_flex_pattern_field *trans;
        uint32_t i, done_bits = 0;

        if (field->field_mode == FIELD_MODE_DUMMY) {
                done_bits = field->field_size;
                while (done_bits) {
                        uint32_t part = RTE_MIN(done_bits,
                                                sizeof(uint32_t) * CHAR_BIT);
                        if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
                                return rte_flow_error_set
                                        (error,
                                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                         "too many flex item pattern translations");
                        trans = &item->map[item->mapnum];
                        trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
                        trans->shift = 0;
                        trans->width = part;
                        item->mapnum++;
                        done_bits -= part;
                }
                return 0;
        }
        mlx5_flex_config_sample(&node, field, item->tunnel_mode);
        for (i = 0; i < parser->num_samples; i++) {
                struct mlx5_devx_match_sample_attr *sample =
                        &parser->devx_conf.sample[i];
                int32_t reg_start, reg_end;
                int32_t cov_start, cov_end;

                MLX5_ASSERT(sample->flow_match_sample_en);
                if (!sample->flow_match_sample_en)
                        break;
                node.flow_match_sample_field_base_offset =
                        sample->flow_match_sample_field_base_offset;
                if (memcmp(&node, sample, sizeof(node)))
                        continue;
                reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
                reg_start *= CHAR_BIT;
                reg_end = reg_start + 32;
                if (end <= reg_start || start >= reg_end)
                        continue;
                cov_start = RTE_MAX(reg_start, start);
                cov_end = RTE_MIN(reg_end, end);
                MLX5_ASSERT(cov_end > cov_start);
                done_bits += cov_end - cov_start;
                if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "too many flex item pattern translations");
                trans = &item->map[item->mapnum];
                item->mapnum++;
                trans->reg_id = i;
                trans->shift = cov_start - reg_start;
                trans->width = cov_end - cov_start;
        }
        if (done_bits != field->field_size) {
                MLX5_ASSERT(false);
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "failed to map field to sample register");
        }
        return 0;
}
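
/*
 * Mapping example (hypothetical layout): a 48-bit field starting at
 * bit 16 that is covered by two 32-bit samples based at bytes 0 and 4
 * produces two translation entries:
 * {reg_id 0, shift 16, width 16} and {reg_id 1, shift 0, width 32}.
 */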

/* Allocate sample registers for the specified field type and interval array. */
static int
mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
                       struct mlx5_flex_parser_devx *parser,
                       struct mlx5_flex_item *item,
                       struct rte_flow_item_flex_field *field,
                       struct mlx5_hca_flex_attr *attr,
                       struct rte_flow_error *error)
{
        struct mlx5_devx_match_sample_attr node;
        uint32_t idx = 0;

        mlx5_flex_config_sample(&node, field, item->tunnel_mode);
        while (idx < cover->num) {
                int32_t start, end;

                /*
                 * Sample base offsets are expressed in bytes and must be
                 * 32-bit aligned, as required by the firmware for samples.
                 */
                start = RTE_ALIGN_FLOOR(cover->start[idx],
                                        sizeof(uint32_t) * CHAR_BIT);
                node.flow_match_sample_field_base_offset =
                                                (start / CHAR_BIT) & 0xFF;
                /* Allocate sample register. */
                if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
                    parser->num_samples >= attr->max_num_sample ||
                    parser->num_samples >= attr->max_num_prog_sample)
                        return rte_flow_error_set
                                (error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "no sample registers to handle all flex item fields");
                parser->devx_conf.sample[parser->num_samples] = node;
                parser->num_samples++;
                /* Remove or update covered intervals. */
                end = start + 32;
                while (idx < cover->num) {
                        if (end >= cover->end[idx]) {
                                idx++;
                                continue;
                        }
                        if (end > cover->start[idx])
                                cover->start[idx] = end;
                        break;
                }
        }
        return 0;
}
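
/*
 * Allocation example (illustration only): a single interval [8, 72)
 * is covered by three 32-bit samples based at bytes 0, 4 and 8; each
 * pass aligns the sample base down to a 32-bit boundary and trims the
 * interval start until the whole range is covered.
 */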

static int
mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
                           const struct rte_flow_item_flex_conf *conf,
                           struct mlx5_flex_parser_devx *parser,
                           struct mlx5_flex_item *item,
                           struct rte_flow_error *error)
{
        struct mlx5_flex_field_cover cover;
        uint32_t i, j;
        int ret;

        switch (conf->tunnel) {
        case FLEX_TUNNEL_MODE_SINGLE:
                /* Fallthrough */
        case FLEX_TUNNEL_MODE_OUTER:
                /* Fallthrough */
        case FLEX_TUNNEL_MODE_INNER:
                /* Fallthrough */
        case FLEX_TUNNEL_MODE_MULTI:
                /* Fallthrough */
        case FLEX_TUNNEL_MODE_TUNNEL:
                break;
        default:
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "unrecognized tunnel mode");
        }
        item->tunnel_mode = conf->tunnel;
        if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "sample field number exceeds limit");
        /*
         * The application can specify fields smaller or larger than the
         * 32 bits covered by a single sample register, and can specify
         * field offsets in any order.
         *
         * Gather all similar fields together, build an array of bit
         * intervals in ascending order and try to cover them with the
         * smallest set of sample registers.
         */
        memset(&cover, 0, sizeof(cover));
        for (i = 0; i < conf->nb_samples; i++) {
                struct rte_flow_item_flex_field *fl = conf->sample_data + i;

                /* Check whether field was covered in the previous iteration. */
                if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
                        continue;
                if (fl->field_mode == FIELD_MODE_DUMMY)
                        continue;
                /* Build an interval array for the field and similar ones. */
                cover.num = 0;
                /* Add the first field to array unconditionally. */
                ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
                if (ret < 0)
                        return ret;
                MLX5_ASSERT(ret > 0);
                cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
                for (j = i + 1; j < conf->nb_samples; j++) {
                        struct rte_flow_item_flex_field *ft;

                        /* Add field to array if its type matches. */
                        ft = conf->sample_data + j;
                        ret = mlx5_flex_cover_sample(&cover, ft, fl,
                                                     attr, error);
                        if (ret < 0)
                                return ret;
                        if (!ret)
                                continue;
                        cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
                }
                /* Allocate sample registers to cover array of intervals. */
                ret = mlx5_flex_alloc_sample(&cover, parser, item,
                                             fl, attr, error);
                if (ret)
                        return ret;
        }
        /* Build the item pattern translating data on flow creation. */
        item->mapnum = 0;
        memset(&item->map, 0, sizeof(item->map));
        for (i = 0; i < conf->nb_samples; i++) {
                struct rte_flow_item_flex_field *fl = conf->sample_data + i;

                ret = mlx5_flex_map_sample(fl, parser, item, error);
                if (ret) {
                        MLX5_ASSERT(false);
                        return ret;
                }
        }
        if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
                /*
                 * In FLEX_TUNNEL_MODE_MULTI mode the PMD creates two sets
                 * of samples. The first set is for the outer and the second
                 * set for the inner flex flow item. Outer and inner samples
                 * differ only in tunnel_mode.
                 */
                if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "no sample registers for inner");
                rte_memcpy(parser->devx_conf.sample + parser->num_samples,
                           parser->devx_conf.sample,
                           parser->num_samples *
                                        sizeof(parser->devx_conf.sample[0]));
                for (i = 0; i < parser->num_samples; i++) {
                        struct mlx5_devx_match_sample_attr *sm =
                                parser->devx_conf.sample +
                                parser->num_samples + i;

                        sm->flow_match_sample_tunnel_mode =
                                                MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
                }
                parser->num_samples *= 2;
        }
        return 0;
}

static int
mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
{
        switch (type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                return MLX5_GRAPH_ARC_NODE_MAC;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
        case RTE_FLOW_ITEM_TYPE_UDP:
                return MLX5_GRAPH_ARC_NODE_UDP;
        case RTE_FLOW_ITEM_TYPE_TCP:
                return MLX5_GRAPH_ARC_NODE_TCP;
        case RTE_FLOW_ITEM_TYPE_MPLS:
                return MLX5_GRAPH_ARC_NODE_MPLS;
        case RTE_FLOW_ITEM_TYPE_GRE:
                return MLX5_GRAPH_ARC_NODE_GRE;
        case RTE_FLOW_ITEM_TYPE_GENEVE:
                return MLX5_GRAPH_ARC_NODE_GENEVE;
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
        default:
                return -EINVAL;
        }
}

static int
mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
                     struct rte_flow_error *error)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };

        if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                         "invalid eth item mask");
        }
        return rte_be_to_cpu_16(spec->hdr.ether_type);
}

static int
mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
                     struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };

        if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                         "invalid udp item mask");
        }
        return rte_be_to_cpu_16(spec->hdr.dst_port);
}

static int
mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
                           const struct rte_flow_item_flex_conf *conf,
                           struct mlx5_flex_parser_devx *devx,
                           struct mlx5_flex_item *item,
                           struct rte_flow_error *error)
{
        struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
        uint32_t i;

        RTE_SET_USED(item);
        if (conf->nb_inputs > attr->max_num_arc_in)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "too many input links");
        for (i = 0; i < conf->nb_inputs; i++) {
                struct mlx5_devx_graph_arc_attr *arc = node->in + i;
                struct rte_flow_item_flex_link *link = conf->input_link + i;
                const struct rte_flow_item *rte_item = &link->item;
                int arc_type;
                int ret;

                if (!rte_item->spec || !rte_item->mask || rte_item->last)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "invalid flex item IN arc format");
                arc_type = mlx5_flex_arc_type(rte_item->type, true);
                if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported flex item IN arc type");
                arc->arc_parse_graph_node = arc_type;
                arc->start_inner_tunnel = 0;
                /*
                 * Configure arc IN condition value. The value location
                 * depends on the protocol. The current FW version supports
                 * Ethernet & UDP IN arcs only, and locations for these
                 * protocols are defined. Add more protocols when available.
                 */
                switch (rte_item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flex_arc_in_eth(rte_item, error);
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flex_arc_in_udp(rte_item, error);
                        break;
                default:
                        MLX5_ASSERT(false);
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported flex item IN arc type");
                }
                if (ret < 0)
                        return ret;
                arc->compare_condition_value = (uint16_t)ret;
        }
        return 0;
}
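
/*
 * IN-arc example (hypothetical ether type): linking the flex item
 * after Ethernet with spec.hdr.ether_type = RTE_BE16(0x88B5) and the
 * full ether_type mask sets arc_parse_graph_node to
 * MLX5_GRAPH_ARC_NODE_MAC and compare_condition_value to 0x88B5.
 */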

static int
mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
                            const struct rte_flow_item_flex_conf *conf,
                            struct mlx5_flex_parser_devx *devx,
                            struct mlx5_flex_item *item,
                            struct rte_flow_error *error)
{
        struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
        bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
        uint32_t i;

        RTE_SET_USED(item);
        if (conf->nb_outputs > attr->max_num_arc_out)
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                         "too many output links");
        for (i = 0; i < conf->nb_outputs; i++) {
                struct mlx5_devx_graph_arc_attr *arc = node->out + i;
                struct rte_flow_item_flex_link *link = conf->output_link + i;
                const struct rte_flow_item *rte_item = &link->item;
                int arc_type;

                if (rte_item->spec || rte_item->mask || rte_item->last)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "flex node: invalid OUT arc format");
                arc_type = mlx5_flex_arc_type(rte_item->type, false);
                if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "unsupported flex item OUT arc type");
                arc->arc_parse_graph_node = arc_type;
                arc->start_inner_tunnel = !!is_tunnel;
                arc->compare_condition_value = link->next;
        }
        return 0;
}

/* Translate RTE flex item API configuration into flex parser settings. */
static int
mlx5_flex_translate_conf(struct rte_eth_dev *dev,
                         const struct rte_flow_item_flex_conf *conf,
                         struct mlx5_flex_parser_devx *devx,
                         struct mlx5_flex_item *item,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hca_flex_attr *attr = &priv->config.hca_attr.flex;
        int ret;

        ret = mlx5_flex_translate_length(attr, conf, devx, error);
        if (ret)
                return ret;
        ret = mlx5_flex_translate_next(attr, conf, devx, error);
        if (ret)
                return ret;
        ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
        if (ret)
                return ret;
        ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
        if (ret)
                return ret;
        ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
        if (ret)
                return ret;
        return 0;
}

/**
 * Create the flex item with specified configuration over the Ethernet device.
 *
 * @param dev
 *   Ethernet device to create flex item on.
 * @param[in] conf
 *   Flex item configuration.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
 */
struct rte_flow_item_flex_handle *
flow_dv_item_create(struct rte_eth_dev *dev,
                    const struct rte_flow_item_flex_conf *conf,
                    struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
        struct mlx5_flex_item *flex;
        struct mlx5_list_entry *ent;

        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        flex = mlx5_flex_alloc(priv);
        if (!flex) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "too many flex items created on the port");
                return NULL;
        }
        if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
                goto error;
        ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
        if (!ent) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "flex item creation failure");
                goto error;
        }
        flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
        /* Mark initialized flex item valid. */
        __atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
        return (struct rte_flow_item_flex_handle *)flex;

error:
        mlx5_flex_free(priv, flex);
        return NULL;
}
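
/*
 * Usage sketch (editorial illustration; this entry point is normally
 * reached through the generic rte_flow API, the field values here are
 * hypothetical):
 *
 *      struct rte_flow_error error;
 *      struct rte_flow_item_flex_conf conf = {
 *              .tunnel = FLEX_TUNNEL_MODE_SINGLE,
 *              // next_header, sample_data, input_link and output_link
 *              // are filled in by the application
 *      };
 *      struct rte_flow_item_flex_handle *handle =
 *              rte_flow_flex_item_create(port_id, &conf, &error);
 *
 * On mlx5 ports the call lands here, allocates a per-port flex item
 * slot and registers (or reuses) a DevX parser via the shared list.
 */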

/**
 * Release the flex item on the specified Ethernet device.
 *
 * @param dev
 *   Ethernet device to destroy flex item on.
 * @param[in] handle
 *   Handle of the item existing on the specified device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_dv_item_release(struct rte_eth_dev *dev,
                     const struct rte_flow_item_flex_handle *handle,
                     struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flex_item *flex =
                (struct mlx5_flex_item *)(uintptr_t)handle;
        uint32_t old_refcnt = 1;
        int rc;

        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        rte_spinlock_lock(&priv->flex_item_sl);
        if (mlx5_flex_index(priv, flex) < 0) {
                rte_spinlock_unlock(&priv->flex_item_sl);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "invalid flex item handle value");
        }
        if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                rte_spinlock_unlock(&priv->flex_item_sl);
                return rte_flow_error_set(error, EBUSY,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "flex item has flow references");
        }
        /* Flex item is marked as invalid, we can leave the locked section. */
        rte_spinlock_unlock(&priv->flex_item_sl);
        MLX5_ASSERT(flex->devx_fp);
        rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
                                  &flex->devx_fp->entry);
        flex->devx_fp = NULL;
        mlx5_flex_free(priv, flex);
        if (rc < 0)
                return rte_flow_error_set(error, EBUSY,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "flex item release failure");
        return 0;
}

/* DevX flex parser list callbacks. */
struct mlx5_list_entry *
mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
{
        struct mlx5_dev_ctx_shared *sh = list_ctx;
        struct mlx5_flex_parser_devx *fp, *conf = ctx;
        int ret;

        fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
                         0, SOCKET_ID_ANY);
        if (!fp)
                return NULL;
        /* Copy the requested configurations. */
        fp->num_samples = conf->num_samples;
        memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
        /* Create DevX flex parser. */
        fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
                                                        &fp->devx_conf);
        if (!fp->devx_obj)
                goto error;
        /* Query the firmware assigned sample ids. */
        ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
                                                fp->sample_ids,
                                                fp->num_samples);
        if (ret)
                goto error;
        DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
                (const void *)fp, fp->num_samples);
        return &fp->entry;
error:
        if (fp->devx_obj)
                mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
        mlx5_free(fp);
        return NULL;
}

int
mlx5_flex_parser_match_cb(void *list_ctx,
                          struct mlx5_list_entry *iter, void *ctx)
{
        struct mlx5_flex_parser_devx *fp =
                container_of(iter, struct mlx5_flex_parser_devx, entry);
        struct mlx5_flex_parser_devx *org =
                container_of(ctx, struct mlx5_flex_parser_devx, entry);

        RTE_SET_USED(list_ctx);
        return !iter || !ctx || memcmp(&fp->devx_conf,
                                       &org->devx_conf,
                                       sizeof(fp->devx_conf));
}

void
mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_flex_parser_devx *fp =
                container_of(entry, struct mlx5_flex_parser_devx, entry);

        RTE_SET_USED(list_ctx);
        MLX5_ASSERT(fp->devx_obj);
        claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
        DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
        mlx5_free(entry);
}

struct mlx5_list_entry *
mlx5_flex_parser_clone_cb(void *list_ctx,
                          struct mlx5_list_entry *entry, void *ctx)
{
        struct mlx5_flex_parser_devx *fp;

        RTE_SET_USED(list_ctx);
        RTE_SET_USED(entry);
        fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
                         0, SOCKET_ID_ANY);
        if (!fp)
                return NULL;
        memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
        return &fp->entry;
}

void
mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_flex_parser_devx *fp =
                container_of(entry, struct mlx5_flex_parser_devx, entry);

        RTE_SET_USED(list_ctx);
        mlx5_free(fp);
}