net/octeontx2: add flow operations
drivers/net/octeontx2/otx2_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
#include "otx2_flow.h"

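/*
 * Build the non-LDATA (layer type/flag) nibbles of the MCAM search key from
 * the parse state, copy the result into the flow's mcam_data/mcam_mask and
 * hand the flow off to MCAM entry allocation and programming.
 */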
static int
flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
                 struct otx2_npc_flow_info *flow_info)
{
        /* This is the non-LDATA part of the search key */
        uint64_t key_data[2] = {0ULL, 0ULL};
        uint64_t key_mask[2] = {0ULL, 0ULL};
        int intf = pst->flow->nix_intf;
        int key_len, bit = 0, index;
        int off, idx, data_off = 0;
        uint8_t lid, mask, data;
        uint16_t layer_info;
        uint64_t lt, flags;

        /* Skip till the start of Layer A data */
        while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
                if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
                        data_off++;
                bit++;
        }

        /* Each bit represents 1 nibble */
        data_off *= 4;

        index = 0;
        for (lid = 0; lid < NPC_MAX_LID; lid++) {
                /* Offset in key */
                off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
                lt = pst->lt[lid] & 0xf;
                flags = pst->flags[lid] & 0xff;

                /* NPC_LAYER_KEX_S */
                layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);

                if (layer_info) {
                        for (idx = 0; idx <= 2; idx++) {
                                if (layer_info & (1 << idx)) {
                                        if (idx == 2)
                                                data = lt;
                                        else if (idx == 1)
                                                data = ((flags >> 4) & 0xf);
                                        else
                                                data = (flags & 0xf);

                                        if (data_off >= 64) {
                                                data_off = 0;
                                                index++;
                                        }
                                        key_data[index] |= ((uint64_t)data <<
                                                            data_off);
                                        mask = 0xf;
                                        if (lt == 0)
                                                mask = 0;
                                        key_mask[index] |= ((uint64_t)mask <<
                                                            data_off);
                                        data_off += 4;
                                }
                        }
                }
        }

        otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
                     key_data[0], key_data[1]);

        /* Copy this into mcam string */
        key_len = (pst->npc->keyx_len[intf] + 7) / 8;
        otx2_npc_dbg("Key_len  = %d", key_len);
        memcpy(pst->flow->mcam_data, key_data, key_len);
        memcpy(pst->flow->mcam_mask, key_mask, key_len);

        otx2_npc_dbg("Final flow data");
        for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
                otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
                             idx, pst->flow->mcam_data[idx],
                             idx, pst->flow->mcam_mask[idx]);
        }

        /*
         * Now we have the mcam data and mask formatted as
         * [Key_len/4 nibbles][0 or 1 nibble hole][data]
         * The hole is present if key_len is an odd number of nibbles.
         * The mcam data must be split into 64-bit + 48-bit segments
         * for each bank, W0 and W1.
         */

        return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
}

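/*
 * Validate the generic rte_flow attributes (no groups, priority within
 * range, exactly one direction) and record the NIX interface and priority
 * in the flow.
 */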
static int
flow_parse_attr(struct rte_eth_dev *eth_dev,
                const struct rte_flow_attr *attr,
                struct rte_flow_error *error,
                struct rte_flow *flow)
{
        struct otx2_eth_dev *dev = eth_dev->data->dev_private;
        const char *errmsg = NULL;

        if (attr == NULL)
                errmsg = "Attribute can't be empty";
        else if (attr->group)
                errmsg = "Groups are not supported";
        else if (attr->priority >= dev->npc_flow.flow_max_priority)
                errmsg = "Priority should be within the specified range";
        else if ((!attr->egress && !attr->ingress) ||
                 (attr->egress && attr->ingress))
                errmsg = "Exactly one of ingress or egress must be set";

        if (errmsg != NULL) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                   attr, errmsg);
                return -ENOTSUP;
        }

        if (attr->ingress)
                flow->nix_intf = OTX2_INTF_RX;
        else
                flow->nix_intf = OTX2_INTF_TX;

        flow->priority = attr->priority;
        return 0;
}

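/*
 * Scan the RSS group bitmap for the first unused group.
 * On success *pos holds the free group index; returns -1 if all are in use.
 */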
static inline int
flow_get_free_rss_grp(struct rte_bitmap *bmap,
                      uint32_t size, uint32_t *pos)
{
        for (*pos = 0; *pos < size; ++*pos) {
                if (!rte_bitmap_get(bmap, *pos))
                        break;
        }

        return *pos < size ? 0 : -1;
}

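/*
 * Reserve a free RSS group (group 0 is not usable for a flow RSS action),
 * program the RSS key and indirection table for that group, and return
 * the flow key algorithm index to be encoded in the RSS action.
 */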
static int
flow_configure_rss_action(struct otx2_eth_dev *dev,
                          const struct rte_flow_action_rss *rss,
                          uint8_t *alg_idx, uint32_t *rss_grp,
                          int mcam_index)
{
        struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
        uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
        uint32_t flowkey_cfg, grp_aval, i;
        uint16_t *ind_tbl = NULL;
        uint8_t flowkey_algx;
        int rc;

        rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
                                   flow_info->rss_grps, &grp_aval);
        /* RSS group 0 is not usable for a flow RSS action */
        if (rc < 0 || grp_aval == 0)
                return -ENOSPC;

        *rss_grp = grp_aval;

        otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
                             rss->key_len);

        /* If the queue count passed in the RSS action is less than the
         * HW-configured RETA size, replicate the RSS action's queue list
         * across the HW RETA table.
         */
        if (dev->rss_info.rss_size > rss->queue_num) {
                ind_tbl = reta;

                for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
                        memcpy(reta + i * rss->queue_num, rss->queue,
                               sizeof(uint16_t) * rss->queue_num);

                i = dev->rss_info.rss_size % rss->queue_num;
                if (i)
                        memcpy(&reta[dev->rss_info.rss_size - i],
                               rss->queue, i * sizeof(uint16_t));
        } else {
                ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
        }

        rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
        if (rc) {
                otx2_err("Failed to init rss table rc = %d", rc);
                return rc;
        }

        flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);

        rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
                             *rss_grp, mcam_index);
        if (rc) {
                otx2_err("Failed to set rss hash function rc = %d", rc);
                return rc;
        }

        *alg_idx = flowkey_algx;

        rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);

        return 0;
}

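/*
 * If the action list contains an RSS action, configure an RSS group for it
 * and encode the algorithm index and group into the flow's NPC action word.
 */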
static int
flow_program_rss_action(struct rte_eth_dev *eth_dev,
                        const struct rte_flow_action actions[],
                        struct rte_flow *flow)
{
        struct otx2_eth_dev *dev = eth_dev->data->dev_private;
        const struct rte_flow_action_rss *rss;
        uint32_t rss_grp;
        uint8_t alg_idx;
        int rc;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        rss = (const struct rte_flow_action_rss *)actions->conf;

                        rc = flow_configure_rss_action(dev,
                                                       rss, &alg_idx, &rss_grp,
                                                       flow->mcam_id);
                        if (rc)
                                return rc;

                        flow->npc_action |=
                                ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
                                 NIX_RSS_ACT_ALG_OFFSET) |
                                ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
                                 NIX_RSS_ACT_GRP_OFFSET);
                }
        }
        return 0;
}

static int
flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
{
        otx2_npc_dbg("Meta Item");
        return 0;
}

/*
 * Parse function of each layer:
 *  - Consume one or more pattern items that are relevant.
 *  - Update parse_state.
 *  - Set parse_state.pattern = last item consumed.
 *  - Set appropriate error code/message when returning error.
 */
typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);

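/*
 * Walk the pattern item list through the per-layer parse stages (meta,
 * LA..LH), letting each stage fill in its part of the flow's MCAM
 * data/mask, and fail if items remain after all stages have run.
 */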
static int
flow_parse_pattern(struct rte_eth_dev *dev,
                   const struct rte_flow_item pattern[],
                   struct rte_flow_error *error,
                   struct rte_flow *flow,
                   struct otx2_parse_state *pst)
{
        flow_parse_stage_func_t parse_stage_funcs[] = {
                flow_parse_meta_items,
                otx2_flow_parse_la,
                otx2_flow_parse_lb,
                otx2_flow_parse_lc,
                otx2_flow_parse_ld,
                otx2_flow_parse_le,
                otx2_flow_parse_lf,
                otx2_flow_parse_lg,
                otx2_flow_parse_lh,
        };
        struct otx2_eth_dev *hw = dev->data->dev_private;
        uint8_t layer = 0;
        int key_offset;
        int rc;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "pattern is NULL");
                return -EINVAL;
        }

        memset(pst, 0, sizeof(*pst));
        pst->npc = &hw->npc_flow;
        pst->error = error;
        pst->flow = flow;

        /* Use integral byte offset */
        key_offset = pst->npc->keyx_len[flow->nix_intf];
        key_offset = (key_offset + 7) / 8;

        /* Location where LDATA would begin */
        pst->mcam_data = (uint8_t *)flow->mcam_data;
        pst->mcam_mask = (uint8_t *)flow->mcam_mask;

        while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
               layer < RTE_DIM(parse_stage_funcs)) {
                otx2_npc_dbg("Pattern type = %d", pattern->type);

                /* Skip place-holders */
                pattern = otx2_flow_skip_void_and_any_items(pattern);

                pst->pattern = pattern;
                otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
                rc = parse_stage_funcs[layer](pst);
                if (rc != 0)
                        return -rte_errno;

                layer++;

                /*
                 * Parse stage function sets pst->pattern to
                 * 1 past the last item it consumed.
                 */
                pattern = pst->pattern;

                if (pst->terminate)
                        break;
        }

        /* Skip trailing place-holders */
        pattern = otx2_flow_skip_void_and_any_items(pattern);

        /* Are there more items than what we can handle? */
        if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                   "unsupported item in the sequence");
                return -ENOTSUP;
        }

        return 0;
}

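/*
 * Validate attributes, actions and pattern in that order; each step fills
 * in the corresponding parts of the flow and parse state.
 */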
static int
flow_parse_rule(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct rte_flow *flow,
                struct otx2_parse_state *pst)
{
        int err;

        /* Check attributes */
        err = flow_parse_attr(dev, attr, error, flow);
        if (err)
                return err;

        /* Check actions */
        err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
        if (err)
                return err;

        /* Check pattern */
        err = flow_parse_pattern(dev, pattern, error, flow, pst);
        if (err)
                return err;

        /* Check for overlaps? */
        return 0;
}

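/*
 * rte_flow validate callback: run the full parse on a temporary flow
 * without touching the hardware MCAM.
 */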
static int
otx2_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct otx2_parse_state parse_state;
        struct rte_flow flow;

        memset(&flow, 0, sizeof(flow));
        return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
                               &parse_state);
}

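/*
 * rte_flow create callback: parse the rule, program the NPC MCAM entry and
 * any RSS action, then insert the flow into the per-priority list in
 * ascending order of MCAM entry index.
 */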
static struct rte_flow *
otx2_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct otx2_eth_dev *hw = dev->data->dev_private;
        struct otx2_parse_state parse_state;
        struct otx2_mbox *mbox = hw->mbox;
        struct rte_flow *flow, *flow_iter;
        struct otx2_flow_list *list;
        int rc;

        flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Memory allocation failed");
                return NULL;
        }
        memset(flow, 0, sizeof(*flow));

        rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
                             &parse_state);
        if (rc != 0)
                goto err_exit;

        rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
        if (rc != 0) {
                rte_flow_error_set(error, EIO,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Failed to insert filter");
                goto err_exit;
        }

        rc = flow_program_rss_action(dev, actions, flow);
        if (rc != 0) {
                rte_flow_error_set(error, EIO,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Failed to program rss action");
                goto err_exit;
        }

        list = &hw->npc_flow.flow_list[flow->priority];
        /* Insert in ascending order of mcam entries */
        TAILQ_FOREACH(flow_iter, list, next) {
                if (flow_iter->mcam_id > flow->mcam_id) {
                        TAILQ_INSERT_BEFORE(flow_iter, flow, next);
                        return flow;
                }
        }

        TAILQ_INSERT_TAIL(list, flow, next);
        return flow;

err_exit:
        rte_free(flow);
        return NULL;
}

const struct rte_flow_ops otx2_flow_ops = {
        .validate = otx2_flow_validate,
        .create = otx2_flow_create,
};