net/octeontx2: support HIGIG2
[dpdk.git] / drivers / net / octeontx2 / otx2_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include "otx2_ethdev.h"
6 #include "otx2_flow.h"
7
8 int
9 otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
10 {
11         struct otx2_npc_flow_info *npc = &hw->npc_flow;
12         struct otx2_mbox *mbox = hw->mbox;
13         struct otx2_mcam_ents_info *info;
14         struct rte_bitmap *bmap;
15         struct rte_flow *flow;
16         int entry_count = 0;
17         int rc, idx;
18
19         for (idx = 0; idx < npc->flow_max_priority; idx++) {
20                 info = &npc->flow_entry_info[idx];
21                 entry_count += info->live_ent;
22         }
23
24         if (entry_count == 0)
25                 return 0;
26
27         /* Free all MCAM entries allocated */
28         rc = otx2_flow_mcam_free_all_entries(mbox);
29
30         /* Free any MCAM counters and delete flow list */
31         for (idx = 0; idx < npc->flow_max_priority; idx++) {
32                 while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
33                         if (flow->ctr_id != NPC_COUNTER_NONE)
34                                 rc |= otx2_flow_mcam_free_counter(mbox,
35                                                              flow->ctr_id);
36
37                         TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
38                         rte_free(flow);
39                         bmap = npc->live_entries[flow->priority];
40                         rte_bitmap_clear(bmap, flow->mcam_id);
41                 }
42                 info = &npc->flow_entry_info[idx];
43                 info->free_ent = 0;
44                 info->live_ent = 0;
45         }
46         return rc;
47 }
48
49
/*
 * Build the non-LDATA portion of the MCAM search key: one nibble per
 * enabled field (layer type and the two layer-flag nibbles, per the KEX
 * profile's supported-nibble mask), write the result into
 * pst->flow->mcam_data/mcam_mask, then allocate and program the MCAM
 * entry through the mailbox.
 */
static int
flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
                 struct otx2_npc_flow_info *flow_info)
{
        /* This is non-LDATA part in search key */
        uint64_t key_data[2] = {0ULL, 0ULL};
        uint64_t key_mask[2] = {0ULL, 0ULL};
        int intf = pst->flow->nix_intf;
        int key_len, bit = 0, index;
        int off, idx, data_off = 0;
        uint8_t lid, mask, data;
        uint16_t layer_info;
        uint64_t lt, flags;


        /* Skip till Layer A data start */
        while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
                if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
                        data_off++;
                bit++;
        }

        /* Each bit represents 1 nibble */
        data_off *= 4;

        index = 0;
        for (lid = 0; lid < NPC_MAX_LID; lid++) {
                /* Offset in key */
                off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
                lt = pst->lt[lid] & 0xf;
                flags = pst->flags[lid] & 0xff;

                /* NPC_LAYER_KEX_S */
                layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);

                if (layer_info) {
                        /* idx 2 selects the layer-type nibble, idx 1 the
                         * high flag nibble, idx 0 the low flag nibble;
                         * only nibbles enabled in layer_info are emitted.
                         */
                        for (idx = 0; idx <= 2 ; idx++) {
                                if (layer_info & (1 << idx)) {
                                        if (idx == 2)
                                                data = lt;
                                        else if (idx == 1)
                                                data = ((flags >> 4) & 0xf);
                                        else
                                                data = (flags & 0xf);

                                        /* Spill into the second 64-bit word
                                         * once the first one is full.
                                         */
                                        if (data_off >= 64) {
                                                data_off = 0;
                                                index++;
                                        }
                                        key_data[index] |= ((uint64_t)data <<
                                                            data_off);
                                        /* An unrecognized layer (lt == 0)
                                         * contributes a wildcard nibble.
                                         */
                                        mask = 0xf;
                                        if (lt == 0)
                                                mask = 0;
                                        key_mask[index] |= ((uint64_t)mask <<
                                                            data_off);
                                        data_off += 4;
                                }
                        }
                }
        }

        otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
                     key_data[0], key_data[1]);

        /* Copy this into mcam string */
        key_len = (pst->npc->keyx_len[intf] + 7) / 8;
        otx2_npc_dbg("Key_len  = %d", key_len);
        memcpy(pst->flow->mcam_data, key_data, key_len);
        memcpy(pst->flow->mcam_mask, key_mask, key_len);

        otx2_npc_dbg("Final flow data");
        for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
                otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
                             idx, pst->flow->mcam_data[idx],
                             idx, pst->flow->mcam_mask[idx]);
        }

        /*
         * Now we have mcam data and mask formatted as
         * [Key_len/4 nibbles][0 or 1 nibble hole][data]
         * hole is present if key_len is odd number of nibbles.
         * mcam data must be split into 64 bits + 48 bits segments
         * for each back W0, W1.
         */

        return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
}
138
139 static int
140 flow_parse_attr(struct rte_eth_dev *eth_dev,
141                 const struct rte_flow_attr *attr,
142                 struct rte_flow_error *error,
143                 struct rte_flow *flow)
144 {
145         struct otx2_eth_dev *dev = eth_dev->data->dev_private;
146         const char *errmsg = NULL;
147
148         if (attr == NULL)
149                 errmsg = "Attribute can't be empty";
150         else if (attr->group)
151                 errmsg = "Groups are not supported";
152         else if (attr->priority >= dev->npc_flow.flow_max_priority)
153                 errmsg = "Priority should be with in specified range";
154         else if ((!attr->egress && !attr->ingress) ||
155                  (attr->egress && attr->ingress))
156                 errmsg = "Exactly one of ingress or egress must be set";
157
158         if (errmsg != NULL) {
159                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
160                                    attr, errmsg);
161                 return -ENOTSUP;
162         }
163
164         if (attr->ingress)
165                 flow->nix_intf = OTX2_INTF_RX;
166         else
167                 flow->nix_intf = OTX2_INTF_TX;
168
169         flow->priority = attr->priority;
170         return 0;
171 }
172
173 static inline int
174 flow_get_free_rss_grp(struct rte_bitmap *bmap,
175                       uint32_t size, uint32_t *pos)
176 {
177         for (*pos = 0; *pos < size; ++*pos) {
178                 if (!rte_bitmap_get(bmap, *pos))
179                         break;
180         }
181
182         return *pos < size ? 0 : -1;
183 }
184
/*
 * Reserve a free RSS group (group 0 is excluded — see below), program the
 * RSS key and the group's indirection table, and pick a flow-key hash
 * algorithm for the requested hash types.
 *
 * On success returns 0 with *alg_idx / *rss_grp filled in and the group
 * marked used in the npc flow info; returns -ENOSPC when no group is
 * available, or the negative return of the table/hash setup helpers.
 */
static int
flow_configure_rss_action(struct otx2_eth_dev *dev,
                          const struct rte_flow_action_rss *rss,
                          uint8_t *alg_idx, uint32_t *rss_grp,
                          int mcam_index)
{
        struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
        uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
        uint32_t flowkey_cfg, grp_aval, i;
        uint16_t *ind_tbl = NULL;
        uint8_t flowkey_algx;
        int rc;

        rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
                                   flow_info->rss_grps, &grp_aval);
        /* RSS group :0 is not usable for flow rss action */
        if (rc < 0 || grp_aval == 0)
                return -ENOSPC;

        *rss_grp = grp_aval;

        otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
                             rss->key_len);

        /* If queue count passed in the rss action is less than
         * HW configured reta size, replicate rss action reta
         * across HW reta table.
         */
        if (dev->rss_info.rss_size > rss->queue_num) {
                ind_tbl = reta;

                /* Whole copies of the queue list first ... */
                for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
                        memcpy(reta + i * rss->queue_num, rss->queue,
                               sizeof(uint16_t) * rss->queue_num);

                /* ... then the partial tail when rss_size is not an exact
                 * multiple of queue_num.
                 */
                i = dev->rss_info.rss_size % rss->queue_num;
                if (i)
                        memcpy(&reta[dev->rss_info.rss_size] - i,
                               rss->queue, i * sizeof(uint16_t));
        } else {
                /* Queue list is at least table-sized; use it directly. */
                ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
        }

        rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
        if (rc) {
                otx2_err("Failed to init rss table rc = %d", rc);
                return rc;
        }

        /* Translate rte_eth hash types/level into NIX flow-key config. */
        flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);

        rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
                             *rss_grp, mcam_index);
        if (rc) {
                otx2_err("Failed to set rss hash function rc = %d", rc);
                return rc;
        }

        *alg_idx = flowkey_algx;

        /* Claim the group only after all programming succeeded, so error
         * paths above do not leak a reserved group.
         */
        rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);

        return 0;
}
249
250
251 static int
252 flow_program_rss_action(struct rte_eth_dev *eth_dev,
253                         const struct rte_flow_action actions[],
254                         struct rte_flow *flow)
255 {
256         struct otx2_eth_dev *dev = eth_dev->data->dev_private;
257         const struct rte_flow_action_rss *rss;
258         uint32_t rss_grp;
259         uint8_t alg_idx;
260         int rc;
261
262         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
263                 if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
264                         rss = (const struct rte_flow_action_rss *)actions->conf;
265
266                         rc = flow_configure_rss_action(dev,
267                                                        rss, &alg_idx, &rss_grp,
268                                                        flow->mcam_id);
269                         if (rc)
270                                 return rc;
271
272                         flow->npc_action |=
273                                 ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
274                                  NIX_RSS_ACT_ALG_OFFSET) |
275                                 ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
276                                  NIX_RSS_ACT_GRP_OFFSET);
277                 }
278         }
279         return 0;
280 }
281
/*
 * Release the RSS group a flow reserved via flow_program_rss_action(),
 * recovering the group index from the flow's npc_action word.
 */
static int
flow_free_rss_action(struct rte_eth_dev *eth_dev,
                     struct rte_flow *flow)
{
        struct otx2_eth_dev *dev = eth_dev->data->dev_private;
        struct otx2_npc_flow_info *npc = &dev->npc_flow;
        uint32_t rss_grp;

        /* NOTE(review): this bitwise AND matches any action whose opcode
         * shares bits with NIX_RX_ACTIONOP_RSS — confirm whether an
         * equality compare on the opcode field was intended.
         */
        if (flow->npc_action & NIX_RX_ACTIONOP_RSS) {
                rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
                        NIX_RSS_ACT_GRP_MASK;
                /* Group 0 is never handed out by the configure path. */
                if (rss_grp == 0 || rss_grp >= npc->rss_grps)
                        return -EINVAL;

                rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
        }

        return 0;
}
301
302
303 static int
304 flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
305 {
306         otx2_npc_dbg("Meta Item");
307         return 0;
308 }
309
310 /*
311  * Parse function of each layer:
312  *  - Consume one or more patterns that are relevant.
313  *  - Update parse_state
314  *  - Set parse_state.pattern = last item consumed
315  *  - Set appropriate error code/message when returning error.
316  */
317 typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);
318
319 static int
320 flow_parse_pattern(struct rte_eth_dev *dev,
321                    const struct rte_flow_item pattern[],
322                    struct rte_flow_error *error,
323                    struct rte_flow *flow,
324                    struct otx2_parse_state *pst)
325 {
326         flow_parse_stage_func_t parse_stage_funcs[] = {
327                 flow_parse_meta_items,
328                 otx2_flow_parse_higig2_hdr,
329                 otx2_flow_parse_la,
330                 otx2_flow_parse_lb,
331                 otx2_flow_parse_lc,
332                 otx2_flow_parse_ld,
333                 otx2_flow_parse_le,
334                 otx2_flow_parse_lf,
335                 otx2_flow_parse_lg,
336                 otx2_flow_parse_lh,
337         };
338         struct otx2_eth_dev *hw = dev->data->dev_private;
339         uint8_t layer = 0;
340         int key_offset;
341         int rc;
342
343         if (pattern == NULL) {
344                 rte_flow_error_set(error, EINVAL,
345                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
346                                    "pattern is NULL");
347                 return -EINVAL;
348         }
349
350         memset(pst, 0, sizeof(*pst));
351         pst->npc = &hw->npc_flow;
352         pst->error = error;
353         pst->flow = flow;
354
355         /* Use integral byte offset */
356         key_offset = pst->npc->keyx_len[flow->nix_intf];
357         key_offset = (key_offset + 7) / 8;
358
359         /* Location where LDATA would begin */
360         pst->mcam_data = (uint8_t *)flow->mcam_data;
361         pst->mcam_mask = (uint8_t *)flow->mcam_mask;
362
363         while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
364                layer < RTE_DIM(parse_stage_funcs)) {
365                 otx2_npc_dbg("Pattern type = %d", pattern->type);
366
367                 /* Skip place-holders */
368                 pattern = otx2_flow_skip_void_and_any_items(pattern);
369
370                 pst->pattern = pattern;
371                 otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
372                 rc = parse_stage_funcs[layer](pst);
373                 if (rc != 0)
374                         return -rte_errno;
375
376                 layer++;
377
378                 /*
379                  * Parse stage function sets pst->pattern to
380                  * 1 past the last item it consumed.
381                  */
382                 pattern = pst->pattern;
383
384                 if (pst->terminate)
385                         break;
386         }
387
388         /* Skip trailing place-holders */
389         pattern = otx2_flow_skip_void_and_any_items(pattern);
390
391         /* Are there more items than what we can handle? */
392         if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
393                 rte_flow_error_set(error, ENOTSUP,
394                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
395                                    "unsupported item in the sequence");
396                 return -ENOTSUP;
397         }
398
399         return 0;
400 }
401
402 static int
403 flow_parse_rule(struct rte_eth_dev *dev,
404                 const struct rte_flow_attr *attr,
405                 const struct rte_flow_item pattern[],
406                 const struct rte_flow_action actions[],
407                 struct rte_flow_error *error,
408                 struct rte_flow *flow,
409                 struct otx2_parse_state *pst)
410 {
411         int err;
412
413         /* Check attributes */
414         err = flow_parse_attr(dev, attr, error, flow);
415         if (err)
416                 return err;
417
418         /* Check actions */
419         err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
420         if (err)
421                 return err;
422
423         /* Check pattern */
424         err = flow_parse_pattern(dev, pattern, error, flow, pst);
425         if (err)
426                 return err;
427
428         /* Check for overlaps? */
429         return 0;
430 }
431
432 static int
433 otx2_flow_validate(struct rte_eth_dev *dev,
434                    const struct rte_flow_attr *attr,
435                    const struct rte_flow_item pattern[],
436                    const struct rte_flow_action actions[],
437                    struct rte_flow_error *error)
438 {
439         struct otx2_parse_state parse_state;
440         struct rte_flow flow;
441
442         memset(&flow, 0, sizeof(flow));
443         return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
444                                &parse_state);
445 }
446
447 static struct rte_flow *
448 otx2_flow_create(struct rte_eth_dev *dev,
449                  const struct rte_flow_attr *attr,
450                  const struct rte_flow_item pattern[],
451                  const struct rte_flow_action actions[],
452                  struct rte_flow_error *error)
453 {
454         struct otx2_eth_dev *hw = dev->data->dev_private;
455         struct otx2_parse_state parse_state;
456         struct otx2_mbox *mbox = hw->mbox;
457         struct rte_flow *flow, *flow_iter;
458         struct otx2_flow_list *list;
459         int rc;
460
461         flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
462         if (flow == NULL) {
463                 rte_flow_error_set(error, ENOMEM,
464                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
465                                    NULL,
466                                    "Memory allocation failed");
467                 return NULL;
468         }
469         memset(flow, 0, sizeof(*flow));
470
471         rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
472                              &parse_state);
473         if (rc != 0)
474                 goto err_exit;
475
476         rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
477         if (rc != 0) {
478                 rte_flow_error_set(error, EIO,
479                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
480                                    NULL,
481                                    "Failed to insert filter");
482                 goto err_exit;
483         }
484
485         rc = flow_program_rss_action(dev, actions, flow);
486         if (rc != 0) {
487                 rte_flow_error_set(error, EIO,
488                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
489                                    NULL,
490                                    "Failed to program rss action");
491                 goto err_exit;
492         }
493
494
495         list = &hw->npc_flow.flow_list[flow->priority];
496         /* List in ascending order of mcam entries */
497         TAILQ_FOREACH(flow_iter, list, next) {
498                 if (flow_iter->mcam_id > flow->mcam_id) {
499                         TAILQ_INSERT_BEFORE(flow_iter, flow, next);
500                         return flow;
501                 }
502         }
503
504         TAILQ_INSERT_TAIL(list, flow, next);
505         return flow;
506
507 err_exit:
508         rte_free(flow);
509         return NULL;
510 }
511
/*
 * rte_flow destroy callback: drop mark-action accounting, free the RSS
 * group and the MCAM entry, then unlink and free the flow object.
 * Mailbox/RSS failures set *error but teardown continues so the software
 * state is always released; always returns 0 except for inconsistent
 * mark-action accounting (-EINVAL).
 */
static int
otx2_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct otx2_eth_dev *hw = dev->data->dev_private;
        struct otx2_npc_flow_info *npc = &hw->npc_flow;
        struct otx2_mbox *mbox = hw->mbox;
        struct rte_bitmap *bmap;
        uint16_t match_id;
        int rc;

        /* Recover the mark/flag match id encoded in the RX action word. */
        match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
                NIX_RX_ACT_MATCH_MASK;

        if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
                /* Counter underflow means bookkeeping is already broken. */
                if (rte_atomic32_read(&npc->mark_actions) == 0)
                        return -EINVAL;

                /* Clear mark offload flag if there are no more mark actions */
                if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
                        hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
                        otx2_eth_set_rx_function(dev);
                }
        }

        rc = flow_free_rss_action(dev, flow);
        if (rc != 0) {
                rte_flow_error_set(error, EIO,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Failed to free rss action");
        }

        rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
        if (rc != 0) {
                rte_flow_error_set(error, EIO,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Failed to destroy filter");
        }

        TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);

        /* Mark the MCAM slot free in the live-entries map before freeing. */
        bmap = npc->live_entries[flow->priority];
        rte_bitmap_clear(bmap, flow->mcam_id);

        rte_free(flow);
        return 0;
}
562
563 static int
564 otx2_flow_flush(struct rte_eth_dev *dev,
565                 struct rte_flow_error *error)
566 {
567         struct otx2_eth_dev *hw = dev->data->dev_private;
568         int rc;
569
570         rc = otx2_flow_free_all_resources(hw);
571         if (rc) {
572                 otx2_err("Error when deleting NPC MCAM entries "
573                                 ", counters");
574                 rte_flow_error_set(error, EIO,
575                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
576                                    NULL,
577                                    "Failed to flush filter");
578                 return -rte_errno;
579         }
580
581         return 0;
582 }
583
584 static int
585 otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
586                   int enable __rte_unused,
587                   struct rte_flow_error *error)
588 {
589         /*
590          * If we support, we need to un-install the default mcam
591          * entry for this port.
592          */
593
594         rte_flow_error_set(error, ENOTSUP,
595                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
596                            NULL,
597                            "Flow isolation not supported");
598
599         return -rte_errno;
600 }
601
602 static int
603 otx2_flow_query(struct rte_eth_dev *dev,
604                 struct rte_flow *flow,
605                 const struct rte_flow_action *action,
606                 void *data,
607                 struct rte_flow_error *error)
608 {
609         struct otx2_eth_dev *hw = dev->data->dev_private;
610         struct rte_flow_query_count *query = data;
611         struct otx2_mbox *mbox = hw->mbox;
612         const char *errmsg = NULL;
613         int errcode = ENOTSUP;
614         int rc;
615
616         if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
617                 errmsg = "Only COUNT is supported in query";
618                 goto err_exit;
619         }
620
621         if (flow->ctr_id == NPC_COUNTER_NONE) {
622                 errmsg = "Counter is not available";
623                 goto err_exit;
624         }
625
626         rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
627         if (rc != 0) {
628                 errcode = EIO;
629                 errmsg = "Error reading flow counter";
630                 goto err_exit;
631         }
632         query->hits_set = 1;
633         query->bytes_set = 0;
634
635         if (query->reset)
636                 rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
637         if (rc != 0) {
638                 errcode = EIO;
639                 errmsg = "Error clearing flow counter";
640                 goto err_exit;
641         }
642
643         return 0;
644
645 err_exit:
646         rte_flow_error_set(error, errcode,
647                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
648                            NULL,
649                            errmsg);
650         return -rte_errno;
651 }
652
/* rte_flow driver callbacks exported by the octeontx2 PMD. */
const struct rte_flow_ops otx2_flow_ops = {
        .validate = otx2_flow_validate,
        .create = otx2_flow_create,
        .destroy = otx2_flow_destroy,
        .flush = otx2_flow_flush,
        .query = otx2_flow_query,
        .isolate = otx2_flow_isolate,
};
661
/*
 * Each set bit in the supported-field mask contributes one 4-bit nibble
 * to the MCAM search key; return the resulting key length in bits
 * (4 * popcount(supp_mask)).
 */
static int
flow_supp_key_len(uint32_t supp_mask)
{
        int nibbles;

        for (nibbles = 0; supp_mask != 0; supp_mask >>= 1) {
                if (supp_mask & 1)
                        nibbles++;
        }

        return nibbles * 4;
}
672
673 /* Refer HRM register:
674  * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
675  * and
676  * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
677  **/
678 #define BYTESM1_SHIFT   16
679 #define HDR_OFF_SHIFT   8
680 static void
681 flow_update_kex_info(struct npc_xtract_info *xtract_info,
682                      uint64_t val)
683 {
684         xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
685         xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
686         xtract_info->key_off = val & 0x3f;
687         xtract_info->enable = ((val >> 7) & 0x1);
688         xtract_info->flags_enable = ((val >> 6) & 0x1);
689 }
690
/*
 * Cache the MCAM key extraction (MKEX) profile from the AF mailbox
 * response into the per-port npc flow info: supported-nibble masks and
 * key lengths per interface, key width selectors, per-LDATA-flags
 * extraction configs, per-LID/LT/LD extraction configs, and the raw
 * LDATA flags words.
 */
static void
flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
                      struct npc_get_kex_cfg_rsp *kex_rsp)
{
        volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
                [NPC_MAX_LD];
        struct npc_xtract_info *x_info = NULL;
        int lid, lt, ld, fl, ix;
        otx2_dxcfg_t *p;
        uint64_t keyw;
        uint64_t val;

        /* Bits [30:0] of the KEX config are the supported-nibble mask;
         * the key length in bits is 4 * popcount of that mask.
         */
        npc->keyx_supp_nmask[NPC_MCAM_RX] =
                kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
        npc->keyx_supp_nmask[NPC_MCAM_TX] =
                kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
        npc->keyx_len[NPC_MCAM_RX] =
                flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
        npc->keyx_len[NPC_MCAM_TX] =
                flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);

        /* Key width selector lives in bits [34:32]. */
        keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
        npc->keyw[NPC_MCAM_RX] = keyw;
        keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
        npc->keyw[NPC_MCAM_TX] = keyw;

        /* Update KEX_LD_FLAG */
        for (ix = 0; ix < NPC_MAX_INTF; ix++) {
                for (ld = 0; ld < NPC_MAX_LD; ld++) {
                        for (fl = 0; fl < NPC_MAX_LFL; fl++) {
                                x_info =
                                    &npc->prx_fxcfg[ix][ld][fl].xtract[0];
                                val = kex_rsp->intf_ld_flags[ix][ld][fl];
                                flow_update_kex_info(x_info, val);
                        }
                }
        }

        /* Update LID, LT and LDATA cfg.  The response's flat array is
         * reinterpreted as a 4-D [intf][lid][lt][ld] table of config
         * words for iteration.
         */
        p = &npc->prx_dxcfg;
        q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
                        (&kex_rsp->intf_lid_lt_ld);
        for (ix = 0; ix < NPC_MAX_INTF; ix++) {
                for (lid = 0; lid < NPC_MAX_LID; lid++) {
                        for (lt = 0; lt < NPC_MAX_LT; lt++) {
                                for (ld = 0; ld < NPC_MAX_LD; ld++) {
                                        x_info = &(*p)[ix][lid][lt].xtract[ld];
                                        val = (*q)[ix][lid][lt][ld];
                                        flow_update_kex_info(x_info, val);
                                }
                        }
                }
        }
        /* Update LDATA Flags cfg */
        npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
        npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
}
748
749 static struct otx2_idev_kex_cfg *
750 flow_intra_dev_kex_cfg(void)
751 {
752         static const char name[] = "octeontx2_intra_device_kex_conf";
753         struct otx2_idev_kex_cfg *idev;
754         const struct rte_memzone *mz;
755
756         mz = rte_memzone_lookup(name);
757         if (mz)
758                 return mz->addr;
759
760         /* Request for the first time */
761         mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
762                                          SOCKET_ID_ANY, 0, OTX2_ALIGN);
763         if (mz) {
764                 idev = mz->addr;
765                 rte_atomic16_set(&idev->kex_refcnt, 0);
766                 return idev;
767         }
768         return NULL;
769 }
770
/*
 * Fetch the NPC key extraction configuration, using the intra-device
 * memzone as a cache so only the first device issues the mailbox request;
 * subsequent devices reuse the cached copy.  Also records the MKEX
 * profile name on the device and expands the config into npc flow info.
 * Returns 0 on success, -ENOMEM if the shared memzone cannot be created,
 * or the mailbox error code.
 */
static int
flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
{
        struct otx2_npc_flow_info *npc = &dev->npc_flow;
        struct npc_get_kex_cfg_rsp *kex_rsp;
        struct otx2_mbox *mbox = dev->mbox;
        char mkex_pfl_name[MKEX_NAME_LEN];
        struct otx2_idev_kex_cfg *idev;
        int rc = 0;

        idev = flow_intra_dev_kex_cfg();
        if (!idev)
                return -ENOMEM;

        /* Is kex_cfg read by any another driver? */
        if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
                /* Call mailbox to get key & data size */
                (void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
                otx2_mbox_msg_send(mbox, 0);
                rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
                if (rc) {
                        /* NOTE(review): kex_refcnt stays incremented on this
                         * failure path — confirm whether it should be rolled
                         * back so a retry re-issues the mailbox request.
                         */
                        otx2_err("Failed to fetch NPC keyx config");
                        goto done;
                }
                memcpy(&idev->kex_cfg, kex_rsp,
                       sizeof(struct npc_get_kex_cfg_rsp));
        }

        otx2_mbox_memcpy(mkex_pfl_name,
                         idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);

        strlcpy((char *)dev->mkex_pfl_name,
                mkex_pfl_name, sizeof(dev->mkex_pfl_name));

        flow_process_mkex_cfg(npc, &idev->kex_cfg);

done:
        return rc;
}
810
811 int
812 otx2_flow_init(struct otx2_eth_dev *hw)
813 {
814         uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
815         struct otx2_npc_flow_info *npc = &hw->npc_flow;
816         uint32_t bmap_sz;
817         int rc = 0, idx;
818
819         rc = flow_fetch_kex_cfg(hw);
820         if (rc) {
821                 otx2_err("Failed to fetch NPC keyx config from idev");
822                 return rc;
823         }
824
825         rte_atomic32_init(&npc->mark_actions);
826
827         npc->mcam_entries = NPC_MCAM_TOT_ENTRIES >> npc->keyw[NPC_MCAM_RX];
828         /* Free, free_rev, live and live_rev entries */
829         bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
830         mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
831                           RTE_CACHE_LINE_SIZE);
832         if (mem == NULL) {
833                 otx2_err("Bmap alloc failed");
834                 rc = -ENOMEM;
835                 return rc;
836         }
837
838         npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
839                                            * sizeof(struct otx2_mcam_ents_info),
840                                            0);
841         if (npc->flow_entry_info == NULL) {
842                 otx2_err("flow_entry_info alloc failed");
843                 rc = -ENOMEM;
844                 goto err;
845         }
846
847         npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
848                                         * sizeof(struct rte_bitmap *),
849                                         0);
850         if (npc->free_entries == NULL) {
851                 otx2_err("free_entries alloc failed");
852                 rc = -ENOMEM;
853                 goto err;
854         }
855
856         npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
857                                         * sizeof(struct rte_bitmap *),
858                                         0);
859         if (npc->free_entries_rev == NULL) {
860                 otx2_err("free_entries_rev alloc failed");
861                 rc = -ENOMEM;
862                 goto err;
863         }
864
865         npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
866                                         * sizeof(struct rte_bitmap *),
867                                         0);
868         if (npc->live_entries == NULL) {
869                 otx2_err("live_entries alloc failed");
870                 rc = -ENOMEM;
871                 goto err;
872         }
873
874         npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
875                                         * sizeof(struct rte_bitmap *),
876                                         0);
877         if (npc->live_entries_rev == NULL) {
878                 otx2_err("live_entries_rev alloc failed");
879                 rc = -ENOMEM;
880                 goto err;
881         }
882
883         npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
884                                         * sizeof(struct otx2_flow_list),
885                                         0);
886         if (npc->flow_list == NULL) {
887                 otx2_err("flow_list alloc failed");
888                 rc = -ENOMEM;
889                 goto err;
890         }
891
892         npc_mem = mem;
893         for (idx = 0; idx < npc->flow_max_priority; idx++) {
894                 TAILQ_INIT(&npc->flow_list[idx]);
895
896                 npc->free_entries[idx] =
897                         rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
898                 mem += bmap_sz;
899
900                 npc->free_entries_rev[idx] =
901                         rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
902                 mem += bmap_sz;
903
904                 npc->live_entries[idx] =
905                         rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
906                 mem += bmap_sz;
907
908                 npc->live_entries_rev[idx] =
909                         rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
910                 mem += bmap_sz;
911
912                 npc->flow_entry_info[idx].free_ent = 0;
913                 npc->flow_entry_info[idx].live_ent = 0;
914                 npc->flow_entry_info[idx].max_id = 0;
915                 npc->flow_entry_info[idx].min_id = ~(0);
916         }
917
918         npc->rss_grps = NIX_RSS_GRPS;
919
920         bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
921         nix_mem = rte_zmalloc(NULL, bmap_sz,  RTE_CACHE_LINE_SIZE);
922         if (nix_mem == NULL) {
923                 otx2_err("Bmap alloc failed");
924                 rc = -ENOMEM;
925                 goto err;
926         }
927
928         npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);
929
930         /* Group 0 will be used for RSS,
931          * 1 -7 will be used for rte_flow RSS action
932          */
933         rte_bitmap_set(npc->rss_grp_entries, 0);
934
935         return 0;
936
937 err:
938         if (npc->flow_list)
939                 rte_free(npc->flow_list);
940         if (npc->live_entries_rev)
941                 rte_free(npc->live_entries_rev);
942         if (npc->live_entries)
943                 rte_free(npc->live_entries);
944         if (npc->free_entries_rev)
945                 rte_free(npc->free_entries_rev);
946         if (npc->free_entries)
947                 rte_free(npc->free_entries);
948         if (npc->flow_entry_info)
949                 rte_free(npc->flow_entry_info);
950         if (npc_mem)
951                 rte_free(npc_mem);
952         return rc;
953 }
954
955 int
956 otx2_flow_fini(struct otx2_eth_dev *hw)
957 {
958         struct otx2_npc_flow_info *npc = &hw->npc_flow;
959         int rc;
960
961         rc = otx2_flow_free_all_resources(hw);
962         if (rc) {
963                 otx2_err("Error when deleting NPC MCAM entries, counters");
964                 return rc;
965         }
966
967         if (npc->flow_list)
968                 rte_free(npc->flow_list);
969         if (npc->live_entries_rev)
970                 rte_free(npc->live_entries_rev);
971         if (npc->live_entries)
972                 rte_free(npc->live_entries);
973         if (npc->free_entries_rev)
974                 rte_free(npc->free_entries_rev);
975         if (npc->free_entries)
976                 rte_free(npc->free_entries);
977         if (npc->flow_entry_info)
978                 rte_free(npc->flow_entry_info);
979
980         return 0;
981 }