event/octeontx2: improve single flow performance
[dpdk.git] / drivers / net / octeontx2 / otx2_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include "otx2_ethdev.h"
6 #include "otx2_ethdev_sec.h"
7 #include "otx2_flow.h"
8
9 enum flow_vtag_cfg_dir { VTAG_TX, VTAG_RX };
10
11 int
12 otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
13 {
14         struct otx2_npc_flow_info *npc = &hw->npc_flow;
15         struct otx2_mbox *mbox = hw->mbox;
16         struct otx2_mcam_ents_info *info;
17         struct rte_bitmap *bmap;
18         struct rte_flow *flow;
19         int entry_count = 0;
20         int rc, idx;
21
22         for (idx = 0; idx < npc->flow_max_priority; idx++) {
23                 info = &npc->flow_entry_info[idx];
24                 entry_count += info->live_ent;
25         }
26
27         if (entry_count == 0)
28                 return 0;
29
30         /* Free all MCAM entries allocated */
31         rc = otx2_flow_mcam_free_all_entries(mbox);
32
33         /* Free any MCAM counters and delete flow list */
34         for (idx = 0; idx < npc->flow_max_priority; idx++) {
35                 while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
36                         if (flow->ctr_id != NPC_COUNTER_NONE)
37                                 rc |= otx2_flow_mcam_free_counter(mbox,
38                                                              flow->ctr_id);
39
40                         TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
41                         rte_free(flow);
42                         bmap = npc->live_entries[flow->priority];
43                         rte_bitmap_clear(bmap, flow->mcam_id);
44                 }
45                 info = &npc->flow_entry_info[idx];
46                 info->free_ent = 0;
47                 info->live_ent = 0;
48         }
49         return rc;
50 }
51
52
/* Build the non-LDATA portion of the MCAM search key (layer-type and
 * layer-flag nibbles for each LID) on top of the LDATA already placed
 * in pst->flow->mcam_data by the parse stages, then allocate and write
 * the MCAM entry via the mailbox.
 */
static int
flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
		 struct otx2_npc_flow_info *flow_info)
{
	/* This is non-LDATA part in search key */
	uint64_t key_data[2] = {0ULL, 0ULL};
	uint64_t key_mask[2] = {0ULL, 0ULL};
	int intf = pst->flow->nix_intf;
	int key_len, bit = 0, index;
	int off, idx, data_off = 0;
	uint8_t lid, mask, data;
	uint16_t layer_info;
	uint64_t lt, flags;


	/* Skip till Layer A data start */
	while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
		if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
			data_off++;
		bit++;
	}

	/* Each bit represents 1 nibble */
	data_off *= 4;

	index = 0;
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		/* Offset in key */
		off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
		lt = pst->lt[lid] & 0xf;
		flags = pst->flags[lid] & 0xff;

		/* NPC_LAYER_KEX_S: 3 bits per LID -> up to 3 nibbles */
		layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);

		if (layer_info) {
			for (idx = 0; idx <= 2 ; idx++) {
				if (layer_info & (1 << idx)) {
					/* idx 2 = layer type nibble,
					 * idx 1 = flags high nibble,
					 * idx 0 = flags low nibble.
					 */
					if (idx == 2)
						data = lt;
					else if (idx == 1)
						data = ((flags >> 4) & 0xf);
					else
						data = (flags & 0xf);

					/* Spill over into the second
					 * 64-bit key word when full.
					 */
					if (data_off >= 64) {
						data_off = 0;
						index++;
					}
					key_data[index] |= ((uint64_t)data <<
							    data_off);
					mask = 0xf;
					/* lt == 0 means the layer was not
					 * matched on: wildcard its nibble.
					 */
					if (lt == 0)
						mask = 0;
					key_mask[index] |= ((uint64_t)mask <<
							    data_off);
					data_off += 4;
				}
			}
		}
	}

	otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
		     key_data[0], key_data[1]);

	/* Copy this into mcam string */
	key_len = (pst->npc->keyx_len[intf] + 7) / 8;
	otx2_npc_dbg("Key_len  = %d", key_len);
	memcpy(pst->flow->mcam_data, key_data, key_len);
	memcpy(pst->flow->mcam_mask, key_mask, key_len);

	otx2_npc_dbg("Final flow data");
	for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
		otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
			     idx, pst->flow->mcam_data[idx],
			     idx, pst->flow->mcam_mask[idx]);
	}

	/*
	 * Now we have mcam data and mask formatted as
	 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
	 * hole is present if key_len is odd number of nibbles.
	 * mcam data must be split into 64 bits + 48 bits segments
	 * for each back W0, W1.
	 */

	return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
}
141
142 static int
143 flow_parse_attr(struct rte_eth_dev *eth_dev,
144                 const struct rte_flow_attr *attr,
145                 struct rte_flow_error *error,
146                 struct rte_flow *flow)
147 {
148         struct otx2_eth_dev *dev = eth_dev->data->dev_private;
149         const char *errmsg = NULL;
150
151         if (attr == NULL)
152                 errmsg = "Attribute can't be empty";
153         else if (attr->group)
154                 errmsg = "Groups are not supported";
155         else if (attr->priority >= dev->npc_flow.flow_max_priority)
156                 errmsg = "Priority should be with in specified range";
157         else if ((!attr->egress && !attr->ingress) ||
158                  (attr->egress && attr->ingress))
159                 errmsg = "Exactly one of ingress or egress must be set";
160
161         if (errmsg != NULL) {
162                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
163                                    attr, errmsg);
164                 return -ENOTSUP;
165         }
166
167         if (attr->ingress)
168                 flow->nix_intf = OTX2_INTF_RX;
169         else
170                 flow->nix_intf = OTX2_INTF_TX;
171
172         flow->priority = attr->priority;
173         return 0;
174 }
175
176 static inline int
177 flow_get_free_rss_grp(struct rte_bitmap *bmap,
178                       uint32_t size, uint32_t *pos)
179 {
180         for (*pos = 0; *pos < size; ++*pos) {
181                 if (!rte_bitmap_get(bmap, *pos))
182                         break;
183         }
184
185         return *pos < size ? 0 : -1;
186 }
187
/* Allocate a free RSS group for a flow RSS action, program its RETA
 * from the action's queue list, set the hash key/function, and report
 * the chosen algorithm index and group via *alg_idx / *rss_grp.
 * Returns 0 on success, -ENOSPC if no group is free, or the mailbox
 * error code.
 */
static int
flow_configure_rss_action(struct otx2_eth_dev *dev,
			  const struct rte_flow_action_rss *rss,
			  uint8_t *alg_idx, uint32_t *rss_grp,
			  int mcam_index)
{
	struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
	uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
	uint32_t flowkey_cfg, grp_aval, i;
	uint16_t *ind_tbl = NULL;
	uint8_t flowkey_algx;
	int rc;

	rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
				   flow_info->rss_grps, &grp_aval);
	/* RSS group :0 is not usable for flow rss action */
	if (rc < 0 || grp_aval == 0)
		return -ENOSPC;

	*rss_grp = grp_aval;

	otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
			     rss->key_len);

	/* If queue count passed in the rss action is less than
	 * HW configured reta size, replicate rss action reta
	 * across HW reta table.
	 */
	if (dev->rss_info.rss_size > rss->queue_num) {
		ind_tbl = reta;

		/* Whole copies of the queue list first... */
		for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
			memcpy(reta + i * rss->queue_num, rss->queue,
			       sizeof(uint16_t) * rss->queue_num);

		/* ...then the tail when rss_size is not a multiple of
		 * queue_num (destination is the last i slots of reta).
		 */
		i = dev->rss_info.rss_size % rss->queue_num;
		if (i)
			memcpy(&reta[dev->rss_info.rss_size] - i,
			       rss->queue, i * sizeof(uint16_t));
	} else {
		/* Queue list covers the whole RETA; use it directly. */
		ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
	}

	rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
	if (rc) {
		otx2_err("Failed to init rss table rc = %d", rc);
		return rc;
	}

	flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);

	rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
			     *rss_grp, mcam_index);
	if (rc) {
		otx2_err("Failed to set rss hash function rc = %d", rc);
		return rc;
	}

	*alg_idx = flowkey_algx;

	/* Mark the group as in-use only once everything succeeded */
	rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);

	return 0;
}
252
253
254 static int
255 flow_program_rss_action(struct rte_eth_dev *eth_dev,
256                         const struct rte_flow_action actions[],
257                         struct rte_flow *flow)
258 {
259         struct otx2_eth_dev *dev = eth_dev->data->dev_private;
260         const struct rte_flow_action_rss *rss;
261         uint32_t rss_grp;
262         uint8_t alg_idx;
263         int rc;
264
265         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
266                 if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
267                         rss = (const struct rte_flow_action_rss *)actions->conf;
268
269                         rc = flow_configure_rss_action(dev,
270                                                        rss, &alg_idx, &rss_grp,
271                                                        flow->mcam_id);
272                         if (rc)
273                                 return rc;
274
275                         flow->npc_action |=
276                                 ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
277                                  NIX_RSS_ACT_ALG_OFFSET) |
278                                 ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
279                                  NIX_RSS_ACT_GRP_OFFSET);
280                 }
281         }
282         return 0;
283 }
284
285 static int
286 flow_free_rss_action(struct rte_eth_dev *eth_dev,
287                      struct rte_flow *flow)
288 {
289         struct otx2_eth_dev *dev = eth_dev->data->dev_private;
290         struct otx2_npc_flow_info *npc = &dev->npc_flow;
291         uint32_t rss_grp;
292
293         if (flow->npc_action & NIX_RX_ACTIONOP_RSS) {
294                 rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
295                         NIX_RSS_ACT_GRP_MASK;
296                 if (rss_grp == 0 || rss_grp >= npc->rss_grps)
297                         return -EINVAL;
298
299                 rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
300         }
301
302         return 0;
303 }
304
305 static int
306 flow_update_sec_tt(struct rte_eth_dev *eth_dev,
307                    const struct rte_flow_action actions[])
308 {
309         int rc = 0;
310
311         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
312                 if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
313                         rc = otx2_eth_sec_update_tag_type(eth_dev);
314                         break;
315                 }
316         }
317
318         return rc;
319 }
320
/* Placeholder parse stage: no meta items are consumed, but the stage
 * keeps parse_stage_funcs[] aligned with the layer enumeration.
 */
static int
flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
{
	otx2_npc_dbg("Meta Item");
	return 0;
}
327
328 /*
329  * Parse function of each layer:
330  *  - Consume one or more patterns that are relevant.
331  *  - Update parse_state
332  *  - Set parse_state.pattern = last item consumed
333  *  - Set appropriate error code/message when returning error.
334  */
335 typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);
336
/* Drive the per-layer parse stages over the pattern array, filling
 * parse_state (and through it flow->mcam_data/mcam_mask LDATA).
 * Each stage consumes zero or more items and advances pst->pattern;
 * parsing stops at END, after the last stage, or when a stage sets
 * pst->terminate. Any item left over afterwards is unsupported.
 * Returns 0 on success, negative errno (with *error filled) otherwise.
 */
static int
flow_parse_pattern(struct rte_eth_dev *dev,
		   const struct rte_flow_item pattern[],
		   struct rte_flow_error *error,
		   struct rte_flow *flow,
		   struct otx2_parse_state *pst)
{
	/* One parse stage per NPC layer, in layer order (meta, HiGig2,
	 * then LA..LH).
	 */
	flow_parse_stage_func_t parse_stage_funcs[] = {
		flow_parse_meta_items,
		otx2_flow_parse_higig2_hdr,
		otx2_flow_parse_la,
		otx2_flow_parse_lb,
		otx2_flow_parse_lc,
		otx2_flow_parse_ld,
		otx2_flow_parse_le,
		otx2_flow_parse_lf,
		otx2_flow_parse_lg,
		otx2_flow_parse_lh,
	};
	struct otx2_eth_dev *hw = dev->data->dev_private;
	uint8_t layer = 0;
	int key_offset;
	int rc;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "pattern is NULL");
		return -EINVAL;
	}

	memset(pst, 0, sizeof(*pst));
	pst->npc = &hw->npc_flow;
	pst->error = error;
	pst->flow = flow;

	/* Use integral byte offset */
	key_offset = pst->npc->keyx_len[flow->nix_intf];
	key_offset = (key_offset + 7) / 8;

	/* Location where LDATA would begin */
	pst->mcam_data = (uint8_t *)flow->mcam_data;
	pst->mcam_mask = (uint8_t *)flow->mcam_mask;

	while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
	       layer < RTE_DIM(parse_stage_funcs)) {
		otx2_npc_dbg("Pattern type = %d", pattern->type);

		/* Skip place-holders */
		pattern = otx2_flow_skip_void_and_any_items(pattern);

		pst->pattern = pattern;
		otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
		rc = parse_stage_funcs[layer](pst);
		if (rc != 0)
			/* Stage already filled *error and rte_errno */
			return -rte_errno;

		layer++;

		/*
		 * Parse stage function sets pst->pattern to
		 * 1 past the last item it consumed.
		 */
		pattern = pst->pattern;

		if (pst->terminate)
			break;
	}

	/* Skip trailing place-holders */
	pattern = otx2_flow_skip_void_and_any_items(pattern);

	/* Are there more items than what we can handle? */
	if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
				   "unsupported item in the sequence");
		return -ENOTSUP;
	}

	return 0;
}
419
420 static int
421 flow_parse_rule(struct rte_eth_dev *dev,
422                 const struct rte_flow_attr *attr,
423                 const struct rte_flow_item pattern[],
424                 const struct rte_flow_action actions[],
425                 struct rte_flow_error *error,
426                 struct rte_flow *flow,
427                 struct otx2_parse_state *pst)
428 {
429         int err;
430
431         /* Check attributes */
432         err = flow_parse_attr(dev, attr, error, flow);
433         if (err)
434                 return err;
435
436         /* Check actions */
437         err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
438         if (err)
439                 return err;
440
441         /* Check pattern */
442         err = flow_parse_pattern(dev, pattern, error, flow, pst);
443         if (err)
444                 return err;
445
446         /* Check for overlaps? */
447         return 0;
448 }
449
450 static int
451 otx2_flow_validate(struct rte_eth_dev *dev,
452                    const struct rte_flow_attr *attr,
453                    const struct rte_flow_item pattern[],
454                    const struct rte_flow_action actions[],
455                    struct rte_flow_error *error)
456 {
457         struct otx2_parse_state parse_state;
458         struct rte_flow flow;
459
460         memset(&flow, 0, sizeof(flow));
461         return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
462                                &parse_state);
463 }
464
/* Translate OpenFlow-style VLAN actions into NIX vtag configuration.
 *
 * RX: OF_POP_VLAN programs vtag type 0 strip+capture via mailbox; the
 * mailbox message is sent only when this is the first vtag rule
 * (vtag_actions == 1 -- presumably incremented during action parsing,
 * TODO confirm), later rules reuse the HW config and only set
 * flow->vtag_action.
 * TX: SET_VLAN_VID / PUSH_VLAN / SET_VLAN_PCP are collected first and,
 * if any was present, one vtag0 insert entry is allocated via mailbox
 * and encoded into flow->vtag_action.
 */
static int
flow_program_vtag_action(struct rte_eth_dev *eth_dev,
			 const struct rte_flow_action actions[],
			 struct rte_flow *flow)
{
	uint16_t vlan_id = 0, vlan_ethtype = RTE_ETHER_TYPE_VLAN;
	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
	union {
		uint64_t reg;
		struct nix_tx_vtag_action_s act;
	} tx_vtag_action;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_vtag_config *vtag_cfg;
	struct nix_vtag_config_rsp *rsp;
	bool vlan_insert_action = false;
	uint64_t rx_vtag_action = 0;
	uint8_t vlan_pcp = 0;
	int rc;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_OF_POP_VLAN) {
			if (dev->npc_flow.vtag_actions == 1) {
				vtag_cfg =
					otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
				vtag_cfg->cfg_type = VTAG_RX;
				vtag_cfg->rx.strip_vtag = 1;
				/* Always capture */
				vtag_cfg->rx.capture_vtag = 1;
				vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
				vtag_cfg->rx.vtag_type = 0;

				rc = otx2_mbox_process(mbox);
				if (rc)
					return rc;
			}

			/* NIX_RX_VTAG_ACTION_S: valid bit, LB layer id,
			 * relative pointer to the tag.
			 */
			rx_vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15);
			rx_vtag_action |= (NPC_LID_LB << 8);
			rx_vtag_action |= NIX_RX_VTAGACTION_VTAG0_RELPTR;
			flow->vtag_action = rx_vtag_action;
		} else if (actions->type ==
			   RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
			const struct rte_flow_action_of_set_vlan_vid *vtag =
				(const struct rte_flow_action_of_set_vlan_vid *)
					actions->conf;
			vlan_id = rte_be_to_cpu_16(vtag->vlan_vid);
			if (vlan_id > 0xfff) {
				otx2_err("Invalid vlan_id for set vlan action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) {
			const struct rte_flow_action_of_push_vlan *ethtype =
				(const struct rte_flow_action_of_push_vlan *)
					actions->conf;
			vlan_ethtype = rte_be_to_cpu_16(ethtype->ethertype);
			if (vlan_ethtype != RTE_ETHER_TYPE_VLAN &&
			    vlan_ethtype != RTE_ETHER_TYPE_QINQ) {
				otx2_err("Invalid ethtype specified for push"
					 " vlan action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		} else if (actions->type ==
			   RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
			const struct rte_flow_action_of_set_vlan_pcp *pcp =
				(const struct rte_flow_action_of_set_vlan_pcp *)
					actions->conf;
			vlan_pcp = pcp->vlan_pcp;
			if (vlan_pcp > 0x7) {
				otx2_err("Invalid PCP value for pcp action");
				return -EINVAL;
			}
			vlan_insert_action = true;
		}
	}

	if (vlan_insert_action) {
		vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
		vtag_cfg->cfg_type = VTAG_TX;
		vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
		/* TPID:16 | PCP:3 | DEI:1 | VID:12 -- DEI stays 0 */
		vtag_cfg->tx.vtag0 =
			((vlan_ethtype << 16) | (vlan_pcp << 13) | vlan_id);
		vtag_cfg->tx.cfg_vtag0 = 1;
		rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		tx_vtag_action.reg = 0;
		tx_vtag_action.act.vtag0_def = rsp->vtag0_idx;
		if (tx_vtag_action.act.vtag0_def < 0) {
			otx2_err("Failed to config TX VTAG action");
			return -EINVAL;
		}
		tx_vtag_action.act.vtag0_lid = NPC_LID_LA;
		tx_vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
		tx_vtag_action.act.vtag0_relptr =
			NIX_TX_VTAGACTION_VTAG0_RELPTR;
		flow->vtag_action = tx_vtag_action.reg;
	}
	return 0;
}
567
568 static struct rte_flow *
569 otx2_flow_create(struct rte_eth_dev *dev,
570                  const struct rte_flow_attr *attr,
571                  const struct rte_flow_item pattern[],
572                  const struct rte_flow_action actions[],
573                  struct rte_flow_error *error)
574 {
575         struct otx2_eth_dev *hw = dev->data->dev_private;
576         struct otx2_parse_state parse_state;
577         struct otx2_mbox *mbox = hw->mbox;
578         struct rte_flow *flow, *flow_iter;
579         struct otx2_flow_list *list;
580         int rc;
581
582         flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
583         if (flow == NULL) {
584                 rte_flow_error_set(error, ENOMEM,
585                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
586                                    NULL,
587                                    "Memory allocation failed");
588                 return NULL;
589         }
590         memset(flow, 0, sizeof(*flow));
591
592         rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
593                              &parse_state);
594         if (rc != 0)
595                 goto err_exit;
596
597         rc = flow_program_vtag_action(dev, actions, flow);
598         if (rc != 0) {
599                 rte_flow_error_set(error, EIO,
600                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
601                                    NULL,
602                                    "Failed to program vlan action");
603                 goto err_exit;
604         }
605
606         rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
607         if (rc != 0) {
608                 rte_flow_error_set(error, EIO,
609                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
610                                    NULL,
611                                    "Failed to insert filter");
612                 goto err_exit;
613         }
614
615         rc = flow_program_rss_action(dev, actions, flow);
616         if (rc != 0) {
617                 rte_flow_error_set(error, EIO,
618                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
619                                    NULL,
620                                    "Failed to program rss action");
621                 goto err_exit;
622         }
623
624         if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
625                 rc = flow_update_sec_tt(dev, actions);
626                 if (rc != 0) {
627                         rte_flow_error_set(error, EIO,
628                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
629                                            NULL,
630                                            "Failed to update tt with sec act");
631                         goto err_exit;
632                 }
633         }
634
635         list = &hw->npc_flow.flow_list[flow->priority];
636         /* List in ascending order of mcam entries */
637         TAILQ_FOREACH(flow_iter, list, next) {
638                 if (flow_iter->mcam_id > flow->mcam_id) {
639                         TAILQ_INSERT_BEFORE(flow_iter, flow, next);
640                         return flow;
641                 }
642         }
643
644         TAILQ_INSERT_TAIL(list, flow, next);
645         return flow;
646
647 err_exit:
648         rte_free(flow);
649         return NULL;
650 }
651
/* rte_flow destroy callback: undo fast-path offload flags tied to this
 * rule (MARK, VLAN strip), free its RSS group and MCAM entry, unlink it
 * from the priority list and free the software flow.
 */
static int
otx2_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	struct otx2_mbox *mbox = hw->mbox;
	struct rte_bitmap *bmap;
	uint16_t match_id;
	int rc;

	/* MARK/FLAG id encoded in the rule's RX action */
	match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
		NIX_RX_ACT_MATCH_MASK;

	if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
		/* A mark rule exists, so the counter must be non-zero */
		if (rte_atomic32_read(&npc->mark_actions) == 0)
			return -EINVAL;

		/* Clear mark offload flag if there are no more mark actions */
		if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
			hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
			otx2_eth_set_rx_function(dev);
		}
	}

	/* Drop the VLAN-strip fast-path flag when the last vtag rule
	 * goes away, unless stripping was enabled independently.
	 */
	if (flow->nix_intf == OTX2_INTF_RX && flow->vtag_action) {
		npc->vtag_actions--;
		if (npc->vtag_actions == 0) {
			if (hw->vlan_info.strip_on == 0) {
				hw->rx_offload_flags &=
					~NIX_RX_OFFLOAD_VLAN_STRIP_F;
				otx2_eth_set_rx_function(dev);
			}
		}
	}

	/* NOTE(review): the two failures below fill *error but do NOT
	 * abort -- the flow is still unlinked, freed and 0 returned.
	 * Confirm callers do not depend on a negative return here.
	 */
	rc = flow_free_rss_action(dev, flow);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to free rss action");
	}

	rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
	if (rc != 0) {
		rte_flow_error_set(error, EIO,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Failed to destroy filter");
	}

	TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);

	bmap = npc->live_entries[flow->priority];
	rte_bitmap_clear(bmap, flow->mcam_id);

	rte_free(flow);
	return 0;
}
713
714 static int
715 otx2_flow_flush(struct rte_eth_dev *dev,
716                 struct rte_flow_error *error)
717 {
718         struct otx2_eth_dev *hw = dev->data->dev_private;
719         int rc;
720
721         rc = otx2_flow_free_all_resources(hw);
722         if (rc) {
723                 otx2_err("Error when deleting NPC MCAM entries "
724                                 ", counters");
725                 rte_flow_error_set(error, EIO,
726                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
727                                    NULL,
728                                    "Failed to flush filter");
729                 return -rte_errno;
730         }
731
732         return 0;
733 }
734
/* rte_flow isolate callback: not implemented. Supporting it would
 * require un-installing the default MCAM entry for this port, so the
 * request is always rejected with ENOTSUP.
 */
static int
otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
		  int enable __rte_unused,
		  struct rte_flow_error *error)
{
	/*
	 * If we support, we need to un-install the default mcam
	 * entry for this port.
	 */

	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "Flow isolation not supported");

	return -rte_errno;
}
752
753 static int
754 otx2_flow_query(struct rte_eth_dev *dev,
755                 struct rte_flow *flow,
756                 const struct rte_flow_action *action,
757                 void *data,
758                 struct rte_flow_error *error)
759 {
760         struct otx2_eth_dev *hw = dev->data->dev_private;
761         struct rte_flow_query_count *query = data;
762         struct otx2_mbox *mbox = hw->mbox;
763         const char *errmsg = NULL;
764         int errcode = ENOTSUP;
765         int rc;
766
767         if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
768                 errmsg = "Only COUNT is supported in query";
769                 goto err_exit;
770         }
771
772         if (flow->ctr_id == NPC_COUNTER_NONE) {
773                 errmsg = "Counter is not available";
774                 goto err_exit;
775         }
776
777         rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
778         if (rc != 0) {
779                 errcode = EIO;
780                 errmsg = "Error reading flow counter";
781                 goto err_exit;
782         }
783         query->hits_set = 1;
784         query->bytes_set = 0;
785
786         if (query->reset)
787                 rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
788         if (rc != 0) {
789                 errcode = EIO;
790                 errmsg = "Error clearing flow counter";
791                 goto err_exit;
792         }
793
794         return 0;
795
796 err_exit:
797         rte_flow_error_set(error, errcode,
798                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
799                            NULL,
800                            errmsg);
801         return -rte_errno;
802 }
803
/* rte_flow driver ops table exposed to the ethdev flow API. */
const struct rte_flow_ops otx2_flow_ops = {
	.validate = otx2_flow_validate,
	.create = otx2_flow_create,
	.destroy = otx2_flow_destroy,
	.flush = otx2_flow_flush,
	.query = otx2_flow_query,
	.isolate = otx2_flow_isolate,
};
812
/*
 * Each set bit in the supported-fields nibble mask contributes one
 * nibble (4 bits) to the MCAM search key; return the resulting key
 * length in bits.
 */
static int
flow_supp_key_len(uint32_t supp_mask)
{
	uint32_t m;
	int nibbles = 0;

	for (m = supp_mask; m != 0; m >>= 1)
		nibbles += (int)(m & 0x1);

	return nibbles * 4;
}
823
824 /* Refer HRM register:
825  * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
826  * and
827  * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
828  **/
829 #define BYTESM1_SHIFT   16
830 #define HDR_OFF_SHIFT   8
831 static void
832 flow_update_kex_info(struct npc_xtract_info *xtract_info,
833                      uint64_t val)
834 {
835         xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
836         xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
837         xtract_info->key_off = val & 0x3f;
838         xtract_info->enable = ((val >> 7) & 0x1);
839         xtract_info->flags_enable = ((val >> 6) & 0x1);
840 }
841
/*
 * Cache the KEX (key extraction) profile returned by the AF mailbox
 * into the per-port npc state: supported-field nibble masks, key
 * lengths/widths and the per-(intf, lid, lt, ld) and per-flag
 * extraction descriptors used later when building MCAM keys.
 */
static void
flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
		      struct npc_get_kex_cfg_rsp *kex_rsp)
{
	volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
		[NPC_MAX_LD];
	struct npc_xtract_info *x_info = NULL;
	int lid, lt, ld, fl, ix;
	otx2_dxcfg_t *p;
	uint64_t keyw;
	uint64_t val;

	/* Low 31 bits of [rt]x_keyx_cfg are the nibble-enable mask */
	npc->keyx_supp_nmask[NPC_MCAM_RX] =
		kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_supp_nmask[NPC_MCAM_TX] =
		kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_len[NPC_MCAM_RX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
	npc->keyx_len[NPC_MCAM_TX] =
		flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);

	/* Bits 34:32 carry the key width selector */
	keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_RX] = keyw;
	keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_TX] = keyw;

	/* Update KEX_LD_FLAG */
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			for (fl = 0; fl < NPC_MAX_LFL; fl++) {
				x_info =
				    &npc->prx_fxcfg[ix][ld][fl].xtract[0];
				val = kex_rsp->intf_ld_flags[ix][ld][fl];
				flow_update_kex_info(x_info, val);
			}
		}
	}

	/* Update LID, LT and LDATA cfg */
	p = &npc->prx_dxcfg;
	/* View the flat mailbox array as the 4-D register layout */
	q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
			(&kex_rsp->intf_lid_lt_ld);
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (lid = 0; lid < NPC_MAX_LID; lid++) {
			for (lt = 0; lt < NPC_MAX_LT; lt++) {
				for (ld = 0; ld < NPC_MAX_LD; ld++) {
					x_info = &(*p)[ix][lid][lt].xtract[ld];
					val = (*q)[ix][lid][lt][ld];
					flow_update_kex_info(x_info, val);
				}
			}
		}
	}
	/* Update LDATA Flags cfg */
	npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
	npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
}
899
900 static struct otx2_idev_kex_cfg *
901 flow_intra_dev_kex_cfg(void)
902 {
903         static const char name[] = "octeontx2_intra_device_kex_conf";
904         struct otx2_idev_kex_cfg *idev;
905         const struct rte_memzone *mz;
906
907         mz = rte_memzone_lookup(name);
908         if (mz)
909                 return mz->addr;
910
911         /* Request for the first time */
912         mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
913                                          SOCKET_ID_ANY, 0, OTX2_ALIGN);
914         if (mz) {
915                 idev = mz->addr;
916                 rte_atomic16_set(&idev->kex_refcnt, 0);
917                 return idev;
918         }
919         return NULL;
920 }
921
922 static int
923 flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
924 {
925         struct otx2_npc_flow_info *npc = &dev->npc_flow;
926         struct npc_get_kex_cfg_rsp *kex_rsp;
927         struct otx2_mbox *mbox = dev->mbox;
928         char mkex_pfl_name[MKEX_NAME_LEN];
929         struct otx2_idev_kex_cfg *idev;
930         int rc = 0;
931
932         idev = flow_intra_dev_kex_cfg();
933         if (!idev)
934                 return -ENOMEM;
935
936         /* Is kex_cfg read by any another driver? */
937         if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
938                 /* Call mailbox to get key & data size */
939                 (void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
940                 otx2_mbox_msg_send(mbox, 0);
941                 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
942                 if (rc) {
943                         otx2_err("Failed to fetch NPC keyx config");
944                         goto done;
945                 }
946                 memcpy(&idev->kex_cfg, kex_rsp,
947                        sizeof(struct npc_get_kex_cfg_rsp));
948         }
949
950         otx2_mbox_memcpy(mkex_pfl_name,
951                          idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);
952
953         strlcpy((char *)dev->mkex_pfl_name,
954                 mkex_pfl_name, sizeof(dev->mkex_pfl_name));
955
956         flow_process_mkex_cfg(npc, &idev->kex_cfg);
957
958 done:
959         return rc;
960 }
961
962 int
963 otx2_flow_init(struct otx2_eth_dev *hw)
964 {
965         uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
966         struct otx2_npc_flow_info *npc = &hw->npc_flow;
967         uint32_t bmap_sz;
968         int rc = 0, idx;
969
970         rc = flow_fetch_kex_cfg(hw);
971         if (rc) {
972                 otx2_err("Failed to fetch NPC keyx config from idev");
973                 return rc;
974         }
975
976         rte_atomic32_init(&npc->mark_actions);
977         npc->vtag_actions = 0;
978
979         npc->mcam_entries = NPC_MCAM_TOT_ENTRIES >> npc->keyw[NPC_MCAM_RX];
980         /* Free, free_rev, live and live_rev entries */
981         bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
982         mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
983                           RTE_CACHE_LINE_SIZE);
984         if (mem == NULL) {
985                 otx2_err("Bmap alloc failed");
986                 rc = -ENOMEM;
987                 return rc;
988         }
989
990         npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
991                                            * sizeof(struct otx2_mcam_ents_info),
992                                            0);
993         if (npc->flow_entry_info == NULL) {
994                 otx2_err("flow_entry_info alloc failed");
995                 rc = -ENOMEM;
996                 goto err;
997         }
998
999         npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
1000                                         * sizeof(struct rte_bitmap *),
1001                                         0);
1002         if (npc->free_entries == NULL) {
1003                 otx2_err("free_entries alloc failed");
1004                 rc = -ENOMEM;
1005                 goto err;
1006         }
1007
1008         npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
1009                                         * sizeof(struct rte_bitmap *),
1010                                         0);
1011         if (npc->free_entries_rev == NULL) {
1012                 otx2_err("free_entries_rev alloc failed");
1013                 rc = -ENOMEM;
1014                 goto err;
1015         }
1016
1017         npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
1018                                         * sizeof(struct rte_bitmap *),
1019                                         0);
1020         if (npc->live_entries == NULL) {
1021                 otx2_err("live_entries alloc failed");
1022                 rc = -ENOMEM;
1023                 goto err;
1024         }
1025
1026         npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
1027                                         * sizeof(struct rte_bitmap *),
1028                                         0);
1029         if (npc->live_entries_rev == NULL) {
1030                 otx2_err("live_entries_rev alloc failed");
1031                 rc = -ENOMEM;
1032                 goto err;
1033         }
1034
1035         npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
1036                                         * sizeof(struct otx2_flow_list),
1037                                         0);
1038         if (npc->flow_list == NULL) {
1039                 otx2_err("flow_list alloc failed");
1040                 rc = -ENOMEM;
1041                 goto err;
1042         }
1043
1044         npc_mem = mem;
1045         for (idx = 0; idx < npc->flow_max_priority; idx++) {
1046                 TAILQ_INIT(&npc->flow_list[idx]);
1047
1048                 npc->free_entries[idx] =
1049                         rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1050                 mem += bmap_sz;
1051
1052                 npc->free_entries_rev[idx] =
1053                         rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1054                 mem += bmap_sz;
1055
1056                 npc->live_entries[idx] =
1057                         rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1058                 mem += bmap_sz;
1059
1060                 npc->live_entries_rev[idx] =
1061                         rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1062                 mem += bmap_sz;
1063
1064                 npc->flow_entry_info[idx].free_ent = 0;
1065                 npc->flow_entry_info[idx].live_ent = 0;
1066                 npc->flow_entry_info[idx].max_id = 0;
1067                 npc->flow_entry_info[idx].min_id = ~(0);
1068         }
1069
1070         npc->rss_grps = NIX_RSS_GRPS;
1071
1072         bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
1073         nix_mem = rte_zmalloc(NULL, bmap_sz,  RTE_CACHE_LINE_SIZE);
1074         if (nix_mem == NULL) {
1075                 otx2_err("Bmap alloc failed");
1076                 rc = -ENOMEM;
1077                 goto err;
1078         }
1079
1080         npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);
1081
1082         /* Group 0 will be used for RSS,
1083          * 1 -7 will be used for rte_flow RSS action
1084          */
1085         rte_bitmap_set(npc->rss_grp_entries, 0);
1086
1087         return 0;
1088
1089 err:
1090         if (npc->flow_list)
1091                 rte_free(npc->flow_list);
1092         if (npc->live_entries_rev)
1093                 rte_free(npc->live_entries_rev);
1094         if (npc->live_entries)
1095                 rte_free(npc->live_entries);
1096         if (npc->free_entries_rev)
1097                 rte_free(npc->free_entries_rev);
1098         if (npc->free_entries)
1099                 rte_free(npc->free_entries);
1100         if (npc->flow_entry_info)
1101                 rte_free(npc->flow_entry_info);
1102         if (npc_mem)
1103                 rte_free(npc_mem);
1104         return rc;
1105 }
1106
1107 int
1108 otx2_flow_fini(struct otx2_eth_dev *hw)
1109 {
1110         struct otx2_npc_flow_info *npc = &hw->npc_flow;
1111         int rc;
1112
1113         rc = otx2_flow_free_all_resources(hw);
1114         if (rc) {
1115                 otx2_err("Error when deleting NPC MCAM entries, counters");
1116                 return rc;
1117         }
1118
1119         if (npc->flow_list)
1120                 rte_free(npc->flow_list);
1121         if (npc->live_entries_rev)
1122                 rte_free(npc->live_entries_rev);
1123         if (npc->live_entries)
1124                 rte_free(npc->live_entries);
1125         if (npc->free_entries_rev)
1126                 rte_free(npc->free_entries_rev);
1127         if (npc->free_entries)
1128                 rte_free(npc->free_entries);
1129         if (npc->flow_entry_info)
1130                 rte_free(npc->flow_entry_info);
1131
1132         return 0;
1133 }