net/cnxk: add TM capabilities and queue rate limit handlers
dpdk.git: drivers/net/cnxk/cnxk_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <cnxk_ethdev.h>
#include <cnxk_tm.h>
#include <cnxk_utils.h>

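/* rte_tm node_type_get() handler: report whether the given node is a leaf. */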
static int
cnxk_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
			  int *is_leaf, struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_tm_node *node;

	if (is_leaf == NULL) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		return -EINVAL;
	}

	node = roc_nix_tm_node_get(nix, node_id);
	if (node_id == RTE_TM_NODE_ID_NULL || !node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		return -EINVAL;
	}

	if (roc_nix_tm_lvl_is_leaf(nix, node->lvl))
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

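/* rte_tm capabilities_get() handler: report port-wide TM limits derived
 * from the free NIX scheduler queue (schq) counts per level.
 */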
static int
cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
		     struct rte_tm_capabilities *cap,
		     struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc, max_nr_nodes = 0, i, n_lvl;
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];

	memset(cap, 0, sizeof(*cap));

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
		max_nr_nodes += schq[i];

	cap->n_nodes_max = max_nr_nodes + dev->nb_txq;

	n_lvl = roc_nix_tm_lvl_cnt_get(nix);
	/* Consider leaf level */
	cap->n_levels_max = n_lvl + 1;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	/* Shaper Capabilities */
	cap->shaper_private_n_max = max_nr_nodes;
	cap->shaper_n_max = max_nr_nodes;
	cap->shaper_private_dual_rate_n_max = max_nr_nodes;
	cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
	cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
	cap->shaper_private_packet_mode_supported = 1;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_pkt_length_adjust_min = NIX_TM_LENGTH_ADJUST_MIN;
	cap->shaper_pkt_length_adjust_max = NIX_TM_LENGTH_ADJUST_MAX;

	/* Schedule Capabilities */
	cap->sched_n_children_max = schq[n_lvl - 1];
	cap->sched_sp_n_priorities_max = NIX_TM_TLX_SP_PRIO_MAX;
	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
	cap->sched_wfq_n_groups_max = 1;
	cap->sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
	cap->sched_wfq_packet_mode_supported = 1;
	cap->sched_wfq_byte_mode_supported = 1;

	cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
				   RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES |
			  RTE_TM_STATS_N_PKTS_RED_DROPPED |
			  RTE_TM_STATS_N_BYTES_RED_DROPPED;

	for (i = 0; i < RTE_COLORS; i++) {
		cap->mark_vlan_dei_supported[i] = false;
		cap->mark_ip_ecn_tcp_supported[i] = false;
		cap->mark_ip_dscp_supported[i] = false;
	}

	return 0;
}

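/* rte_tm level_capabilities_get() handler: report limits for the leaf,
 * root (TL1 for PF, TL2 for VF) and intermediate scheduler levels.
 */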
static int
cnxk_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
			   struct rte_tm_level_capabilities *cap,
			   struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];
	int rc, n_lvl;

	memset(cap, 0, sizeof(*cap));

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	n_lvl = roc_nix_tm_lvl_cnt_get(nix);

	if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
		/* Leaf */
		cap->n_nodes_max = dev->nb_txq;
		cap->n_nodes_leaf_max = dev->nb_txq;
		cap->leaf_nodes_identical = 1;
		cap->leaf.stats_mask =
			RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

	} else if (lvl == ROC_TM_LVL_ROOT) {
		/* Root node, a.k.a. TL2(vf)/TL1(pf) */
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = true;
		/* Dual-rate shaping is unavailable on link-access levels */
		cap->nonleaf.shaper_private_dual_rate_supported =
			!roc_nix_tm_lvl_have_link_access(nix, lvl);
		cap->nonleaf.shaper_private_rate_min =
			NIX_TM_MIN_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_rate_max =
			NIX_TM_MAX_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_packet_mode_supported = 1;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;

		cap->nonleaf.sched_n_children_max = schq[lvl];
		cap->nonleaf.sched_sp_n_priorities_max =
			roc_nix_tm_max_prio(nix, lvl) + 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max =
			roc_nix_tm_max_sched_wt_get();
		cap->nonleaf.sched_wfq_packet_mode_supported = 1;
		cap->nonleaf.sched_wfq_byte_mode_supported = 1;

		if (roc_nix_tm_lvl_have_link_access(nix, lvl))
			cap->nonleaf.stats_mask =
				RTE_TM_STATS_N_PKTS_RED_DROPPED |
				RTE_TM_STATS_N_BYTES_RED_DROPPED;
	} else if (lvl < ROC_TM_LVL_MAX) {
		/* TL2, TL3, TL4, MDQ */
		cap->n_nodes_max = schq[lvl];
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = true;
		cap->nonleaf.shaper_private_rate_min =
			NIX_TM_MIN_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_rate_max =
			NIX_TM_MAX_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_packet_mode_supported = 1;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;

		/* The last non-leaf level (MDQ) parents the Tx queues */
		if ((int)lvl == (n_lvl - 1))
			cap->nonleaf.sched_n_children_max = dev->nb_txq;
		else
			cap->nonleaf.sched_n_children_max = schq[lvl - 1];
		/* MDQ doesn't support Strict Priority */
		cap->nonleaf.sched_sp_n_priorities_max =
			roc_nix_tm_max_prio(nix, lvl) + 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max =
			roc_nix_tm_max_sched_wt_get();
		cap->nonleaf.sched_wfq_packet_mode_supported = 1;
		cap->nonleaf.sched_wfq_byte_mode_supported = 1;
	} else {
		/* unsupported level; rc is 0 here, so return a real error */
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unsupported level";
		return -EINVAL;
	}
	return 0;
}

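/* rte_tm node_capabilities_get() handler: report limits for a single
 * node based on the hierarchy level it sits at.
 */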
static int
cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
			  struct rte_tm_node_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_nix_tm_node *tm_node;
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];
	int rc, n_lvl, lvl;

	memset(cap, 0, sizeof(*cap));

	tm_node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	lvl = tm_node->nix_node.lvl;
	n_lvl = roc_nix_tm_lvl_cnt_get(nix);

	/* Leaf node */
	if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
		cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
		return 0;
	}

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	/* Non Leaf Shaper */
	cap->shaper_private_supported = true;
	cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
	cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
	cap->shaper_private_packet_mode_supported = 1;
	cap->shaper_private_byte_mode_supported = 1;

	/* Non Leaf Scheduler */
	if (lvl == (n_lvl - 1))
		cap->nonleaf.sched_n_children_max = dev->nb_txq;
	else
		cap->nonleaf.sched_n_children_max = schq[lvl - 1];

	cap->nonleaf.sched_sp_n_priorities_max =
		roc_nix_tm_max_prio(nix, lvl) + 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max =
		cap->nonleaf.sched_n_children_max;
	cap->nonleaf.sched_wfq_n_groups_max = 1;
	cap->nonleaf.sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
	cap->nonleaf.sched_wfq_packet_mode_supported = 1;
	cap->nonleaf.sched_wfq_byte_mode_supported = 1;

	cap->shaper_private_dual_rate_supported = true;
	if (roc_nix_tm_lvl_have_link_access(nix, lvl)) {
		cap->shaper_private_dual_rate_supported = false;
		cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
				  RTE_TM_STATS_N_BYTES_RED_DROPPED;
	}

	return 0;
}

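/* TM callbacks handed to the rte_tm layer via cnxk_nix_tm_ops_get() below */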
const struct rte_tm_ops cnxk_tm_ops = {
	.node_type_get = cnxk_nix_tm_node_type_get,
	.capabilities_get = cnxk_nix_tm_capa_get,
	.level_capabilities_get = cnxk_nix_tm_level_capa_get,
	.node_capabilities_get = cnxk_nix_tm_node_capa_get,
};

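/* ethdev tm_ops_get handler: return the TM ops table to the rte_tm layer. */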
int
cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev __rte_unused, void *arg)
{
	if (!arg)
		return -EINVAL;

	/* TM is unsupported on cn96 Ax and cn95 A0 revisions */
	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
		return -EINVAL;

	*(const void **)arg = &cnxk_tm_ops;

	return 0;
}

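/* Tx queue rate limit handler (ethdev set_queue_rate_limit): switch the
 * port to the rate-limit TM topology if needed, then shape the queue's SQ.
 */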
int
cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
				 uint16_t queue_idx, uint16_t tx_rate_mbps)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6; /* Mbps -> bits/sec */
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;

	/* TM is unsupported on cn96 Ax and cn95 A0 revisions */
	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
		goto exit;

	if (queue_idx >= eth_dev->data->nb_tx_queues)
		goto exit;

	if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) &&
	    eth_dev->data->nb_tx_queues > 1) {
		/*
		 * Disable transmit; it is re-enabled once the new
		 * rate-limit topology is in place.
		 */
		rc = roc_nix_tm_hierarchy_disable(nix);
		if (rc)
			goto exit;

		rc = roc_nix_tm_prepare_rate_limited_tree(nix);
		if (rc)
			goto exit;

		rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_RLIMIT, true);
		if (rc)
			goto exit;
	}

	return roc_nix_tm_rlimit_sq(nix, queue_idx, tx_rate);
exit:
	return rc;
}
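
For reference, a minimal usage sketch (not part of this commit) of how an
application reaches these handlers through the generic ethdev/rte_tm API;
the port id and the 100 Mbps figure are illustrative:

#include <rte_ethdev.h>
#include <rte_tm.h>

/* Query TM capabilities and cap Tx queue 0 at 100 Mbps on a cnxk port.
 * rte_tm_capabilities_get() lands in cnxk_nix_tm_capa_get() above, and
 * rte_eth_set_queue_rate_limit() in cnxk_nix_tm_set_queue_rate_limit().
 */
static int
tm_usage_sketch(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error err;
	int rc;

	rc = rte_tm_capabilities_get(port_id, &cap, &err);
	if (rc)
		return rc; /* e.g. -EINVAL on cn96 Ax / cn95 A0 */

	/* Rate argument is in Mbps, as in the handler above */
	return rte_eth_set_queue_rate_limit(port_id, 0, 100);
}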