/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
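
/*
 * Traffic management (rte_tm) support for the i40e PF driver.
 * The callbacks below are collected in i40e_tm_ops and exposed to
 * the ethdev layer through i40e_tm_ops_get().
 */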
static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_capabilities *cap,
				    struct rte_tm_error *error);
static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error);
static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_error *error);
static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t parent_node_id, uint32_t priority,
			 uint32_t weight, uint32_t level_id,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error);
static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_error *error);

const struct rte_tm_ops i40e_tm_ops = {
	.capabilities_get = i40e_tm_capabilities_get,
	.shaper_profile_add = i40e_shaper_profile_add,
	.shaper_profile_delete = i40e_shaper_profile_del,
	.node_add = i40e_node_add,
	.node_delete = i40e_node_delete,
};
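
/*
 * Hand the TM ops table to the caller. "arg" receives a pointer
 * to i40e_tm_ops.
 */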
int
i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &i40e_tm_ops;

	return 0;
}
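
/* Initialize an empty TM configuration: no nodes, no shaper profiles. */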
void
i40e_tm_conf_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);

	/* initialize node configuration */
	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;
	pf->tm_conf.committed = false;
}
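
/*
 * Free every queue node, TC node, the root node and all shaper
 * profiles, and reset the node counters.
 */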
void
i40e_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;
	struct i40e_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		rte_free(tm_node);
	}
	pf->tm_conf.nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		rte_free(tm_node);
	}
	pf->tm_conf.nb_tc_node = 0;
	if (pf->tm_conf.root) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
		TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}
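
/* Count the traffic classes enabled on the main VSI. */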
static inline uint16_t
i40e_tc_nb_get(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	uint16_t sum = 0;
	int i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (main_vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}

	return sum;
}
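
/*
 * Report the port's TM capabilities: a three-level hierarchy
 * (port, TC, queue) with private single-rate shapers; strict
 * priority, WFQ, WRED and shared shapers are not supported.
 */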
static int
i40e_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t tc_nb = i40e_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->func_caps.num_tx_qp)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * support port + TCs + queues
	 * here shows the max capability not the current configuration.
	 */
	cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
	cap->n_levels_max = 3; /* port, TC, queue */
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 40Gbps -> 5GBps (bytes per second) */
	cap->shaper_private_rate_max = 5000000000ull;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->sched_n_children_max = hw->func_caps.num_tx_qp;
	/**
	 * HW supports SP. But no plan to support it now.
	 * So, all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin now.
	 * So, all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;

	return 0;
}
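
/* Look up a shaper profile by ID; return NULL if it doesn't exist. */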
static inline struct i40e_tm_shaper_profile *
i40e_shaper_profile_search(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct i40e_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}
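
/*
 * Validate shaper profile parameters. Only the peak rate is
 * supported; committed rate/size, peak bucket size and packet
 * length adjustment must be 0.
 */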
static int
i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}
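
/*
 * Add a shaper profile: validate the parameters, reject duplicate
 * IDs, then copy the profile into a new entry on the profile list.
 */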
static int
i40e_shaper_profile_add(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_shaper_params *profile,
			struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = i40e_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
				     sizeof(struct i40e_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	(void)rte_memcpy(&shaper_profile->profile, profile,
			 sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);

	return 0;
}
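
/*
 * Delete a shaper profile. Fails if the ID is unknown or the
 * profile is still referenced by a node.
 */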
static int
i40e_shaper_profile_del(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}
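
/*
 * Look up a node by ID anywhere in the hierarchy and report its
 * level (port, TC or queue) through node_type.
 */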
static inline struct i40e_tm_node *
i40e_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id, enum i40e_tm_node_type *node_type)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct i40e_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = I40E_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = I40E_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = I40E_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}
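
/*
 * Validate the common node parameters. Since strict priority and
 * WFQ are not supported, priority must be 0 and weight must be 1;
 * shared shapers, congestion management and WRED are rejected.
 */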
static int
i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id,
		      uint32_t priority, uint32_t weight,
		      struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* not support shared shaper */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for root node */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for TC or queue node */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}
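
/*
 * The hierarchy is fixed at three levels: one port (root) node, up
 * to I40E_MAX_TRAFFIC_CLASS TC nodes under it, and queue nodes under
 * the TCs. For a queue node, the node id is the queue id.
 */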

/**
 * Now the TC and queue configuration is controlled by DCB.
 * We need to check whether the node configuration follows
 * the DCB configuration.
 * In the future, we may use TM to cover DCB.
 */
static int
i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      struct rte_tm_node_params *params,
	      struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_shaper_profile *shaper_profile;
	struct i40e_tm_node *tm_node;
	struct i40e_tm_node *parent_node;
	uint16_t tc_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = i40e_node_param_check(node_id, parent_node_id, priority, weight,
				    params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (i40e_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	shaper_profile = i40e_shaper_profile_search(dev,
						    params->shaper_profile_id);
	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper profile does not exist";
		return -EINVAL;
	}

	/* root node if it doesn't have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > I40E_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (pf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("i40e_tm_node",
				      sizeof(struct i40e_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		(void)rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		pf->tm_conf.root = tm_node;

		/* increase the reference counter of the shaper profile */
		shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = i40e_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}
	if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
	    parent_node_type != I40E_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		tc_nb = i40e_tc_nb_get(dev);
		if (pf->tm_conf.nb_tc_node >= tc_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For queue, the node id means queue id.
		 */
		if (node_id >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("i40e_tm_node",
			      sizeof(struct i40e_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	/* the parent is the port node for a TC, or a TC node for a queue */
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	(void)rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
				  tm_node, node);
		pf->tm_conf.nb_tc_node++;
	} else {
		TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
				  tm_node, node);
		pf->tm_conf.nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	shaper_profile->reference_count++;

	return 0;
}
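
/*
 * Delete a node. Only allowed before the configuration is committed
 * and when the node has no children; the reference counters of the
 * parent and of the shaper profile are decremented accordingly.
 */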
static int
i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		 struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = i40e_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == I40E_TM_NODE_TYPE_PORT) {
		tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return 0;
	}

	/* TC or queue node */
	tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == I40E_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		pf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		pf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}