/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_capabilities *cap,
				    struct rte_tm_error *error);
static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error);
static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_error *error);
static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t parent_node_id, uint32_t priority,
			 uint32_t weight, uint32_t level_id,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error);
const struct rte_tm_ops i40e_tm_ops = {
	.capabilities_get = i40e_tm_capabilities_get,
	.shaper_profile_add = i40e_shaper_profile_add,
	.shaper_profile_delete = i40e_shaper_profile_del,
	.node_add = i40e_node_add,
};
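
/**
 * Return the rte_tm ops table above to the ethdev layer
 * (the tm_ops_get callback of the driver).
 */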
int
i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &i40e_tm_ops;

	return 0;
}
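
/**
 * Initialize the software TM configuration: empty shaper profile
 * list, no nodes, nothing committed to HW yet.
 */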
void
i40e_tm_conf_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);

	/* initialize node configuration */
	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;
	pf->tm_conf.committed = false;
}
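
/**
 * Free every node and shaper profile still held in the software
 * TM configuration.
 */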
void
i40e_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;
	struct i40e_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		rte_free(tm_node);
	}
	pf->tm_conf.nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		rte_free(tm_node);
	}
	pf->tm_conf.nb_tc_node = 0;
	if (pf->tm_conf.root) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
		TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}
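
/**
 * Count the TCs enabled on the main VSI; each bit set in enabled_tc
 * stands for one traffic class.
 */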
static inline uint16_t
i40e_tc_nb_get(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	uint16_t sum = 0;
	int i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (main_vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}

	return sum;
}
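
/**
 * Fill in the TM capabilities of the port: a three-level hierarchy
 * with one private shaper per node, no shared shapers, no WRED and
 * no congestion management.
 */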
static int
i40e_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t tc_nb = i40e_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->func_caps.num_tx_qp)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * support port + TCs + queues
	 * this shows the max capability, not the current configuration.
	 */
	cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
	cap->n_levels_max = 3; /* port, TC, queue */
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 40Gbps -> 5GBps; the rate is in bytes per second */
	cap->shaper_private_rate_max = 5000000000ull;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->sched_n_children_max = hw->func_caps.num_tx_qp;
	/**
	 * HW supports SP, but there is no plan to support it for now.
	 * So, all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin now.
	 * So, all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}
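
/**
 * Look up a shaper profile by ID in the per-PF list; return NULL if
 * no profile with that ID has been added.
 */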
static inline struct i40e_tm_shaper_profile *
i40e_shaper_profile_search(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct i40e_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}
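
/**
 * Reject every shaper parameter the driver cannot honor: only the
 * peak rate is supported, i.e. no committed rate, no bucket sizes
 * and no packet length adjustment.
 */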
static int
i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}
static int
i40e_shaper_profile_add(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_shaper_params *profile,
			struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = i40e_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
				     sizeof(struct i40e_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	(void)rte_memcpy(&shaper_profile->profile, profile,
			 sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);

	return 0;
}
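
/**
 * Delete a shaper profile, refusing if any node still references it.
 */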
static int
i40e_shaper_profile_del(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}
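
/**
 * Find a node by ID anywhere in the hierarchy and report its level
 * (port, TC or queue) through *node_type.
 */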
static inline struct i40e_tm_node *
i40e_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id, enum i40e_tm_node_type *node_type)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct i40e_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = I40E_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = I40E_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = I40E_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}
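
/**
 * Sanity-check the parameters of a node to be added: priority must
 * be 0 and weight must be 1, and none of the shared shaper, WFQ,
 * congestion management or WRED features may be requested.
 */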
static int
i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id,
		      uint32_t priority, uint32_t weight,
		      struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* shared shaper not supported */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for the non-leaf (root) node */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be in byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for TC or queue node */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}
/**
 * Now the TC and queue configuration is controlled by DCB.
 * We need to check whether the node configuration follows
 * the DCB configuration.
 * In the future, we may use TM to cover DCB.
 */
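/**
 * A minimal usage sketch from the application side (hypothetical
 * node IDs; assumes port_id is an initialized i40e port with DCB
 * configured, and err is a struct rte_tm_error in scope). Rates
 * are in bytes per second, so 1250000000 is 10Gbit/s. Note that
 * TC nodes take the leaf parameter checks in i40e_node_param_check,
 * so they need a leaf-style params struct as well:
 *
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 1250000000 },
 *	};
 *	struct rte_tm_node_params np_root = {
 *		.shaper_profile_id = 0,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *	struct rte_tm_node_params np_leaf = {
 *		.shaper_profile_id = 0,
 *		.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE,
 *	};
 *
 *	rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
 *	rte_tm_node_add(port_id, 1000000, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np_root, &err);
 *	rte_tm_node_add(port_id, 1000, 1000000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np_leaf, &err);
 *	rte_tm_node_add(port_id, 0, 1000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np_leaf, &err);
 */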
static int
i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      struct rte_tm_node_params *params,
	      struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_shaper_profile *shaper_profile;
	struct i40e_tm_node *tm_node;
	struct i40e_tm_node *parent_node;
	uint16_t tc_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = i40e_node_param_check(node_id, parent_node_id, priority, weight,
				    params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (i40e_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	shaper_profile = i40e_shaper_profile_search(dev,
						    params->shaper_profile_id);
	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper profile does not exist";
		return -EINVAL;
	}

	/* root node if it doesn't have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > I40E_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (pf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("i40e_tm_node",
				      sizeof(struct i40e_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		(void)rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		pf->tm_conf.root = tm_node;

		/* increase the reference counter of the shaper profile */
		shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = i40e_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}
	if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
	    parent_node_type != I40E_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		tc_nb = i40e_tc_nb_get(dev);
		if (pf->tm_conf.nb_tc_node >= tc_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For queue, the node id means queue id.
		 */
		if (node_id >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue id too large";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("i40e_tm_node",
			      sizeof(struct i40e_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	/* a queue node hangs off its TC node, not always off the root */
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	(void)rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
				  tm_node, node);
		pf->tm_conf.nb_tc_node++;
	} else {
		TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
				  tm_node, node);
		pf->tm_conf.nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	shaper_profile->reference_count++;

	return 0;
}