/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>

#include "ixgbe_ethdev.h"

static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
				     struct rte_tm_capabilities *cap,
				     struct rte_tm_error *error);
static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_shaper_params *profile,
				    struct rte_tm_error *error);
static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_error *error);
static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			  uint32_t parent_node_id, uint32_t priority,
			  uint32_t weight, uint32_t level_id,
			  struct rte_tm_node_params *params,
			  struct rte_tm_error *error);
static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			     struct rte_tm_error *error);

const struct rte_tm_ops ixgbe_tm_ops = {
	.capabilities_get = ixgbe_tm_capabilities_get,
	.shaper_profile_add = ixgbe_shaper_profile_add,
	.shaper_profile_delete = ixgbe_shaper_profile_del,
	.node_add = ixgbe_node_add,
	.node_delete = ixgbe_node_delete,
};
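
/*
 * Note: applications do not call these handlers directly. They go through
 * the generic rte_tm API, which fetches this ops table via
 * ixgbe_tm_ops_get(). A minimal sketch (port_id is assumed to be a
 * configured ixgbe port; error handling trimmed):
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &err))
 *		printf("TM capabilities query failed: %s\n", err.message);
 */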

int
ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		 void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &ixgbe_tm_ops;

	return 0;
}

void
ixgbe_tm_conf_init(struct rte_eth_dev *dev)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&tm_conf->shaper_profile_list);

	/* initialize node configuration */
	tm_conf->root = NULL;
	TAILQ_INIT(&tm_conf->queue_list);
	TAILQ_INIT(&tm_conf->tc_list);
	tm_conf->nb_tc_node = 0;
	tm_conf->nb_queue_node = 0;
	tm_conf->committed = false;
}

void
ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct ixgbe_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_tc_node = 0;
	if (tm_conf->root) {
		rte_free(tm_conf->root);
		tm_conf->root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
		TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
		TAILQ_REMOVE(&tm_conf->shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}
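
/*
 * Derive the number of TX traffic classes from the current TX multi-queue
 * mode: the value configured for DCB or VMDq+DCB, otherwise a single TC.
 */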

static inline uint8_t
ixgbe_tc_nb_get(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf;
	uint8_t nb_tcs = 0;

	eth_conf = &dev->data->dev_conf;
	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    ETH_32_POOLS)
			nb_tcs = ETH_4_TCS;
		else
			nb_tcs = ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	return nb_tcs;
}

static int
ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
			  struct rte_tm_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t tc_nb = ixgbe_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->mac.max_tx_queues)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * Here is the max capability, not the current configuration.
	 */
	/* port + TCs + queues */
	cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
			   hw->mac.max_tx_queues;
	cap->n_levels_max = 3;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->sched_n_children_max = hw->mac.max_tx_queues;
	/**
	 * HW supports SP, but there is no plan to support it for now.
	 * So all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin for now.
	 * So all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;

	return 0;
}
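
/*
 * The per-node rate ceiling advertised above comes straight from the link
 * speed: 10 Gbit/s = 10 * 10^9 / 8 = 1.25 * 10^9 bytes/s, as rte_tm shaper
 * rates are expressed in bytes per second.
 */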

static inline struct ixgbe_tm_shaper_profile *
ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
			    uint32_t shaper_profile_id)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_shaper_profile_list *shaper_profile_list =
		&tm_conf->shaper_profile_list;
	struct ixgbe_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				 struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

static int
ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *profile,
			 struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = ixgbe_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
				     sizeof(struct ixgbe_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	(void)rte_memcpy(&shaper_profile->profile, profile,
			 sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
			  shaper_profile, node);

	return 0;
}
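
/*
 * Only the peak rate of a profile is used by this driver: committed
 * rate/size, peak bucket size and packet length adjustment must all be
 * zero (see ixgbe_shaper_profile_param_check()). A minimal sketch of
 * adding a 100 MB/s (bytes per second) profile through the generic API,
 * where profile id 1 is an arbitrary example:
 *
 *	struct rte_tm_shaper_params sp;
 *	struct rte_tm_error err;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.peak.rate = 100000000;
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 */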

static int
ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}

static inline struct ixgbe_tm_node *
ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
		     enum ixgbe_tm_node_type *node_type)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_node *tm_node;

	if (tm_conf->root && tm_conf->root->id == node_id) {
		*node_type = IXGBE_TM_NODE_TYPE_PORT;
		return tm_conf->root;
	}

	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = IXGBE_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = IXGBE_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}
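
/*
 * Look up the base TX queue index and the number of TX queues owned by a
 * given TC node. The layout depends on the number of enabled VF pools
 * (ETH_32_POOLS/ETH_16_POOLS) and on the number of configured TCs.
 */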

static void
ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
			uint16_t *base, uint16_t *nb)
{
	uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num = pci_dev->max_vfs;

	if (vf_num >= ETH_32_POOLS) {
	} else if (vf_num >= ETH_16_POOLS) {

	*base = vf_num * nb_tcs + tc_node_no;

	if (nb_tcs == ETH_8_TCS) {
		switch (tc_node_no) {

	switch (tc_node_no) {
		/**
		 * If no VF and no DCB, only 64 queues can be used.
		 * This case is also covered by "case 0".
		 */

static int
ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
		       uint32_t priority, uint32_t weight,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* not support shared shaper */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check the unsupported parameters */
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for TC or queue node */
	/* check the unsupported parameters */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

/**
 * Now the TC and queue configuration is controlled by DCB.
 * We need to check if the node configuration follows the DCB configuration.
 * In the future, we may use TM to cover DCB.
 */
static int
ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority,
	       uint32_t weight, uint32_t level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct ixgbe_tm_node *tm_node;
	struct ixgbe_tm_node *parent_node;
	uint8_t nb_tcs;
	uint16_t q_base = 0;
	uint16_t q_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
				     params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	shaper_profile = ixgbe_shaper_profile_search(dev,
						     params->shaper_profile_id);
	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper profile does not exist";
		return -EINVAL;
	}

	/* the node is the root node if it does not have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > IXGBE_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (tm_conf->root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("ixgbe_tm_node",
				      sizeof(struct ixgbe_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->no = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		(void)rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		tm_conf->root = tm_node;

		/* increase the reference counter of the shaper profile */
		shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = ixgbe_tm_node_search(dev, parent_node_id,
					   &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}
	if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
	    parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		nb_tcs = ixgbe_tc_nb_get(dev);
		if (tm_conf->nb_tc_node >= nb_tcs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
		if (parent_node->reference_count >= q_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "more queues than the TC supports";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For a queue, the node id means the queue id.
		 */
		if (node_id >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue id too large";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("ixgbe_tm_node",
			      sizeof(struct ixgbe_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	(void)rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		tm_node->no = parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
				  tm_node, node);
		tm_conf->nb_tc_node++;
	} else {
		tm_node->no = q_base + parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
				  tm_node, node);
		tm_conf->nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	shaper_profile->reference_count++;

	return 0;
}
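
/*
 * The hierarchy accepted by ixgbe_node_add() has exactly three levels:
 * port (root), TC and queue, where a queue node's id is the TX queue id.
 * A minimal sketch of building it through the generic API (node ids 1000
 * and 900 are arbitrary examples, and shaper profile 1 is assumed to have
 * been added already):
 *
 *	struct rte_tm_node_params np;
 *	struct rte_tm_error err;
 *
 *	memset(&np, 0, sizeof(np));
 *	np.shaper_profile_id = 1;
 *	np.nonleaf.n_sp_priorities = 1;
 *	rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 *	memset(&np, 0, sizeof(np));
 *	np.shaper_profile_id = 1;
 *	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
 *	rte_tm_node_add(port_id, 900, 1000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	rte_tm_node_add(port_id, 0, 900, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 * Priority must be 0 and weight must be 1 for every node, as enforced by
 * ixgbe_node_param_check().
 */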

static int
ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
		tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		tm_conf->root = NULL;
		return 0;
	}

	/* TC or queue node */
	tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == IXGBE_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		tm_conf->nb_tc_node--;
	} else {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		tm_conf->nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}
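
/*
 * Deletion works leaf-first: a node that still has children (a non-zero
 * reference_count) is rejected, so an application tears the hierarchy down
 * queue nodes first, then TC nodes, then the root, e.g.
 * rte_tm_node_delete(port_id, 0, &err) for TX queue 0 before deleting the
 * TC node above it.
 */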