net/ixgbe: support adding TM shaper profile
[dpdk.git] drivers/net/ixgbe/ixgbe_tm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>

#include "ixgbe_ethdev.h"

static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
				     struct rte_tm_capabilities *cap,
				     struct rte_tm_error *error);
static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_shaper_params *profile,
				    struct rte_tm_error *error);

const struct rte_tm_ops ixgbe_tm_ops = {
	.capabilities_get = ixgbe_tm_capabilities_get,
	.shaper_profile_add = ixgbe_shaper_profile_add,
};

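/* Return the driver's rte_tm ops table through the 'arg' pointer. */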
int
ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		 void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &ixgbe_tm_ops;

	return 0;
}

void
ixgbe_tm_conf_init(struct rte_eth_dev *dev)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&tm_conf->shaper_profile_list);
}

void
ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
		TAILQ_REMOVE(&tm_conf->shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}

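/* Get the number of TCs from the configured TX multi-queue mode. */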
static inline uint8_t
ixgbe_tc_nb_get(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf;
	uint8_t nb_tcs = 0;

	eth_conf = &dev->data->dev_conf;
	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    ETH_32_POOLS)
			nb_tcs = ETH_4_TCS;
		else
			nb_tcs = ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	return nb_tcs;
}

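/* Fill in the TM capabilities of the port/TC/queue hierarchy. */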
static int
ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
			  struct rte_tm_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t tc_nb = ixgbe_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->mac.max_tx_queues)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * Report the maximum capabilities, not the current configuration.
	 */
	/* port + TCs + queues */
	cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
			   hw->mac.max_tx_queues;
	cap->n_levels_max = 3;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->sched_n_children_max = hw->mac.max_tx_queues;
	/**
	 * HW supports strict priority (SP), but there is no plan to
	 * support it now, so all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin now, so all the nodes
	 * should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}

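/* Look up a shaper profile by its ID; return NULL if not found. */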
static inline struct ixgbe_tm_shaper_profile *
ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
			    uint32_t shaper_profile_id)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_shaper_profile_list *shaper_profile_list =
		&tm_conf->shaper_profile_list;
	struct ixgbe_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				 struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

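/* Validate the shaper parameters and add a new profile to the list. */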
static int
ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *profile,
			 struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = ixgbe_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
				     sizeof(struct ixgbe_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	(void)rte_memcpy(&shaper_profile->profile, profile,
			 sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
			  shaper_profile, node);

	return 0;
}