/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"

/**
 * PMD Parameters
 */

enum pmd_feature {
	PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
};

struct pmd_params {
	/** Parameters for the soft device (to be created) */
	struct {
		const char *name; /**< Name */
		uint32_t flags; /**< Flags */

		/** 0 = Access hard device through API only (potentially
		 *      slower, but safer);
		 *  1 = Access to hard device private data structures is
		 *      allowed (potentially faster).
		 */
		int intrusive;

		/** Traffic Management (TM) */
		struct {
			uint32_t rate; /**< Rate (bytes/second) */
			uint32_t nb_queues; /**< Number of queues */
			uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
			/**< Queue size per traffic class */
			uint32_t enq_bsz; /**< Enqueue burst size */
			uint32_t deq_bsz; /**< Dequeue burst size */
		} tm;
	} soft;

	/** Parameters for the hard device (existing) */
	struct {
		char *name; /**< Name */
		uint16_t tx_queue_id; /**< TX queue ID */
	} hard;
};
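
/*
 * Illustrative sketch (not part of the driver): one way the probe path
 * might fill in struct pmd_params. Every literal value below is an
 * assumption chosen only to show the shape of the structure.
 *
 *	struct pmd_params p = {
 *		.soft = {
 *			.name = "softnic0",
 *			.flags = PMD_FEATURE_TM,
 *			.intrusive = 0,
 *			.tm = {
 *				.rate = 1250000000, .nb_queues = 65536,
 *				.qsize = {64, 64, 64, 64},
 *				.enq_bsz = DEFAULT_BURST_SIZE,
 *				.deq_bsz = DEFAULT_BURST_SIZE,
 *			},
 *		},
 *		.hard = {.name = "0000:01:00.0", .tx_queue_id = 0},
 *	};
 */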

/**
 * Default Internals
 */

#ifndef DEFAULT_BURST_SIZE
#define DEFAULT_BURST_SIZE 32
#endif

#ifndef FLUSH_COUNT_THRESHOLD
#define FLUSH_COUNT_THRESHOLD (1 << 17)
#endif

struct default_internals {
	struct rte_mbuf **pkts; /**< TX burst buffer */
	uint32_t pkts_len; /**< Number of packets currently buffered */
	uint32_t txq_pos;
	uint32_t flush_count;
};
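
/*
 * Illustrative sketch (assumption, not the driver's actual TX routine):
 * the default (non-TM) path buffers mbufs in pkts[] and flushes either
 * on a full burst or once flush_count crosses FLUSH_COUNT_THRESHOLD;
 * handling of the rte_eth_tx_burst() return value is omitted here.
 *
 *	def->pkts[def->pkts_len++] = mbuf;
 *	if (def->pkts_len >= DEFAULT_BURST_SIZE ||
 *	    def->flush_count >= FLUSH_COUNT_THRESHOLD) {
 *		rte_eth_tx_burst(hard_port_id, hard_tx_queue_id,
 *			def->pkts, def->pkts_len);
 *		def->pkts_len = 0;
 *		def->flush_count = 0;
 *	}
 */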

/**
 * Traffic Management (TM) Internals
 */

#ifndef TM_MAX_SUBPORTS
#define TM_MAX_SUBPORTS 8
#endif

#ifndef TM_MAX_PIPES_PER_SUBPORT
#define TM_MAX_PIPES_PER_SUBPORT 4096
#endif

struct tm_params {
	struct rte_sched_port_params port_params;

	struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];

	struct rte_sched_pipe_params
		pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
	uint32_t n_pipe_profiles;
	uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
};
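
/*
 * Indexing convention (assumption, for illustration only): the flat
 * pipe_to_profile[] array holds one pipe profile index per
 * (subport, pipe) pair, e.g.:
 *
 *	profile_id = t->pipe_to_profile[subport_id *
 *		TM_MAX_PIPES_PER_SUBPORT + pipe_id];
 */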

/* TM Levels */
enum tm_node_level {
	TM_NODE_LEVEL_PORT = 0,
	TM_NODE_LEVEL_SUBPORT,
	TM_NODE_LEVEL_PIPE,
	TM_NODE_LEVEL_TC,
	TM_NODE_LEVEL_QUEUE,
	TM_NODE_LEVEL_MAX,
};

/* TM Shaper Profile */
struct tm_shaper_profile {
	TAILQ_ENTRY(tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t n_users; /**< Number of users (reference count) */
	struct rte_tm_shaper_params params;
};

TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile);

/* TM Shared Shaper */
struct tm_shared_shaper {
	TAILQ_ENTRY(tm_shared_shaper) node;
	uint32_t shared_shaper_id;
	uint32_t n_users; /**< Number of users (reference count) */
	uint32_t shaper_profile_id;
};

TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper);

/* TM WRED Profile */
struct tm_wred_profile {
	TAILQ_ENTRY(tm_wred_profile) node;
	uint32_t wred_profile_id;
	uint32_t n_users; /**< Number of users (reference count) */
	struct rte_tm_wred_params params;
};

TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile);

/* TM Node */
struct tm_node {
	TAILQ_ENTRY(tm_node) node;
	uint32_t node_id;
	uint32_t parent_node_id;
	uint32_t priority;
	uint32_t weight;
	uint32_t level;
	struct tm_node *parent_node;
	struct tm_shaper_profile *shaper_profile;
	struct tm_wred_profile *wred_profile;
	struct rte_tm_node_params params;
	struct rte_tm_node_stats stats;
	uint32_t n_children;
};

TAILQ_HEAD(tm_node_list, tm_node);
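
/*
 * Illustrative sketch (standard <sys/queue.h> usage, not a dedicated
 * driver API): looking up a node by ID walks a tm_node_list, where
 * node_list is a struct tm_node_list pointer, e.g.:
 *
 *	struct tm_node *n;
 *
 *	TAILQ_FOREACH(n, node_list, node)
 *		if (n->node_id == node_id)
 *			return n;
 */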

/* TM Hierarchy Specification */
struct tm_hierarchy {
	struct tm_shaper_profile_list shaper_profiles;
	struct tm_shared_shaper_list shared_shapers;
	struct tm_wred_profile_list wred_profiles;
	struct tm_node_list nodes;

	uint32_t n_shaper_profiles;
	uint32_t n_shared_shapers;
	uint32_t n_wred_profiles;
	uint32_t n_nodes;

	uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
};

struct tm_internals {
	/** Hierarchy specification
	 *
	 * - Hierarchy is unfrozen at init and when the port is stopped.
	 * - Hierarchy is frozen on successful hierarchy commit.
	 * - Run-time hierarchy changes are not allowed, therefore it makes
	 *   sense to keep the hierarchy frozen after the port is started.
	 */
	struct tm_hierarchy h;
	int hierarchy_frozen;

	/** Blueprints */
	struct tm_params params;

	/** Run-time */
	struct rte_sched_port *sched;
	struct rte_mbuf **pkts_enq;
	struct rte_mbuf **pkts_deq;
	uint32_t pkts_enq_len;
	uint32_t txq_pos;
	uint32_t flush_count;
};
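
/*
 * Illustrative sketch (application side, hedged): the hierarchy is built
 * and then frozen through the generic rte_tm API; per the comment above,
 * a successful commit is what freezes it, after which the port can be
 * started.
 *
 *	struct rte_tm_error error;
 *
 *	... rte_tm_node_add() calls describing the hierarchy ...
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &error) != 0)
 *		rte_panic("TM hierarchy commit failed\n");
 *	rte_eth_dev_start(port_id);
 */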

/**
 * PMD Internals
 */
struct pmd_internals {
	/** Params */
	struct pmd_params params;

	/** Soft device */
	struct {
		struct default_internals def; /**< Default */
		struct tm_internals tm; /**< Traffic Management */
	} soft;

	/** Hard device */
	struct {
		uint16_t port_id;
	} hard;
};

struct pmd_rx_queue {
	/** Hard device */
	struct {
		uint16_t port_id;
		uint16_t rx_queue_id;
	} hard;
};

/**
 * Traffic Management (TM) Operation
 */
extern const struct rte_tm_ops pmd_tm_ops;

int
tm_params_check(struct pmd_params *params, uint32_t hard_rate);

int
tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);

void
tm_free(struct pmd_internals *p);

int
tm_start(struct pmd_internals *p);

void
tm_stop(struct pmd_internals *p);

static inline int
tm_enabled(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	return (p->params.soft.flags & PMD_FEATURE_TM);
}

static inline int
tm_used(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	return (p->params.soft.flags & PMD_FEATURE_TM) &&
		p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
}
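
/*
 * Illustrative sketch (assumption about the callers, hedged): the .c
 * code can use the two helpers above to pick the TX path, e.g.:
 *
 *	if (tm_used(dev)) {
 *		... hierarchy committed: run packets through soft.tm ...
 *	} else {
 *		... TM not in use: run the default soft.def path ...
 *	}
 */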

#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */