/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox.
 */

#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
#define _RTE_ETH_FAILSAFE_PRIVATE_H_
#include <sys/queue.h>

#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_devargs.h>
#include <rte_interrupts.h>
#define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
#define FAILSAFE_OWNER_NAME "Fail-safe"
#define PMD_FAILSAFE_MAC_KVARG "mac"
#define PMD_FAILSAFE_HOTPLUG_POLL_KVARG "hotplug_poll"
#define PMD_FAILSAFE_PARAM_STRING	\
	"dev(<ifc>),"			\
	"exec(<shell command>),"	\
	"fd(<fd number>),"		\
	"mac=mac_addr,"			\
	"hotplug_poll=u64"		\
	""

#define FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS 2000
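
/*
 * Usage sketch (illustrative only, device names are hypothetical): a
 * fail-safe port grouping a PCI sub-device and a ring sub-device could
 * be created with EAL arguments such as:
 *
 *   --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(0000:84:00.0),dev(net_ring0)'
 */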
#define FAILSAFE_MAX_ETHPORTS 2
#define FAILSAFE_MAX_ETHADDR 128

#define DEVARGS_MAXLEN 4096
enum rxp_service_state {
	SS_NO_SERVICE = 0,
	SS_REGISTERED,
	SS_READY,
	SS_RUNNING,
};

/* TYPES */

struct rx_proxy {
	/* epoll file descriptor */
	int efd;
	/* event vector to be used by epoll */
	struct rte_epoll_event *evec;
	/* rte service id */
	uint32_t sid;
	/* service core id */
	uint32_t scid;
	enum rxp_service_state sstate;
};
struct rxq {
	struct fs_priv *priv;
	uint16_t qid;
	/* next sub_device to poll */
	struct sub_device *sdev;
	unsigned int socket_id;
	int event_fd;
	unsigned int enable_events:1;
	struct rte_eth_rxq_info info;
	rte_atomic64_t refcnt[];
};
struct txq {
	struct fs_priv *priv;
	uint16_t qid;
	unsigned int socket_id;
	struct rte_eth_txq_info info;
	rte_atomic64_t refcnt[];
};
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next;
	/* sub_flows */
	struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS];
	/* flow description for synchronization */
	struct rte_flow_desc *fd;
};
enum dev_state {
	DEV_UNDEFINED,
	DEV_PARSED,
	DEV_PROBED,
	DEV_ACTIVE,
	DEV_STARTED,
};

struct fs_stats {
	struct rte_eth_stats stats;
	uint64_t timestamp;
};
struct sub_device {
	/* Exhaustive DPDK device description */
	struct sub_device *next;
	struct rte_devargs devargs;
	struct rte_bus *bus;
	struct rte_device *dev;
	struct rte_eth_dev *edev;
	uint8_t sid;
	/* Device state machine */
	enum dev_state state;
	/* Last stats snapshot passed to user */
	struct fs_stats stats_snapshot;
	/* Some devices are defined as a command line */
	char *cmdline;
	/* Others are retrieved through a file descriptor */
	char *fd_str;
	/* fail-safe device backreference */
	struct rte_eth_dev *fs_dev;
	/* flag calling for recollection */
	volatile unsigned int remove:1;
	/* flow isolation state */
	int flow_isolated:1;
};
struct fs_priv {
	struct rte_eth_dev *dev;
	/*
	 * Set of sub_devices.
	 * subs[0] is the preferred device,
	 * any other is just another slave.
	 */
	struct sub_device *subs;
	uint8_t subs_head; /* if head == tail, no subs */
	uint8_t subs_tail; /* first invalid */
	uint8_t subs_tx; /* current emitting device */
	uint8_t current_probed;
	/* flow mapping */
	TAILQ_HEAD(sub_flows, rte_flow) flow_list;
	/* current number of mac_addr slots allocated. */
	uint32_t nb_mac_addr;
	struct ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR];
	uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
	/* current capabilities */
	struct rte_eth_dev_info infos;
	struct rte_eth_dev_owner my_owner; /* Unique owner. */
	struct rte_intr_handle intr_handle; /* Port interrupt handle. */
	/*
	 * Fail-safe state machine.
	 * This level tracks the state of the EAL and the ethdev layer
	 * at large, as defined by the user application.
	 * It then steers the sub_devices toward the same
	 * synchronized state.
	 */
	enum dev_state state;
	struct rte_eth_stats stats_accumulator;
	/*
	 * Rx interrupts/events proxy.
	 * The PMD issues Rx events to the EAL on behalf of its subdevices.
	 * It does so by registering an event-fd for each of its queues with
	 * the EAL. A PMD service thread listens to all the Rx events from
	 * the subdevices; when a subdevice issues an Rx event, it is caught
	 * by this service, which then triggers an Rx event in the
	 * appropriate fail-safe Rx queue.
	 */
	struct rx_proxy rxp;
	unsigned int pending_alarm:1; /* An alarm is pending */
	/* flow isolation state */
	int flow_isolated:1;
};
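
/*
 * Illustrative application-side sketch (not part of this PMD): the Rx
 * proxy above is what makes the fail-safe port usable with the standard
 * ethdev Rx interrupt API, e.g.:
 *
 *   struct rte_epoll_event ev;
 *
 *   rte_eth_dev_rx_intr_enable(port_id, qid);
 *   rte_eth_dev_rx_intr_ctl_q(port_id, qid, RTE_EPOLL_PER_THREAD,
 *                             RTE_INTR_EVENT_ADD, NULL);
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */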
/* FAILSAFE_INTR */

int failsafe_rx_intr_install(struct rte_eth_dev *dev);
void failsafe_rx_intr_uninstall(struct rte_eth_dev *dev);
int failsafe_rx_intr_install_subdevice(struct sub_device *sdev);
void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev);
/* MISC */

int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev);
int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev);
/* RX / TX */

void set_burst_fn(struct rte_eth_dev *dev, int force_safe);

uint16_t failsafe_rx_burst(void *rxq,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t failsafe_tx_burst(void *txq,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

uint16_t failsafe_rx_burst_fast(void *rxq,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t failsafe_tx_burst_fast(void *txq,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
/* ARGS */

int failsafe_args_parse(struct rte_eth_dev *dev, const char *params);
void failsafe_args_free(struct rte_eth_dev *dev);
int failsafe_args_count_subdevice(struct rte_eth_dev *dev, const char *params);
int failsafe_args_parse_subs(struct rte_eth_dev *dev);
/* EAL */

int failsafe_eal_init(struct rte_eth_dev *dev);
int failsafe_eal_uninit(struct rte_eth_dev *dev);
/* ETH_DEV */

int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
void failsafe_dev_remove(struct rte_eth_dev *dev);
void failsafe_stats_increment(struct rte_eth_stats *to,
		struct rte_eth_stats *from);
int failsafe_eth_rmv_event_callback(uint16_t port_id,
				    enum rte_eth_event_type type,
				    void *arg, void *out);
int failsafe_eth_lsc_event_callback(uint16_t port_id,
				    enum rte_eth_event_type event,
				    void *cb_arg, void *out);
/* GLOBALS */

extern const char pmd_failsafe_driver_name[];
extern const struct eth_dev_ops failsafe_ops;
extern const struct rte_flow_ops fs_flow_ops;
extern uint64_t hotplug_poll;
extern int mac_from_arg;
/* HELPERS */

/* dev: (struct rte_eth_dev *) fail-safe device */
#define PRIV(dev) \
	((struct fs_priv *)(dev)->data->dev_private)

/* sdev: (struct sub_device *) */
#define ETH(sdev) \
	((sdev)->edev)

/* sdev: (struct sub_device *) */
#define PORT_ID(sdev) \
	(ETH(sdev)->data->port_id)

/* sdev: (struct sub_device *) */
#define SUB_ID(sdev) \
	((sdev)->sid)
/**
 * Stateful iterator construct over fail-safe sub-devices:
 * s:     (struct sub_device *), iterator
 * i:     (uint8_t), increment
 * dev:   (struct rte_eth_dev *), fail-safe ethdev
 * state: (enum dev_state), minimum acceptable device state
 */
#define FOREACH_SUBDEV_STATE(s, i, dev, state)		\
	for (s = fs_find_next((dev), 0, state, &i);	\
	     s != NULL;					\
	     s = fs_find_next((dev), i + 1, state, &i))
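
/*
 * Usage sketch (illustrative): count started sub-devices.
 *
 *   struct sub_device *sdev;
 *   uint8_t i;
 *   unsigned int n = 0;
 *
 *   FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED)
 *           n++;
 */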
/**
 * Iterator construct over fail-safe sub-devices:
 * s:   (struct sub_device *), iterator
 * i:   (uint8_t), increment
 * dev: (struct rte_eth_dev *), fail-safe ethdev
 */
#define FOREACH_SUBDEV(s, i, dev)			\
	FOREACH_SUBDEV_STATE(s, i, dev, DEV_UNDEFINED)
/* dev: (struct rte_eth_dev *) fail-safe device */
#define PREFERRED_SUBDEV(dev) \
	(&PRIV(dev)->subs[0])
/* dev: (struct rte_eth_dev *) fail-safe device */
#define TX_SUBDEV(dev)							  \
	(PRIV(dev)->subs_tx >= PRIV(dev)->subs_tail		   ? NULL \
	 : (PRIV(dev)->subs[PRIV(dev)->subs_tx].state < DEV_PROBED ? NULL \
	 : &PRIV(dev)->subs[PRIV(dev)->subs_tx]))
/*
 * s:   (struct sub_device *)
 * ops: (struct eth_dev_ops) member
 */
#define SUBOPS(s, ops) \
	(ETH(s)->dev_ops->ops)
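
/*
 * Usage sketch (illustrative): forwarding an ethdev operation to a
 * sub-device, e.g. querying its capabilities:
 *
 *   struct rte_eth_dev_info info;
 *
 *   SUBOPS(sdev, dev_infos_get)(ETH(sdev), &info);
 */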
/**
 * Atomic guard
 */

/**
 * a: (rte_atomic64_t)
 */
#define FS_ATOMIC_P(a) \
	rte_atomic64_set(&(a), 1)

/**
 * a: (rte_atomic64_t)
 */
#define FS_ATOMIC_V(a) \
	rte_atomic64_set(&(a), 0)
/**
 * s: (struct sub_device *)
 * i: uint16_t qid
 */
#define FS_ATOMIC_RX(s, i) \
	rte_atomic64_read( \
	 &((struct rxq *)((s)->fs_dev->data->rx_queues[i]))->refcnt[(s)->sid] \
	)

/**
 * s: (struct sub_device *)
 * i: uint16_t qid
 */
#define FS_ATOMIC_TX(s, i) \
	rte_atomic64_read( \
	 &((struct txq *)((s)->fs_dev->data->tx_queues[i]))->refcnt[(s)->sid] \
	)
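
/*
 * Usage sketch (illustrative): the burst paths raise the per-sub-device
 * reference flag around each delegated burst call, so a removal handler
 * can tell whether a queue is still in flight:
 *
 *   FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
 *   nb_rx = ETH(sdev)->rx_pkt_burst(
 *           ETH(sdev)->data->rx_queues[rxq->qid], rx_pkts, nb_pkts);
 *   FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
 */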
#define LOG__(level, m, ...) \
	RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__)
#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
#define DEBUG(...) LOG_(DEBUG, __VA_ARGS__)
#define INFO(...) LOG_(INFO, __VA_ARGS__)
#define WARN(...) LOG_(WARNING, __VA_ARGS__)
#define ERROR(...) LOG_(ERR, __VA_ARGS__)
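
/*
 * Usage sketch (illustrative): LOG_ appends the newline itself, through
 * the trailing "%c" conversion, so call sites omit it:
 *
 *   ERROR("could not synchronize sub_device %d", i);
 */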
/* inlined functions */

static inline struct sub_device *
fs_find_next(struct rte_eth_dev *dev,
	     uint8_t sid,
	     enum dev_state min_state,
	     uint8_t *sid_out)
{
	struct sub_device *subs;
	uint8_t tail;

	subs = PRIV(dev)->subs;
	tail = PRIV(dev)->subs_tail;
	while (sid < tail) {
		if (subs[sid].state >= min_state)
			break;
		sid++;
	}
	*sid_out = sid;
	if (sid >= tail)
		return NULL;
	return &subs[sid];
}
/*
 * Switch emitting device.
 * If banned is set, it must not be considered for
 * the role of emitting device.
 */
static inline void
fs_switch_dev(struct rte_eth_dev *dev,
	      struct sub_device *banned)
{
	struct sub_device *txd;
	enum dev_state req_state;

	req_state = PRIV(dev)->state;
	txd = TX_SUBDEV(dev);
	if (PREFERRED_SUBDEV(dev)->state >= req_state &&
	    PREFERRED_SUBDEV(dev) != banned) {
		if (txd != PREFERRED_SUBDEV(dev) &&
		    (txd == NULL ||
		     (req_state == DEV_STARTED) ||
		     (txd && txd->state < DEV_STARTED))) {
			DEBUG("Switching tx_dev to preferred sub_device");
			PRIV(dev)->subs_tx = 0;
		}
	} else if ((txd && txd->state < req_state) ||
		   txd == NULL ||
		   txd == banned) {
		struct sub_device *sdev = NULL;
		uint8_t i;

		/* Using acceptable device */
		FOREACH_SUBDEV_STATE(sdev, i, dev, req_state) {
			if (sdev == banned)
				continue;
			DEBUG("Switching tx_dev to sub_device %d",
			      i);
			PRIV(dev)->subs_tx = i;
			break;
		}
		if (i >= PRIV(dev)->subs_tail || sdev == NULL) {
			DEBUG("No device ready, deactivating tx_dev");
			PRIV(dev)->subs_tx = PRIV(dev)->subs_tail;
		}
	} else {
		return;
	}
	set_burst_fn(dev, 0);
	rte_wmb();
}
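
/*
 * Usage sketch (illustrative): a removal handler bans the vanishing
 * sub-device before electing a new emitting device:
 *
 *   sdev->remove = 1;
 *   fs_switch_dev(sdev->fs_dev, sdev);
 */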
/*
 * Adjust the error value and rte_errno to the fail-safe's actual
 * error value.
 */
static inline int
fs_err(struct sub_device *sdev, int err)
{
	/* A device removal shouldn't be reported as an error. */
	if (sdev->remove == 1 || err == -EIO)
		return rte_errno = 0;
	return err;
}
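
/*
 * Usage sketch (illustrative): wrapping a delegated ethdev call so that
 * errors caused by an in-progress removal are masked:
 *
 *   ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
 *   ret = fs_err(sdev, ret);
 */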

#endif /* _RTE_ETH_FAILSAFE_PRIVATE_H_ */