/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef _ETH_FAILSAFE_PRIVATE_H_
#define _ETH_FAILSAFE_PRIVATE_H_

#include <stdint.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>

#include <rte_atomic.h>
#include <rte_dev.h>
#include <ethdev_driver.h>
#include <rte_devargs.h>
#include <rte_errno.h>
#include <rte_flow.h>
#include <rte_interrupts.h>

#define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
#define FAILSAFE_OWNER_NAME "Fail-safe"

#define PMD_FAILSAFE_MAC_KVARG "mac"
#define PMD_FAILSAFE_HOTPLUG_POLL_KVARG "hotplug_poll"

#define PMD_FAILSAFE_PARAM_STRING \
	"dev(<ifc>)," \
	"exec(<shell command>)," \
	"fd(<fd number>)," \
	"mac=mac_addr," \
	"hotplug_poll=u64" \
	""

#define FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS 2000

#define FAILSAFE_MAX_ETHPORTS 2
#define FAILSAFE_MAX_ETHADDR 128

#define DEVARGS_MAXLEN 4096

enum rxp_service_state {
	SS_NO_SERVICE = 0,
	SS_REGISTERED,
	SS_READY,
	SS_RUNNING,
};

/* TYPES */

struct rx_proxy {
	/* epoll file descriptor */
	int efd;
	/* event vector to be used by epoll */
	struct rte_epoll_event *evec;
	/* rte service id */
	uint32_t sid;
	/* service core id */
	uint32_t scid;
	enum rxp_service_state sstate;
};

#define FS_RX_PROXY_INIT (struct rx_proxy){ \
	.efd = -1, \
	.evec = NULL, \
	.sid = 0, \
	.scid = 0, \
	.sstate = SS_NO_SERVICE, \
}

struct rxq {
	struct fs_priv *priv;
	uint16_t qid;
	/* next sub_device to poll */
	struct sub_device *sdev;
	unsigned int socket_id;
	int event_fd;
	unsigned int enable_events:1;
	struct rte_eth_rxq_info info;
	rte_atomic64_t refcnt[];
};

struct txq {
	struct fs_priv *priv;
	uint16_t qid;
	unsigned int socket_id;
	struct rte_eth_txq_info info;
	rte_atomic64_t refcnt[];
};

struct rte_flow {
	TAILQ_ENTRY(rte_flow) next;
	/* sub_flows */
	struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS];
	/* flow description for synchronization */
	struct rte_flow_conv_rule rule;
	uint8_t rule_data[];
};

enum dev_state {
	DEV_UNDEFINED,
	DEV_PARSED,
	DEV_PROBED,
	DEV_ACTIVE,
	DEV_STARTED,
};

struct fs_stats {
	struct rte_eth_stats stats;
	uint64_t timestamp;
};

/*
 * Allocated in shared memory.
 */
struct sub_device {
	/* Exhaustive DPDK device description */
	struct sub_device *next;
	struct rte_devargs devargs;
	struct rte_bus *bus; /* for primary process only. */
	struct rte_device *dev; /* for primary process only. */
	uint8_t sid;
	/* Device state machine */
	enum dev_state state;
	/* Last stats snapshot passed to user */
	struct fs_stats stats_snapshot;
	/* Some devices are defined on the command line */
	char *cmdline;
	/* Others are retrieved through a file descriptor */
	char *fd_str;
	/* fail-safe device backreference */
	uint16_t fs_port_id; /* shared between processes */
	/* sub device port id */
	uint16_t sdev_port_id; /* shared between processes */
	/* flag calling for recollection */
	volatile unsigned int remove:1;
	/* flow isolation state */
	int flow_isolated:1;
	/* RMV callback registration state */
	unsigned int rmv_callback:1;
	/* LSC callback registration state */
	unsigned int lsc_callback:1;
};

/*
 * This is referenced by eth_dev->data->dev_private
 * This is shared between processes.
 */
struct fs_priv {
	struct rte_eth_dev_data *data; /* backreference to shared data. */
	/*
	 * Set of sub_devices.
	 * subs[0] is the preferred device,
	 * any other is just another sub device.
	 */
	struct sub_device *subs; /* shared between processes */
	uint8_t subs_head; /* if head == tail, no subs */
	uint8_t subs_tail; /* first invalid */
	uint8_t subs_tx; /* current emitting device */
	uint8_t current_probed;
	/* flow mapping */
	TAILQ_HEAD(sub_flows, rte_flow) flow_list;
	/* current number of mac_addr slots allocated. */
	uint32_t nb_mac_addr;
	struct rte_ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR];
	uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
	uint32_t nb_mcast_addr;
	struct rte_ether_addr *mcast_addrs;
	/* current capabilities */
	struct rte_eth_dev_owner my_owner; /* Unique owner. */
	struct rte_intr_handle intr_handle; /* Port interrupt handle. */
	/*
	 * Fail-safe state machine.
	 * This level tracks the state of the EAL and ethdev
	 * layers at large, as defined by the user application.
	 * It then steers the sub_devices toward the same
	 * synchronized state.
	 */
	enum dev_state state;
	struct rte_eth_stats stats_accumulator;
	/*
	 * Rx interrupts/events proxy.
	 * The PMD issues Rx events to the EAL on behalf of its subdevices;
	 * it does that by registering an event-fd for each of its queues with
	 * the EAL. A PMD service thread listens to all the Rx events from the
	 * subdevices; when an Rx event is issued by a subdevice, it is
	 * caught by this service, which triggers an Rx event in the
	 * appropriate failsafe Rx queue.
	 */
	struct rx_proxy rxp;
	pthread_mutex_t hotplug_mutex;
	/* Hot-plug mutex is locked by the alarm mechanism. */
	volatile unsigned int alarm_lock:1;
	unsigned int pending_alarm:1; /* An alarm is pending */
	/* flow isolation state */
	int flow_isolated:1;
};
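
/*
 * Sketch of the Rx proxy service loop described above (illustrative only;
 * the real implementation lives in failsafe_intr.c and differs in detail;
 * NB_EVENT and timeout are hypothetical):
 *
 *	struct rte_epoll_event ev[NB_EVENT];
 *	int i, n;
 *
 *	n = rte_epoll_wait(priv->rxp.efd, ev, NB_EVENT, timeout);
 *	for (i = 0; i < n; i++)
 *		forward ev[i] to the matching failsafe Rx queue, so the
 *		application wakes on the failsafe port, not the sub-device;
 */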

/* FAILSAFE_INTR */

int failsafe_rx_intr_install(struct rte_eth_dev *dev);
void failsafe_rx_intr_uninstall(struct rte_eth_dev *dev);
int failsafe_rx_intr_install_subdevice(struct sub_device *sdev);
void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev);

/* MISC */

int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev);
int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev);

/* RX / TX */

void failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe);

uint16_t failsafe_rx_burst(void *rxq,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t failsafe_tx_burst(void *txq,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

uint16_t failsafe_rx_burst_fast(void *rxq,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t failsafe_tx_burst_fast(void *txq,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

/* ARGS */

int failsafe_args_parse(struct rte_eth_dev *dev, const char *params);
void failsafe_args_free(struct rte_eth_dev *dev);
int failsafe_args_count_subdevice(struct rte_eth_dev *dev, const char *params);
int failsafe_args_parse_subs(struct rte_eth_dev *dev);

/* EAL */

int failsafe_eal_init(struct rte_eth_dev *dev);
int failsafe_eal_uninit(struct rte_eth_dev *dev);

/* ETH_DEV */

int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev);
int failsafe_eth_dev_close(struct rte_eth_dev *dev);
void failsafe_dev_remove(struct rte_eth_dev *dev);
void failsafe_stats_increment(struct rte_eth_stats *to,
		struct rte_eth_stats *from);
int failsafe_eth_rmv_event_callback(uint16_t port_id,
				    enum rte_eth_event_type type,
				    void *arg, void *out);
int failsafe_eth_lsc_event_callback(uint16_t port_id,
				    enum rte_eth_event_type event,
				    void *cb_arg, void *out);
int failsafe_eth_new_event_callback(uint16_t port_id,
				    enum rte_eth_event_type event,
				    void *cb_arg, void *out);

/* GLOBALS */

extern const char pmd_failsafe_driver_name[];
extern const struct eth_dev_ops failsafe_ops;
extern const struct rte_flow_ops fs_flow_ops;
extern uint64_t failsafe_hotplug_poll;
extern int failsafe_mac_from_arg;

/* HELPERS */

/* dev: (struct rte_eth_dev *) fail-safe device */
#define PRIV(dev) \
	((struct fs_priv *)(dev)->data->dev_private)

/* sdev: (struct sub_device *) */
#define ETH(sdev) \
	((sdev)->sdev_port_id == RTE_MAX_ETHPORTS ? \
	NULL : &rte_eth_devices[(sdev)->sdev_port_id])

/* sdev: (struct sub_device *) */
#define PORT_ID(sdev) \
	((sdev)->sdev_port_id)

/* sdev: (struct sub_device *) */
#define SUB_ID(sdev) \
	((sdev)->sid)

/*
 * Stateful iterator construct over fail-safe sub-devices:
 * s:     (struct sub_device *), iterator
 * i:     (uint8_t), increment
 * dev:   (struct rte_eth_dev *), fail-safe ethdev
 * state: (enum dev_state), minimum acceptable device state
 */
#define FOREACH_SUBDEV_STATE(s, i, dev, state) \
	for (s = fs_find_next((dev), 0, state, &i); \
	     s != NULL; \
	     s = fs_find_next((dev), i + 1, state, &i))

/*
 * Iterator construct over fail-safe sub-devices:
 * s:   (struct sub_device *), iterator
 * i:   (uint8_t), increment
 * dev: (struct rte_eth_dev *), fail-safe ethdev
 */
#define FOREACH_SUBDEV(s, i, dev) \
	FOREACH_SUBDEV_STATE(s, i, dev, DEV_UNDEFINED)
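
/*
 * Typical iteration pattern (illustrative; modeled on control-path call
 * sites such as the stop handler in failsafe_ops.c):
 *
 *	struct sub_device *sdev;
 *	uint8_t i;
 *	int ret;
 *
 *	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
 *		ret = rte_eth_dev_stop(PORT_ID(sdev));
 *		if (fs_err(sdev, ret) < 0)
 *			ERROR("Failed to stop sub-device %u", SUB_ID(sdev));
 *	}
 */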

/* dev: (struct rte_eth_dev *) fail-safe device */
#define PREFERRED_SUBDEV(dev) \
	(&PRIV(dev)->subs[0])

/* dev: (struct rte_eth_dev *) fail-safe device */
#define TX_SUBDEV(dev) \
	(PRIV(dev)->subs_tx >= PRIV(dev)->subs_tail ? NULL \
	 : (PRIV(dev)->subs[PRIV(dev)->subs_tx].state < DEV_PROBED ? NULL \
	 : &PRIV(dev)->subs[PRIV(dev)->subs_tx]))

/*
 * s:   (struct sub_device *)
 * ops: (struct eth_dev_ops) member
 */
#define SUBOPS(s, ops) \
	(ETH(s)->dev_ops->ops)

/*
 * Atomic guard
 */

/*
 * a: (rte_atomic64_t)
 */
#define FS_ATOMIC_P(a) \
	rte_atomic64_set(&(a), 1)

/*
 * a: (rte_atomic64_t)
 */
#define FS_ATOMIC_V(a) \
	rte_atomic64_set(&(a), 0)

/*
 * s: (struct sub_device *)
 * i: uint16_t qid
 */
#define FS_ATOMIC_RX(s, i) \
	rte_atomic64_read( \
		&((struct rxq *) \
		(fs_dev(s)->data->rx_queues[i]))->refcnt[(s)->sid])
/*
 * s: (struct sub_device *)
 * i: uint16_t qid
 */
#define FS_ATOMIC_TX(s, i) \
	rte_atomic64_read( \
		&((struct txq *) \
		(fs_dev(s)->data->tx_queues[i]))->refcnt[(s)->sid])
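
/*
 * How the guards cooperate (illustrative; paraphrased from the burst
 * functions in failsafe_rxtx.c): the datapath brackets each sub-device
 * burst call with FS_ATOMIC_P()/FS_ATOMIC_V(), while the hot-plug path
 * polls FS_ATOMIC_RX()/FS_ATOMIC_TX() to delay removal of a sub-device
 * whose queue is still in use:
 *
 *	FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
 *	nb_rx = ETH(sdev)->rx_pkt_burst(
 *		ETH(sdev)->data->rx_queues[rxq->qid], rx_pkts, nb_pkts);
 *	FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
 */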

#ifdef RTE_EXEC_ENV_FREEBSD
#define FS_THREADID_TYPE void*
#define FS_THREADID_FMT  "p"
#else
#define FS_THREADID_TYPE unsigned long
#define FS_THREADID_FMT  "lu"
#endif

extern int failsafe_logtype;

#define LOG__(l, m, ...) \
	rte_log(RTE_LOG_ ## l, failsafe_logtype, \
		"net_failsafe: " m "%c", __VA_ARGS__)

#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
#define DEBUG(...) LOG_(DEBUG, __VA_ARGS__)
#define INFO(...) LOG_(INFO, __VA_ARGS__)
#define WARN(...) LOG_(WARNING, __VA_ARGS__)
#define ERROR(...) LOG_(ERR, __VA_ARGS__)
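
/*
 * Note: LOG_() passes '\n' as the last variadic argument and the trailing
 * "%c" in LOG__() consumes it, so every message is newline-terminated
 * without each call site having to append one.
 */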

/* inlined functions */

static inline struct sub_device *
fs_find_next(struct rte_eth_dev *dev,
	     uint8_t sid,
	     enum dev_state min_state,
	     uint8_t *sid_out)
{
	struct sub_device *subs;
	uint8_t tail;

	subs = PRIV(dev)->subs;
	tail = PRIV(dev)->subs_tail;
	while (sid < tail) {
		if (subs[sid].state >= min_state)
			break;
		sid++;
	}
	*sid_out = sid;
	if (sid >= tail)
		return NULL;
	return &subs[sid];
}

static inline struct rte_eth_dev *
fs_dev(struct sub_device *sdev) {
	return &rte_eth_devices[sdev->fs_port_id];
}

/*
 * Lock hot-plug mutex.
 * is_alarm means that the caller is, for sure, the hot-plug alarm mechanism.
 */
static inline int
fs_lock(struct rte_eth_dev *dev, unsigned int is_alarm)
{
	int ret;

	if (is_alarm) {
		ret = pthread_mutex_trylock(&PRIV(dev)->hotplug_mutex);
		if (ret) {
			DEBUG("Hot-plug mutex trylock failed (%s), will try"
			      " again later...", strerror(ret));
			return ret;
		}
		PRIV(dev)->alarm_lock = 1;
	} else {
		ret = pthread_mutex_lock(&PRIV(dev)->hotplug_mutex);
		if (ret) {
			ERROR("Cannot lock mutex (%s)", strerror(ret));
			return ret;
		}
	}
	return ret;
}

/*
 * Unlock hot-plug mutex.
 * is_alarm means that the caller is, for sure, the hot-plug alarm mechanism.
 */
static inline void
fs_unlock(struct rte_eth_dev *dev, unsigned int is_alarm)
{
	int ret;

	if (is_alarm) {
		RTE_ASSERT(PRIV(dev)->alarm_lock == 1);
		PRIV(dev)->alarm_lock = 0;
	}
	ret = pthread_mutex_unlock(&PRIV(dev)->hotplug_mutex);
	if (ret)
		ERROR("Cannot unlock hot-plug mutex (%s)", strerror(ret));
}

/*
 * Switch emitting device.
 * If banned is set, banned must not be considered for
 * the role of emitting device.
 */
static inline void
fs_switch_dev(struct rte_eth_dev *dev,
	      struct sub_device *banned)
{
	struct sub_device *txd;
	enum dev_state req_state;

	req_state = PRIV(dev)->state;
	txd = TX_SUBDEV(dev);
	if (PREFERRED_SUBDEV(dev)->state >= req_state &&
	    PREFERRED_SUBDEV(dev) != banned) {
		if (txd != PREFERRED_SUBDEV(dev) &&
		    (txd == NULL ||
		     (req_state == DEV_STARTED) ||
		     (txd && txd->state < DEV_STARTED))) {
			DEBUG("Switching tx_dev to preferred sub_device");
			PRIV(dev)->subs_tx = 0;
		}
	} else if ((txd && txd->state < req_state) ||
		   txd == NULL ||
		   txd == banned) {
		struct sub_device *sdev = NULL;
		uint8_t i;

		/* Using an acceptable device */
		FOREACH_SUBDEV_STATE(sdev, i, dev, req_state) {
			if (sdev == banned)
				continue;
			DEBUG("Switching tx_dev to sub_device %d",
			      i);
			PRIV(dev)->subs_tx = i;
			break;
		}
		if (i >= PRIV(dev)->subs_tail || sdev == NULL) {
			DEBUG("No device ready, deactivating tx_dev");
			PRIV(dev)->subs_tx = PRIV(dev)->subs_tail;
		}
	} else {
		return;
	}
	failsafe_set_burst_fn(dev, 0);
	rte_wmb();
}

/*
 * Adjust the error value and rte_errno to the fail-safe actual error value.
 */
static inline int
fs_err(struct sub_device *sdev, int err)
{
	/* A device removal shouldn't be reported as an error. */
	if (sdev->remove == 1 || err == -EIO)
		return rte_errno = 0;
	return err;
}
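
/*
 * Typical call pattern (illustrative; mirrors sub-device operations in
 * failsafe_ops.c):
 *
 *	ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
 *	ret = fs_err(sdev, ret);
 *	if (ret)
 *		return ret;
 */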

#endif /* _ETH_FAILSAFE_PRIVATE_H_ */