diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
index 62b5e24b82..0361cf4346 100644
--- a/drivers/net/failsafe/failsafe_private.h
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -34,6 +34,9 @@
 #ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
 #define _RTE_ETH_FAILSAFE_PRIVATE_H_
 
+#include <sys/queue.h>
+
+#include <rte_atomic.h>
 #include <rte_dev.h>
 #include <rte_ethdev.h>
 #include <rte_devargs.h>
@@ -41,11 +44,16 @@
 #define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
 
 #define PMD_FAILSAFE_MAC_KVARG "mac"
+#define PMD_FAILSAFE_HOTPLUG_POLL_KVARG "hotplug_poll"
 #define PMD_FAILSAFE_PARAM_STRING	\
 	"dev(),"		\
-	"mac=mac_addr"		\
+	"exec(),"		\
+	"mac=mac_addr,"		\
+	"hotplug_poll=u64"	\
 	""
 
+#define FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS 2000
+
 #define FAILSAFE_MAX_ETHPORTS 2
 #define FAILSAFE_MAX_ETHADDR 128
 
@@ -58,6 +66,7 @@ struct rxq {
 	uint8_t last_polled;
 	unsigned int socket_id;
 	struct rte_eth_rxq_info info;
+	rte_atomic64_t refcnt[];
 };
 
 struct txq {
@@ -65,6 +74,15 @@
 	uint16_t qid;
 	unsigned int socket_id;
 	struct rte_eth_txq_info info;
+	rte_atomic64_t refcnt[];
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	/* sub_flows */
+	struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS];
+	/* flow description for synchronization */
+	struct rte_flow_desc *fd;
 };
 
 enum dev_state {
@@ -81,8 +99,17 @@
 	struct rte_bus *bus;
 	struct rte_device *dev;
 	struct rte_eth_dev *edev;
+	uint8_t sid;
 	/* Device state machine */
 	enum dev_state state;
+	/* Some devices are defined as a command line */
+	char *cmdline;
+	/* fail-safe device backreference */
+	struct rte_eth_dev *fs_dev;
+	/* flag calling for recollection */
+	volatile unsigned int remove:1;
+	/* flow isolation state */
+	int flow_isolated:1;
 };
 
 struct fs_priv {
@@ -97,36 +124,75 @@
 	uint8_t subs_tail; /* first invalid */
 	uint8_t subs_tx; /* current emitting device */
 	uint8_t current_probed;
+	/* flow mapping */
+	TAILQ_HEAD(sub_flows, rte_flow) flow_list;
 	/* current number of mac_addr slots allocated. */
 	uint32_t nb_mac_addr;
 	struct ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR];
 	uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
 	/* current capabilities */
 	struct rte_eth_dev_info infos;
+	/*
+	 * Fail-safe state machine.
+	 * This level will be tracking the state of the EAL and eth
+	 * layer at large as defined by the user application.
+	 * It will then steer the sub_devices toward the same
+	 * synchronized state.
+	 */
+	enum dev_state state;
+	unsigned int pending_alarm:1; /* An alarm is pending */
+	/* flow isolation state */
+	int flow_isolated:1;
 };
 
+/* MISC */
+
+int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev);
+int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev);
+
 /* RX / TX */
 
+void set_burst_fn(struct rte_eth_dev *dev, int force_safe);
+
 uint16_t failsafe_rx_burst(void *rxq,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t failsafe_tx_burst(void *txq,
 		struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
+uint16_t failsafe_rx_burst_fast(void *rxq,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t failsafe_tx_burst_fast(void *txq,
+		struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
 /* ARGS */
 
 int failsafe_args_parse(struct rte_eth_dev *dev, const char *params);
 void failsafe_args_free(struct rte_eth_dev *dev);
 int failsafe_args_count_subdevice(struct rte_eth_dev *dev, const char *params);
+int failsafe_args_parse_subs(struct rte_eth_dev *dev);
 
 /* EAL */
 
 int failsafe_eal_init(struct rte_eth_dev *dev);
 int failsafe_eal_uninit(struct rte_eth_dev *dev);
 
+/* ETH_DEV */
+
+int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
+void failsafe_dev_remove(struct rte_eth_dev *dev);
+int failsafe_eth_rmv_event_callback(uint8_t port_id,
+		enum rte_eth_event_type type,
+		void *arg, void *out);
+int failsafe_eth_lsc_event_callback(uint8_t port_id,
+		enum rte_eth_event_type event,
+		void *cb_arg, void *out);
+
 /* GLOBALS */
 
 extern const char pmd_failsafe_driver_name[];
 extern const struct eth_dev_ops failsafe_ops;
+extern const struct rte_flow_ops fs_flow_ops;
+extern uint64_t hotplug_poll;
 extern int mac_from_arg;
 
 /* HELPERS */
@@ -143,6 +209,10 @@
 #define PORT_ID(sdev) \
 	(ETH(sdev)->data->port_id)
 
+/* sdev: (struct sub_device *) */
+#define SUB_ID(sdev) \
+	((sdev)->sid)
+
 /**
  * Stateful iterator construct over fail-safe sub-devices:
  * s: (struct sub_device *), iterator
@@ -181,6 +251,39 @@
 #define SUBOPS(s, ops) \
 	(ETH(s)->dev_ops->ops)
 
+/**
+ * Atomic guard
+ */
+
+/**
+ * a: (rte_atomic64_t)
+ */
+#define FS_ATOMIC_P(a) \
+	rte_atomic64_add(&(a), 1)
+
+/**
+ * a: (rte_atomic64_t)
+ */
+#define FS_ATOMIC_V(a) \
+	rte_atomic64_sub(&(a), 1)
+
+/**
+ * s: (struct sub_device *)
+ * i: uint16_t qid
+ */
+#define FS_ATOMIC_RX(s, i) \
+	rte_atomic64_read( \
+		&((struct rxq *)((s)->fs_dev->data->rx_queues[i]))->refcnt[(s)->sid] \
+	)
+/**
+ * s: (struct sub_device *)
+ * i: uint16_t qid
+ */
+#define FS_ATOMIC_TX(s, i) \
+	rte_atomic64_read( \
+		&((struct txq *)((s)->fs_dev->data->tx_queues[i]))->refcnt[(s)->sid] \
+	)
+
 #define LOG__(level, m, ...) \
 	RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__)
 #define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
@@ -205,4 +308,52 @@ fs_find_next(struct rte_eth_dev *dev, uint8_t sid,
 	return sid;
 }
 
+/*
+ * Switch emitting device.
+ * If banned is set, banned must not be considered for
+ * the role of emitting device.
+ */
+static inline void
+fs_switch_dev(struct rte_eth_dev *dev,
+	      struct sub_device *banned)
+{
+	struct sub_device *txd;
+	enum dev_state req_state;
+
+	req_state = PRIV(dev)->state;
+	txd = TX_SUBDEV(dev);
+	if (PREFERRED_SUBDEV(dev)->state >= req_state &&
+	    PREFERRED_SUBDEV(dev) != banned) {
+		if (txd != PREFERRED_SUBDEV(dev) &&
+		    (txd == NULL ||
+		     (req_state == DEV_STARTED) ||
+		     (txd && txd->state < DEV_STARTED))) {
+			DEBUG("Switching tx_dev to preferred sub_device");
+			PRIV(dev)->subs_tx = 0;
+		}
+	} else if ((txd && txd->state < req_state) ||
+		   txd == NULL ||
+		   txd == banned) {
+		struct sub_device *sdev;
+		uint8_t i;
+
+		/* Using acceptable device */
+		FOREACH_SUBDEV_STATE(sdev, i, dev, req_state) {
+			if (sdev == banned)
+				continue;
+			DEBUG("Switching tx_dev to sub_device %d",
+			      i);
+			PRIV(dev)->subs_tx = i;
+			break;
+		}
+	} else if (txd && txd->state < req_state) {
+		DEBUG("No device ready, deactivating tx_dev");
+		PRIV(dev)->subs_tx = PRIV(dev)->subs_tail;
+	} else {
+		return;
+	}
+	set_burst_fn(dev, 0);
+	rte_wmb();
+}
+
 #endif /* _RTE_ETH_FAILSAFE_PRIVATE_H_ */
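For context, the devargs keys declared by PMD_FAILSAFE_PARAM_STRING in this header (dev(), exec(), mac, hotplug_poll) are consumed through the regular EAL vdev mechanism. The sketch below is illustrative only and is not part of the patch; the vdev instance name, PCI sub-device address and MAC address are placeholder assumptions.

/* Illustrative sketch only: probe a fail-safe port from EAL devargs.
 * The vdev instance name, PCI address and MAC below are placeholders;
 * hotplug_poll reuses the 2000 ms default from this header. */
#include <rte_eal.h>

int
main(int argc, char **argv)
{
	char *eal_args[] = {
		argv[0],
		"--vdev",
		"net_failsafe0,dev(0000:00:02.0),mac=de:ad:be:ef:01:02,"
		"hotplug_poll=2000",
	};
	int eal_argc = sizeof(eal_args) / sizeof(eal_args[0]);

	(void)argc;
	if (rte_eal_init(eal_argc, eal_args) < 0)
		return -1;
	/* The fail-safe port is now usable through the regular
	 * rte_eth_dev API; sub-device hotplug is handled by the PMD. */
	return 0;
}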