+static int
+slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
+{
+	struct rte_flow *flow;
+	struct rte_flow_error ferror;
+	uint16_t slave_port_id = internals->slaves[slave_id].port_id;
+
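+	/*
+	 * If flow isolation is enabled on the bond, propagate it to the new
+	 * slave. The slave is stopped first, since some PMDs only accept a
+	 * switch to isolated mode on a stopped port.
+	 */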
+	if (internals->flow_isolated_valid != 0) {
+		rte_eth_dev_stop(slave_port_id);
+		if (rte_flow_isolate(slave_port_id, internals->flow_isolated,
+		    &ferror)) {
+			RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave"
+				     " %d: %s", slave_id, ferror.message ?
+				     ferror.message : "(no stated reason)");
+			return -1;
+		}
+	}
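+	/* Replay every flow rule currently held by the bond on the new slave */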
+	TAILQ_FOREACH(flow, &internals->flow_list, next) {
+		flow->flows[slave_id] = rte_flow_create(slave_port_id,
+							flow->rule.attr,
+							flow->rule.pattern,
+							flow->rule.actions,
+							&ferror);
+		if (flow->flows[slave_id] == NULL) {
+			RTE_BOND_LOG(ERR, "Cannot create flow for slave"
+				     " %d: %s", slave_id,
+				     ferror.message ? ferror.message :
+				     "(no stated reason)");
+			/*
+			 * Roll back the flows already created on this slave.
+			 * Reusing the iterator here is safe because the
+			 * function returns as soon as the cleanup finishes.
+			 */
+			TAILQ_FOREACH(flow, &internals->flow_list, next) {
+				if (flow->flows[slave_id] != NULL) {
+					rte_flow_destroy(slave_port_id,
+							 flow->flows[slave_id],
+							 &ferror);
+					flow->flows[slave_id] = NULL;
+				}
+			}
+			return -1;
+		}
+	}
+	return 0;
+}
+
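+/*
+ * Seed the bond's Rx capabilities and default Rx queue configuration from
+ * the first slave added. Slaves added later can only narrow these values
+ * (see eth_bond_slave_inherit_dev_info_rx_next()).
+ */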
+static void
+eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals,
+					 const struct rte_eth_dev_info *di)
+{
+	struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
+
+	internals->reta_size = di->reta_size;
+
+	/* Inherit Rx offload capabilities from the first slave device */
+	internals->rx_offload_capa = di->rx_offload_capa;
+	internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
+	internals->flow_type_rss_offloads = di->flow_type_rss_offloads;
+
+	/* Inherit maximum Rx packet size from the first slave device */
+	internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
+
+	/* Inherit default Rx queue settings from the first slave device */
+	memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));
+
+	/*
+	 * Turn off descriptor prefetch and writeback by default for all
+	 * slave devices. Applications may tweak these settings if need be.
+	 */
+	rxconf_i->rx_thresh.pthresh = 0;
+	rxconf_i->rx_thresh.hthresh = 0;
+	rxconf_i->rx_thresh.wthresh = 0;
+
+	/* Setting this to zero should effectively enable default values */
+	rxconf_i->rx_free_thresh = 0;
+
+	/* Disable deferred start by default for all slave devices */
+	rxconf_i->rx_deferred_start = 0;
+}
+
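+/*
+ * Likewise, seed the bond's Tx capabilities and default Tx queue
+ * configuration from the first slave added.
+ */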
+static void
+eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals,
+					 const struct rte_eth_dev_info *di)
+{
+	struct rte_eth_txconf *txconf_i = &internals->default_txconf;
+
+	/* Inherit Tx offload capabilities from the first slave device */
+	internals->tx_offload_capa = di->tx_offload_capa;
+	internals->tx_queue_offload_capa = di->tx_queue_offload_capa;
+
+	/* Inherit default Tx queue settings from the first slave device */
+	memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));
+
+	/*
+	 * Turn off descriptor prefetch and writeback by default for all
+	 * slave devices. Applications may tweak these settings if need be.
+	 */
+	txconf_i->tx_thresh.pthresh = 0;
+	txconf_i->tx_thresh.hthresh = 0;
+	txconf_i->tx_thresh.wthresh = 0;
+
+	/*
+	 * Setting these parameters to zero assumes that default
+	 * values will be configured implicitly by slave devices.
+	 */
+	txconf_i->tx_free_thresh = 0;
+	txconf_i->tx_rs_thresh = 0;
+
+	/* Disable deferred start by default for all slave devices */
+	txconf_i->tx_deferred_start = 0;
+}
+
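+/*
+ * Fold a further slave's Rx capabilities into the bond: the capability
+ * masks are intersected so the bond only advertises what every slave
+ * supports.
+ */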
+static void
+eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
+					const struct rte_eth_dev_info *di)
+{
+	struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
+	const struct rte_eth_rxconf *rxconf = &di->default_rxconf;
+
+	internals->rx_offload_capa &= di->rx_offload_capa;
+	internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
+	internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;
+
+	/*
+	 * If at least one slave device suggests enabling this
+	 * setting by default, enable it for all slave devices
+	 * since disabling it may not necessarily be supported.
+	 */
+	if (rxconf->rx_drop_en == 1)
+		rxconf_i->rx_drop_en = 1;
+
+	/*
+	 * Adding a new slave device may cause some of previously inherited
+	 * offloads to be withdrawn from the internal rx_queue_offload_capa
+	 * value. Thus, the new internal value of default Rx queue offloads
+	 * has to be masked by rx_queue_offload_capa to make sure that only
+	 * commonly supported offloads are preserved from both the previous
+	 * value and the value being inherited from the new slave device.
+	 */
+	rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
+			     internals->rx_queue_offload_capa;
+
+	/*
+	 * The bond's RETA size must be the GCD of all slave RETA sizes;
+	 * since these sizes are powers of 2 in practice, the smallest of
+	 * them is the GCD.
+	 */
+	if (internals->reta_size > di->reta_size)
+		internals->reta_size = di->reta_size;
+
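+	/*
+	 * Shrink the candidate maximum Rx packet length towards the smallest
+	 * slave value, but only while no explicit max_rx_pktlen is in force.
+	 */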
+	if (!internals->max_rx_pktlen &&
+	    di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
+		internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
+}
+
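+/*
+ * Fold a further slave's Tx capabilities into the bond, mirroring the Rx
+ * handling above.
+ */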
+static void
+eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
+					const struct rte_eth_dev_info *di)
+{
+	struct rte_eth_txconf *txconf_i = &internals->default_txconf;
+	const struct rte_eth_txconf *txconf = &di->default_txconf;
+
+	internals->tx_offload_capa &= di->tx_offload_capa;
+	internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;
+
+	/*
+	 * Adding a new slave device may cause some of previously inherited
+	 * offloads to be withdrawn from the internal tx_queue_offload_capa
+	 * value. Thus, the new internal value of default Tx queue offloads
+	 * has to be masked by tx_queue_offload_capa to make sure that only
+	 * commonly supported offloads are preserved from both the previous
+	 * value and the value being inherited from the new slave device.
+	 */
+	txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
+			     internals->tx_queue_offload_capa;
+}
+
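+/* The first slave's descriptor limits are adopted unchanged. */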
+static void
+eth_bond_slave_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim,
+		const struct rte_eth_desc_lim *slave_desc_lim)
+{
+	memcpy(bond_desc_lim, slave_desc_lim, sizeof(*bond_desc_lim));
+}
+
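+/*
+ * Tighten the bond's descriptor limits so that any ring size valid for the
+ * bond is valid for every slave: take the smallest maximum, the largest
+ * minimum and the largest alignment (assuming, as is usual, power-of-two
+ * alignments, so the largest is a common multiple of them all).
+ */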
+static int
+eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim,
+		const struct rte_eth_desc_lim *slave_desc_lim)
+{
+	bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max,
+					slave_desc_lim->nb_max);
+	bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min,
+					slave_desc_lim->nb_min);
+	bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align,
+					  slave_desc_lim->nb_align);
+
+	if (bond_desc_lim->nb_min > bond_desc_lim->nb_max ||
+	    bond_desc_lim->nb_align > bond_desc_lim->nb_max) {
+		RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits");
+		return -EINVAL;
+	}
+
+	/* Treat maximum number of segments equal to 0 as unspecified */
+	if (slave_desc_lim->nb_seg_max != 0 &&
+	    (bond_desc_lim->nb_seg_max == 0 ||
+	     slave_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max))
+		bond_desc_lim->nb_seg_max = slave_desc_lim->nb_seg_max;
+	if (slave_desc_lim->nb_mtu_seg_max != 0 &&
+	    (bond_desc_lim->nb_mtu_seg_max == 0 ||
+	     slave_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max))
+		bond_desc_lim->nb_mtu_seg_max = slave_desc_lim->nb_mtu_seg_max;
+
+	return 0;
+}
+