X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbonding%2Frte_eth_bond_pmd.c;h=f9b7b595df8e09342656480d0cefa15b8aef4877;hb=9039c8125730;hp=f375592b782d0609578544f22465e042694a1a3a;hpb=a7c528e5d71ff3f569898d268f9de129fdfc152b;p=dpdk.git

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index f375592b78..f9b7b595df 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2010-2017 Intel Corporation
  */
 #include <stdlib.h>
+#include <stdbool.h>
 #include <netinet/in.h>
 
 #include <rte_mbuf.h>
@@ -160,8 +161,7 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
 		uint16_t slave_port) {
 	struct rte_eth_dev_info slave_info;
 	struct rte_flow_error error;
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-			(bond_dev->data->dev_private);
+	struct bond_dev_private *internals = bond_dev->data->dev_private;
 
 	const struct rte_flow_action_queue lacp_queue_conf = {
 		.index = 0,
@@ -186,7 +186,15 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
 		return -1;
 	}
 
-	rte_eth_dev_info_get(slave_port, &slave_info);
+	ret = rte_eth_dev_info_get(slave_port, &slave_info);
+	if (ret != 0) {
+		RTE_BOND_LOG(ERR,
+			"%s: Error during getting device (port %u) info: %s\n",
+			__func__, slave_port, strerror(-ret));
+
+		return ret;
+	}
+
 	if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
 			slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
 		RTE_BOND_LOG(ERR,
@@ -201,14 +209,22 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
 int
 bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
 	struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-			(bond_dev->data->dev_private);
+	struct bond_dev_private *internals = bond_dev->data->dev_private;
 	struct rte_eth_dev_info bond_info;
 	uint16_t idx;
+	int ret;
 
 	/* Verify if all slaves in bonding supports flow director and */
 	if (internals->slave_count > 0) {
-		rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
+		ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
+		if (ret != 0) {
+			RTE_BOND_LOG(ERR,
+				"%s: Error during getting device (port %u) info: %s\n",
+				__func__, bond_dev->data->port_id,
+				strerror(-ret));
+
+			return ret;
+		}
 
 		internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
 		internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
@@ -227,9 +243,7 @@ int
 bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
 
 	struct rte_flow_error error;
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-			(bond_dev->data->dev_private);
-
+	struct bond_dev_private *internals = bond_dev->data->dev_private;
 	struct rte_flow_action_queue lacp_queue_conf = {
 		.index = internals->mode4.dedicated_queues.rx_qid,
 	};
@@ -257,139 +271,9 @@ bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
 	return 0;
 }
 
-static uint16_t
-bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
-		uint16_t nb_pkts)
-{
-	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-	struct bond_dev_private *internals = bd_rx_q->dev_private;
-	uint16_t num_rx_total = 0;	/* Total number of received packets */
-	uint16_t slaves[RTE_MAX_ETHPORTS];
-	uint16_t slave_count;
-	uint16_t active_slave;
-	uint16_t i;
-
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
-	slave_count = internals->active_slave_count;
-	active_slave = internals->active_slave;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * slave_count);
-
-	for (i = 0; i < slave_count && nb_pkts; i++) {
-		uint16_t num_rx_slave;
-
-		/* Read packets from this slave */
-		num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
-				bd_rx_q->queue_id,
-				bufs + num_rx_total, nb_pkts);
-		num_rx_total += num_rx_slave;
-		nb_pkts -= num_rx_slave;
-
-		if (++active_slave == slave_count)
-			active_slave = 0;
-	}
-
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
-
-	return num_rx_total;
-}
-
-static uint16_t
-bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
-		uint16_t nb_bufs)
-{
-	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
-	struct bond_dev_private *internals = bd_tx_q->dev_private;
-
-	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
-	uint16_t slave_count;
-
-	uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
-	uint16_t dist_slave_count;
-
-	/* 2-D array to sort mbufs for transmission on each slave into */
-	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
-	/* Number of mbufs for transmission on each slave */
-	uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
-	/* Mapping array generated by hash function to map mbufs to slaves */
-	uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
-	uint16_t slave_tx_count;
-	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
-
-	uint16_t i;
-
-	if (unlikely(nb_bufs == 0))
-		return 0;
-
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
-	slave_count = internals->active_slave_count;
-	if (unlikely(slave_count < 1))
-		return 0;
-
-	memcpy(slave_port_ids, internals->active_slaves,
-			sizeof(slave_port_ids[0]) * slave_count);
-
-
-	dist_slave_count = 0;
-	for (i = 0; i < slave_count; i++) {
-		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
-
-		if (ACTOR_STATE(port, DISTRIBUTING))
-			dist_slave_port_ids[dist_slave_count++] =
-					slave_port_ids[i];
-	}
-
-	if (unlikely(dist_slave_count < 1))
-		return 0;
-
-	/*
-	 * Populate slaves mbuf with the packets which are to be sent on it
-	 * selecting output slave using hash based on xmit policy
-	 */
-	internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
-			bufs_slave_port_idxs);
-
-	for (i = 0; i < nb_bufs; i++) {
-		/* Populate slave mbuf arrays with mbufs for that slave. */
-		uint16_t slave_idx = bufs_slave_port_idxs[i];
-
-		slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
-	}
-
-
-	/* Send packet burst on each slave device */
-	for (i = 0; i < dist_slave_count; i++) {
-		if (slave_nb_bufs[i] == 0)
-			continue;
-
-		slave_tx_count = rte_eth_tx_burst(dist_slave_port_ids[i],
-				bd_tx_q->queue_id, slave_bufs[i],
-				slave_nb_bufs[i]);
-
-		total_tx_count += slave_tx_count;
-
-		/* If tx burst fails move packets to end of bufs */
-		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-			int slave_tx_fail_count = slave_nb_bufs[i] -
-					slave_tx_count;
-			total_tx_fail_count += slave_tx_fail_count;
-			memcpy(&bufs[nb_bufs - total_tx_fail_count],
-					&slave_bufs[i][slave_tx_count],
-					slave_tx_fail_count * sizeof(bufs[0]));
-		}
-	}
-
-	return total_tx_count;
-}
-
-
-static uint16_t
-bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
-		uint16_t nb_pkts)
+static inline uint16_t
+rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
+		bool dedicated_rxq)
 {
 	/* Cast to structure, containing bonded device's port id and queue id */
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
@@ -406,7 +290,8 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	uint16_t slave_count, idx;
 
 	uint8_t collecting;  /* current slave collecting status */
-	const uint8_t promisc = internals->promiscuous_en;
+	const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
+	const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
 	uint8_t subtype;
 	uint16_t i;
 	uint16_t j;
@@ -437,28 +322,31 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
 	/* Handle slow protocol packets. */
 	while (j < num_rx_total) {
-
-		/* If packet is not pure L2 and is known, skip it */
-		if ((bufs[j]->packet_type & ~RTE_PTYPE_L2_ETHER) != 0) {
-			j++;
-			continue;
-		}
-
 		if (j + 3 < num_rx_total)
 			rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
 
 		hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
 		subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
 
-		/* Remove packet from array if it is slow packet or slave is not
-		 * in collecting state or bonding interface is not in promiscuous
-		 * mode and packet address does not match. */
-		if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
+		/* Remove packet from array if:
+		 * - it is slow packet but no dedicated rxq is present,
+		 * - slave is not in collecting state,
+		 * - bonding interface is not in promiscuous mode:
+		 *   - packet is unicast and address does not match,
+		 *   - packet is multicast and bonding interface
+		 *     is not in allmulti,
+		 */
+		if (unlikely(
+			(!dedicated_rxq &&
+			 is_lacp_packets(hdr->ether_type, subtype,
+					 bufs[j])) ||
 			!collecting ||
 			(!promisc &&
-			 !rte_is_multicast_ether_addr(&hdr->d_addr) &&
-			 !rte_is_same_ether_addr(bond_mac,
-						 &hdr->d_addr)))) {
+			 ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
+			   !rte_is_same_ether_addr(bond_mac,
+						   &hdr->d_addr)) ||
+			  (!allmulti &&
+			   rte_is_multicast_ether_addr(&hdr->d_addr)))))) {
 
 			if (hdr->ether_type == ether_type_slow_be) {
 				bond_mode_8023ad_handle_slow_pkt(
@@ -485,6 +373,20 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	return num_rx_total;
 }
 
+static uint16_t
+bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_pkts)
+{
+	return rx_burst_8023ad(queue, bufs, nb_pkts, false);
+}
+
+static uint16_t
+bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_pkts)
+{
+	return rx_burst_8023ad(queue, bufs, nb_pkts, true);
+}
+
 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
 uint32_t burstnumberRX;
 uint32_t burstnumberTX;
@@ -607,7 +509,7 @@ mode6_debug(const char __attribute__((unused)) *info,
 			strlcpy(buf, info, 16);
 #endif
 
-	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 		ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
 		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
@@ -653,7 +555,7 @@ bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			bond_mode_alb_arp_recv(eth_h, offset, internals);
 		}
 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
-		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
+		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
 			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
 #endif
 	}
@@ -811,12 +713,12 @@ burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 
 		vlan_offset = get_vlan_offset(eth_hdr, &proto);
 
-		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
+		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
 			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
 					((char *)(eth_hdr + 1) + vlan_offset);
 			l3hash = ipv4_hash(ipv4_hdr);
 
-		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
+		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
 			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
 					((char *)(eth_hdr + 1) + vlan_offset);
 			l3hash = ipv6_hash(ipv6_hdr);
@@ -839,8 +741,8 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 	size_t vlan_offset;
 	int i;
 
-	struct udp_hdr *udp_hdr;
-	struct tcp_hdr *tcp_hdr;
+	struct rte_udp_hdr *udp_hdr;
+	struct rte_tcp_hdr *tcp_hdr;
 	uint32_t hash, l3hash, l4hash;
 
 	for (i = 0; i < nb_pkts; i++) {
@@ -851,7 +753,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 		l3hash = 0;
 		l4hash = 0;
 
-		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
+		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
 			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
 					((char *)(eth_hdr + 1) + vlan_offset);
 			size_t ip_hdr_offset;
@@ -862,11 +764,11 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 			if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
 								== 0)) {
 				ip_hdr_offset = (ipv4_hdr->version_ihl
-					& IPV4_HDR_IHL_MASK) *
-					IPV4_IHL_MULTIPLIER;
+					& RTE_IPV4_HDR_IHL_MASK) *
+					RTE_IPV4_IHL_MULTIPLIER;
 
 				if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
-					tcp_hdr = (struct tcp_hdr *)
+					tcp_hdr = (struct rte_tcp_hdr *)
 						((char *)ipv4_hdr +
 							ip_hdr_offset);
 					if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
@@ -874,7 +776,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 						l4hash = HASH_L4_PORTS(tcp_hdr);
 				} else if (ipv4_hdr->next_proto_id ==
 						IPPROTO_UDP) {
-					udp_hdr = (struct udp_hdr *)
+					udp_hdr = (struct rte_udp_hdr *)
 						((char *)ipv4_hdr +
 							ip_hdr_offset);
 					if ((size_t)udp_hdr + sizeof(*udp_hdr)
@@ -882,16 +784,16 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 						l4hash = HASH_L4_PORTS(udp_hdr);
 				}
 			}
-		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
+		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
 			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
 					((char *)(eth_hdr + 1) + vlan_offset);
 			l3hash = ipv6_hash(ipv6_hdr);
 
 			if (ipv6_hdr->proto == IPPROTO_TCP) {
-				tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
+				tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
 				l4hash = HASH_L4_PORTS(tcp_hdr);
 			} else if (ipv6_hdr->proto == IPPROTO_UDP) {
-				udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
+				udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
 				l4hash = HASH_L4_PORTS(udp_hdr);
 			}
 		}
@@ -1216,16 +1118,13 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	return num_tx_total;
 }
 
-static uint16_t
-bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
-		uint16_t nb_bufs)
+static inline uint16_t
+tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+		uint16_t *slave_port_ids, uint16_t slave_count)
 {
 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
 	struct bond_dev_private *internals = bd_tx_q->dev_private;
 
-	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
-	uint16_t slave_count;
-
 	/* Array to sort mbufs for transmission on each slave into */
 	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
 	/* Number of mbufs for transmission on each slave */
@@ -1238,18 +1137,6 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 
 	uint16_t i;
 
-	if (unlikely(nb_bufs == 0))
-		return 0;
-
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
-	slave_count = internals->active_slave_count;
-	if (unlikely(slave_count < 1))
-		return 0;
-
-	memcpy(slave_port_ids, internals->active_slaves,
-			sizeof(slave_port_ids[0]) * slave_count);
-
 	/*
 	 * Populate slaves mbuf with the packets which are to be sent on it
 	 * selecting output slave using hash based on xmit policy
@@ -1290,7 +1177,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 }
 
 static uint16_t
-bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_bufs)
 {
 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
@@ -1299,18 +1186,36 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
 	uint16_t slave_count;
 
+	if (unlikely(nb_bufs == 0))
+		return 0;
+
+	/* Copy slave list to protect against slave up/down changes during tx
+	 * bursting
+	 */
+	slave_count = internals->active_slave_count;
+	if (unlikely(slave_count < 1))
+		return 0;
+
+	memcpy(slave_port_ids, internals->active_slaves,
+			sizeof(slave_port_ids[0]) * slave_count);
+
+	return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
+			slave_count);
+}
+
+static inline uint16_t
+tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+		bool dedicated_txq)
+{
+	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+	struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+	uint16_t slave_count;
+
 	uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
 	uint16_t dist_slave_count;
 
-	/* 2-D array to sort mbufs for transmission on each slave into */
-	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
-	/* Number of mbufs for transmission on each slave */
-	uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
-	/* Mapping array generated by hash function to map mbufs to slaves */
-	uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
 	uint16_t slave_tx_count;
-	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
 
 	uint16_t i;
 
@@ -1323,6 +1228,9 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	memcpy(slave_port_ids, internals->active_slaves,
 			sizeof(slave_port_ids[0]) * slave_count);
 
+	if (dedicated_txq)
+		goto skip_tx_ring;
+
 	/* Check for LACP control packets and send if available */
 	for (i = 0; i < slave_count; i++) {
 		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
@@ -1344,6 +1252,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		}
 	}
 
+skip_tx_ring:
 	if (unlikely(nb_bufs == 0))
 		return 0;
 
@@ -1356,53 +1265,25 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 					slave_port_ids[i];
 	}
 
-	if (likely(dist_slave_count > 0)) {
-
-		/*
-		 * Populate slaves mbuf with the packets which are to be sent
-		 * on it, selecting output slave using hash based on xmit policy
-		 */
-		internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
-				bufs_slave_port_idxs);
-
-		for (i = 0; i < nb_bufs; i++) {
-			/*
-			 * Populate slave mbuf arrays with mbufs for that
-			 * slave
-			 */
-			uint16_t slave_idx = bufs_slave_port_idxs[i];
-
-			slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
-					bufs[i];
-		}
-
-
-		/* Send packet burst on each slave device */
-		for (i = 0; i < dist_slave_count; i++) {
-			if (slave_nb_bufs[i] == 0)
-				continue;
-
-			slave_tx_count = rte_eth_tx_burst(
-					dist_slave_port_ids[i],
-					bd_tx_q->queue_id, slave_bufs[i],
-					slave_nb_bufs[i]);
-
-			total_tx_count += slave_tx_count;
+	if (unlikely(dist_slave_count < 1))
+		return 0;
 
-			/* If tx burst fails move packets to end of bufs */
-			if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-				int slave_tx_fail_count = slave_nb_bufs[i] -
-						slave_tx_count;
-				total_tx_fail_count += slave_tx_fail_count;
+	return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
+			dist_slave_count);
+}
 
-				memcpy(&bufs[nb_bufs - total_tx_fail_count],
-						&slave_bufs[i][slave_tx_count],
-						slave_tx_fail_count * sizeof(bufs[0]));
-			}
-		}
-	}
+static uint16_t
+bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_bufs)
+{
+	return tx_burst_8023ad(queue, bufs, nb_bufs, false);
+}
 
-	return total_tx_count;
+static uint16_t
+bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_bufs)
+{
+	return tx_burst_8023ad(queue, bufs, nb_bufs, true);
 }
 
 static uint16_t
@@ -1735,8 +1616,7 @@ slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
 		struct rte_eth_dev *slave_eth_dev)
 {
 	int errval = 0;
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-		bonded_eth_dev->data->dev_private;
+	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
 	struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
 
 	if (port->slow_pool == NULL) {
@@ -1802,8 +1682,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	uint16_t q_id;
 	struct rte_flow_error flow_error;
 
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-		bonded_eth_dev->data->dev_private;
+	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
 
 	/* Stop slave */
 	rte_eth_dev_stop(slave_eth_dev->data->port_id);
@@ -2036,7 +1915,7 @@ bond_ethdev_primary_set(struct bond_dev_private *internals,
 	}
 }
 
-static void
+static int
 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
 
 static int
@@ -2079,10 +1958,6 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 		}
 	}
 
-	/* If bonded device is configure in promiscuous mode then re-apply config */
-	if (internals->promiscuous_en)
-		bond_ethdev_promiscuous_enable(eth_dev);
-
 	if (internals->mode == BONDING_MODE_8023AD) {
 		if (internals->mode4.dedicated_queues.enabled == 1) {
 			internals->mode4.dedicated_queues.rx_qid =
@@ -2240,10 +2115,12 @@ bond_ethdev_close(struct rte_eth_dev *dev)
 /* forward declaration */
 static int bond_ethdev_configure(struct rte_eth_dev *dev);
 
-static void
+static int
 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct bond_dev_private *internals = dev->data->dev_private;
+	struct bond_slave_details slave;
+	int ret;
 
 	uint16_t max_nb_rx_queues = UINT16_MAX;
 	uint16_t max_nb_tx_queues = UINT16_MAX;
@@ -2265,8 +2142,17 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		uint16_t idx;
 
 		for (idx = 0; idx < internals->slave_count; idx++) {
-			rte_eth_dev_info_get(internals->slaves[idx].port_id,
-					&slave_info);
+			slave = internals->slaves[idx];
+			ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
+			if (ret != 0) {
+				RTE_BOND_LOG(ERR,
+					"%s: Error during getting device (port %u) info: %s\n",
+					__func__,
+					slave.port_id,
+					strerror(-ret));
+
+				return ret;
+			}
 
 			if (slave_info.max_rx_queues < max_nb_rx_queues)
 				max_nb_rx_queues = slave_info.max_rx_queues;
@@ -2312,6 +2198,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
 	dev_info->reta_size = internals->reta_size;
+
+	return 0;
 }
 
 static int
@@ -2421,8 +2309,8 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
 	if (cb_arg == NULL)
 		return;
 
-	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
-	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+	bonded_ethdev = cb_arg;
+	internals = bonded_ethdev->data->dev_private;
 
 	if (!bonded_ethdev->data->dev_started ||
 		!internals->link_status_polling_enabled)
@@ -2594,25 +2482,41 @@ bond_ethdev_stats_reset(struct rte_eth_dev *dev)
 		rte_eth_stats_reset(internals->slaves[i].port_id);
 }
 
-static void
+static int
 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
 	struct bond_dev_private *internals = eth_dev->data->dev_private;
 	int i;
-
-	internals->promiscuous_en = 1;
+	int ret = 0;
+	uint16_t port_id;
 
 	switch (internals->mode) {
 	/* Promiscuous mode is propagated to all slaves */
 	case BONDING_MODE_ROUND_ROBIN:
 	case BONDING_MODE_BALANCE:
 	case BONDING_MODE_BROADCAST:
-		for (i = 0; i < internals->slave_count; i++)
-			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
-		break;
-	/* In mode4 promiscus mode is managed when slave is added/removed */
-	case BONDING_MODE_8023AD:
+	case BONDING_MODE_8023AD: {
+		unsigned int slave_ok = 0;
+
+		for (i = 0; i < internals->slave_count; i++) {
+			port_id = internals->slaves[i].port_id;
+
+			ret = rte_eth_promiscuous_enable(port_id);
+			if (ret != 0)
+				RTE_BOND_LOG(ERR,
+					"Failed to enable promiscuous mode for port %u: %s",
+					port_id, rte_strerror(-ret));
+			else
+				slave_ok++;
+		}
+		/*
+		 * Report success if operation is successful on at least
+		 * one slave. Otherwise return last error code.
+		 */
+		if (slave_ok > 0)
+			ret = 0;
 		break;
+	}
 	/* Promiscuous mode is propagated only to primary slave */
 	case BONDING_MODE_ACTIVE_BACKUP:
 	case BONDING_MODE_TLB:
@@ -2621,29 +2525,58 @@ bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 		/* Do not touch promisc when there cannot be primary ports */
 		if (internals->slave_count == 0)
 			break;
-		rte_eth_promiscuous_enable(internals->current_primary_port);
+		port_id = internals->current_primary_port;
+		ret = rte_eth_promiscuous_enable(port_id);
+		if (ret != 0)
+			RTE_BOND_LOG(ERR,
+				"Failed to enable promiscuous mode for port %u: %s",
+				port_id, rte_strerror(-ret));
 	}
+
+	return ret;
 }
 
-static void
+static int
 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
 {
 	struct bond_dev_private *internals = dev->data->dev_private;
 	int i;
-
-	internals->promiscuous_en = 0;
+	int ret = 0;
+	uint16_t port_id;
 
 	switch (internals->mode) {
 	/* Promiscuous mode is propagated to all slaves */
 	case BONDING_MODE_ROUND_ROBIN:
 	case BONDING_MODE_BALANCE:
 	case BONDING_MODE_BROADCAST:
-		for (i = 0; i < internals->slave_count; i++)
-			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
-		break;
-	/* In mode4 promiscus mode is set managed when slave is added/removed */
-	case BONDING_MODE_8023AD:
+	case BONDING_MODE_8023AD: {
+		unsigned int slave_ok = 0;
+
+		for (i = 0; i < internals->slave_count; i++) {
+			port_id = internals->slaves[i].port_id;
+
+			if (internals->mode == BONDING_MODE_8023AD &&
+			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
+					BOND_8023AD_FORCED_PROMISC) {
+				slave_ok++;
+				continue;
+			}
+			ret = rte_eth_promiscuous_disable(port_id);
+			if (ret != 0)
+				RTE_BOND_LOG(ERR,
+					"Failed to disable promiscuous mode for port %u: %s",
+					port_id, rte_strerror(-ret));
+			else
+				slave_ok++;
+		}
+		/*
+		 * Report success if operation is successful on at least
+		 * one slave. Otherwise return last error code.
+		 */
+		if (slave_ok > 0)
+			ret = 0;
 		break;
+	}
 	/* Promiscuous mode is propagated only to primary slave */
 	case BONDING_MODE_ACTIVE_BACKUP:
 	case BONDING_MODE_TLB:
@@ -2652,7 +2585,78 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
 		/* Do not touch promisc when there cannot be primary ports */
 		if (internals->slave_count == 0)
 			break;
-		rte_eth_promiscuous_disable(internals->current_primary_port);
+		port_id = internals->current_primary_port;
+		ret = rte_eth_promiscuous_disable(port_id);
+		if (ret != 0)
+			RTE_BOND_LOG(ERR,
+				"Failed to disable promiscuous mode for port %u: %s",
+				port_id, rte_strerror(-ret));
+	}
+
+	return ret;
+}
+
+static void
+bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+	struct bond_dev_private *internals = eth_dev->data->dev_private;
+	int i;
+
+	switch (internals->mode) {
+	/* allmulti mode is propagated to all slaves */
+	case BONDING_MODE_ROUND_ROBIN:
+	case BONDING_MODE_BALANCE:
+	case BONDING_MODE_BROADCAST:
+	case BONDING_MODE_8023AD:
+		for (i = 0; i < internals->slave_count; i++) {
+			uint16_t port_id = internals->slaves[i].port_id;
+
+			rte_eth_allmulticast_enable(port_id);
+		}
+		break;
+	/* allmulti mode is propagated only to primary slave */
+	case BONDING_MODE_ACTIVE_BACKUP:
+	case BONDING_MODE_TLB:
+	case BONDING_MODE_ALB:
+	default:
+		/* Do not touch allmulti when there cannot be primary ports */
+		if (internals->slave_count == 0)
+			break;
+		rte_eth_allmulticast_enable(internals->current_primary_port);
+	}
+}
+
+static void
+bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+	struct bond_dev_private *internals = eth_dev->data->dev_private;
+	int i;
+
+	switch (internals->mode) {
+	/* allmulti mode is propagated to all slaves */
+	case BONDING_MODE_ROUND_ROBIN:
+	case BONDING_MODE_BALANCE:
+	case BONDING_MODE_BROADCAST:
+	case BONDING_MODE_8023AD:
+		for (i = 0; i < internals->slave_count; i++) {
+			uint16_t port_id = internals->slaves[i].port_id;
+
+			if (internals->mode == BONDING_MODE_8023AD &&
+			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
+					BOND_8023AD_FORCED_ALLMULTI)
+				continue;
+			rte_eth_allmulticast_disable(port_id);
+		}
+		break;
+	/* allmulti mode is propagated only to primary slave */
+	case BONDING_MODE_ACTIVE_BACKUP:
+	case BONDING_MODE_TLB:
+	case BONDING_MODE_ALB:
+	default:
+		/* Do not touch allmulti when there cannot be primary ports */
+		if (internals->slave_count == 0)
+			break;
+		rte_eth_allmulticast_disable(internals->current_primary_port);
 	}
 }
 
@@ -3050,6 +3054,8 @@ const struct eth_dev_ops default_dev_ops = {
 	.stats_reset          = bond_ethdev_stats_reset,
 	.promiscuous_enable   = bond_ethdev_promiscuous_enable,
 	.promiscuous_disable  = bond_ethdev_promiscuous_disable,
+	.allmulticast_enable  = bond_ethdev_allmulticast_enable,
+	.allmulticast_disable = bond_ethdev_allmulticast_disable,
 	.reta_update          = bond_ethdev_rss_reta_update,
 	.reta_query           = bond_ethdev_rss_reta_query,
 	.rss_hash_update      = bond_ethdev_rss_hash_update,