bond: change warning
diff --git a/lib/librte_pmd_bond/rte_eth_bond_pmd.c b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
index 3db473b..7a17f53 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_pmd.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
@@ -31,6 +31,8 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <stdlib.h>
+#include <netinet/in.h>
+
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
@@ -48,6 +50,9 @@
 #include "rte_eth_bond_8023ad_private.h"
 
 #define REORDER_PERIOD_MS 10
+
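+/* Fold the L4 source and destination ports into a single hash input */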
+#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
+
 /* Table for statistics in mode 5 TLB */
 static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
 
@@ -276,90 +281,109 @@ ipv6_hash(struct ipv6_hdr *ipv6_hdr)
                        (word_src_addr[3] ^ word_dst_addr[3]);
 }
 
-static uint32_t
-udp_hash(struct udp_hdr *hdr)
-{
-       return hdr->src_port ^ hdr->dst_port;
-}
-
-static inline uint16_t
-xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy)
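+/*
+ * Skip up to two stacked VLAN tags (802.1Q / QinQ) that follow the Ethernet
+ * header: return the extra byte offset they add and update *proto to the
+ * encapsulated EtherType.
+ */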
+static inline size_t
+get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
 {
-       struct ether_hdr *eth_hdr;
-       struct udp_hdr *udp_hdr;
-       size_t eth_offset = 0;
-       uint32_t hash = 0;
+       size_t vlan_offset = 0;
 
-       if (slave_count == 1)
-               return 0;
+       if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+               struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+               vlan_offset = sizeof(struct vlan_hdr);
+               *proto = vlan_hdr->eth_proto;
 
-       switch (policy) {
-       case BALANCE_XMIT_POLICY_LAYER2:
-               eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
+               if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+                       vlan_hdr = vlan_hdr + 1;
 
-               hash = ether_hash(eth_hdr);
-               hash ^= hash >> 8;
-               return hash % slave_count;
+                       *proto = vlan_hdr->eth_proto;
+                       vlan_offset += sizeof(struct vlan_hdr);
+               }
+       }
+       return vlan_offset;
+}
 
-       case BALANCE_XMIT_POLICY_LAYER23:
-               eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
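+/* Layer 2 policy: slave index derived from a hash of the Ethernet header */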
+uint16_t
+xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
+{
+       struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
 
-               if (buf->ol_flags & PKT_RX_VLAN_PKT)
-                       eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
-               else
-                       eth_offset = sizeof(struct ether_hdr);
+       uint32_t hash = ether_hash(eth_hdr);
 
-               if (buf->ol_flags & PKT_RX_IPV4_HDR) {
-                       struct ipv4_hdr *ipv4_hdr;
-                       ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(buf,
-                                       unsigned char *) + eth_offset);
+       return (hash ^ (hash >> 8)) % slave_count;
+}
 
-                       hash = ether_hash(eth_hdr) ^ ipv4_hash(ipv4_hdr);
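+/*
+ * Layer 2+3 policy: combine the Ethernet header hash with a hash of the
+ * IPv4/IPv6 header, skipping any VLAN tags in front of it.
+ */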
+uint16_t
+xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
+{
+       struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
+       uint16_t proto = eth_hdr->ether_type;
+       size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
+       uint32_t hash, l3hash = 0;
+
+       hash = ether_hash(eth_hdr);
+
+       if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+               struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+                               ((char *)(eth_hdr + 1) + vlan_offset);
+               l3hash = ipv4_hash(ipv4_hdr);
+
+       } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+               struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+                               ((char *)(eth_hdr + 1) + vlan_offset);
+               l3hash = ipv6_hash(ipv6_hdr);
+       }
 
-               } else {
-                       struct ipv6_hdr *ipv6_hdr;
+       hash = hash ^ l3hash;
+       hash ^= hash >> 16;
+       hash ^= hash >> 8;
 
-                       ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(buf,
-                                       unsigned char *) + eth_offset);
+       return hash % slave_count;
+}
 
-                       hash = ether_hash(eth_hdr) ^ ipv6_hash(ipv6_hdr);
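+/*
+ * Layer 3+4 policy: hash the IP addresses and, for TCP/UDP, the L4 port
+ * pair, so that all packets of one flow are sent through the same slave.
+ */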
+uint16_t
+xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
+{
+       struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
+       uint16_t proto = eth_hdr->ether_type;
+       size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
+
+       struct udp_hdr *udp_hdr = NULL;
+       struct tcp_hdr *tcp_hdr = NULL;
+       uint32_t hash, l3hash = 0, l4hash = 0;
+
+       if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+               struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+                               ((char *)(eth_hdr + 1) + vlan_offset);
+               size_t ip_hdr_offset;
+
+               l3hash = ipv4_hash(ipv4_hdr);
+
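+               /*
+                * IHL is the IPv4 header length in 32-bit words; convert it
+                * to bytes so the L4 header is found even when IP options
+                * are present.
+                */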
+               ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+                               IPV4_IHL_MULTIPLIER;
+
+               if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
+                       tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
+                                       ip_hdr_offset);
+                       l4hash = HASH_L4_PORTS(tcp_hdr);
+               } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
+                       udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
+                                       ip_hdr_offset);
+                       l4hash = HASH_L4_PORTS(udp_hdr);
                }
-               break;
-
-       case BALANCE_XMIT_POLICY_LAYER34:
-               if (buf->ol_flags & PKT_RX_VLAN_PKT)
-                       eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
-               else
-                       eth_offset = sizeof(struct ether_hdr);
-
-               if (buf->ol_flags & PKT_RX_IPV4_HDR) {
-                       struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
-                                       (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
-
-                       if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
-                               udp_hdr = (struct udp_hdr *)
-                                               (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
-                                                               sizeof(struct ipv4_hdr));
-                               hash = ipv4_hash(ipv4_hdr) ^ udp_hash(udp_hdr);
-                       } else {
-                               hash = ipv4_hash(ipv4_hdr);
-                       }
-               } else {
-                       struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
-                                       (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
-
-                       if (ipv6_hdr->proto == IPPROTO_UDP) {
-                               udp_hdr = (struct udp_hdr *)
-                                               (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
-                                                               sizeof(struct ipv6_hdr));
-                               hash = ipv6_hash(ipv6_hdr) ^ udp_hash(udp_hdr);
-                       } else {
-                               hash = ipv6_hash(ipv6_hdr);
-                       }
+       } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+               struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+                               ((char *)(eth_hdr + 1) + vlan_offset);
+               l3hash = ipv6_hash(ipv6_hdr);
+
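+               /*
+                * The L4 header is taken to follow the fixed IPv6 header
+                * directly; IPv6 extension headers are not parsed here.
+                */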
+               if (ipv6_hdr->proto == IPPROTO_TCP) {
+                       tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
+                       l4hash = HASH_L4_PORTS(tcp_hdr);
+               } else if (ipv6_hdr->proto == IPPROTO_UDP) {
+                       udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
+                       l4hash = HASH_L4_PORTS(udp_hdr);
                }
-               break;
        }
 
+       hash = l3hash ^ l4hash;
        hash ^= hash >> 16;
        hash ^= hash >> 8;
 
@@ -536,8 +560,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
        /* Populate slaves mbuf with the packets which are to be sent on it  */
        for (i = 0; i < nb_pkts; i++) {
                /* Select output slave using hash based on xmit policy */
-               op_slave_id = xmit_slave_hash(bufs[i], num_of_slaves,
-                               internals->balance_xmit_policy);
+               op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);
 
                /* Populate slave mbuf arrays with mbufs for that slave */
                slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
@@ -575,7 +598,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];
-        /* possitions in slaves, not ID */
+        /* positions in slaves, not ID */
        uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
        uint8_t distributing_count;
 
@@ -622,8 +645,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                /* Populate slaves mbuf with the packets which are to be sent on it */
                for (i = 0; i < nb_pkts; i++) {
                        /* Select output slave using hash based on xmit policy */
-                       op_slave_idx = xmit_slave_hash(bufs[i], distributing_count,
-                                       internals->balance_xmit_policy);
+                       op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);
 
                        /* Populate slave mbuf arrays with mbufs for that slave. Use only
                         * slaves that are currently distributing. */
@@ -764,8 +786,6 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
 {
        struct ether_addr *mac_addr;
 
-       mac_addr = eth_dev->data->mac_addrs;
-
        if (eth_dev == NULL) {
                RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
                return -1;
@@ -776,6 +796,8 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
                return -1;
        }
 
+       /* Dereference eth_dev->data only after the checks above have passed */
+       mac_addr = eth_dev->data->mac_addrs;
+
        ether_addr_copy(mac_addr, dst_mac_addr);
        return 0;
 }
@@ -785,8 +807,6 @@ mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
 {
        struct ether_addr *mac_addr;
 
-       mac_addr = eth_dev->data->mac_addrs;
-
        if (eth_dev == NULL) {
                RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
                return -1;
@@ -797,6 +817,8 @@ mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
                return -1;
        }
 
+       mac_addr = eth_dev->data->mac_addrs;
+
        /* If new MAC is different to current MAC then update */
        if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
                memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
@@ -891,9 +913,9 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
 
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
-               RTE_BOND_LOG(WARNING,
+               RTE_LOG(WARNING, PMD,
                                "Using mode 4, it is necessary to do TX burst and RX burst "
-                               "at least every 100ms.");
+                               "at least every 100ms.\n");
                break;
        case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
@@ -1344,12 +1366,8 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        struct bond_dev_private *internals = dev->data->dev_private;
        struct rte_eth_stats slave_stats;
-
        int i;
 
-       /* clear bonded stats before populating from slaves */
-       memset(stats, 0, sizeof(*stats));
-
        for (i = 0; i < internals->slave_count; i++) {
                rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
 
@@ -1604,32 +1622,32 @@ bond_init(const char *name, const char *params)
        /* Parse link bonding mode */
        if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
                if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
-                               &bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
-                       RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
-                       return -1;
+                               &bond_ethdev_parse_slave_mode_kvarg,
+                               &bonding_mode) != 0) {
+                       RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
+                                       name);
+                       goto parse_error;
                }
        } else {
-               RTE_LOG(ERR, EAL,
-                               "Mode must be specified only once for bonded device %s\n",
-                               name);
-               return -1;
+               RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
+                               "device %s\n", name);
+               goto parse_error;
        }
 
        /* Parse socket id to create bonding device on */
        arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
        if (arg_count == 1) {
                if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
-                               &bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
-                       RTE_LOG(ERR, EAL,
-                                       "Invalid socket Id specified for bonded device %s\n",
-                                       name);
-                       return -1;
+                               &bond_ethdev_parse_socket_id_kvarg, &socket_id)
+                               != 0) {
+                       RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
+                                       "bonded device %s\n", name);
+                       goto parse_error;
                }
        } else if (arg_count > 1) {
-               RTE_LOG(ERR, EAL,
-                               "Socket Id can be specified only once for bonded device %s\n",
-                               name);
-               return -1;
+               RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
+                               "bonded device %s\n", name);
+               goto parse_error;
        } else {
                socket_id = rte_socket_id();
        }
@@ -1637,18 +1655,21 @@ bond_init(const char *name, const char *params)
        /* Create link bonding eth device */
        port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
        if (port_id < 0) {
-               RTE_LOG(ERR, EAL,
-                               "Failed to create socket %s in mode %u on socket %u.\n",
-                               name, bonding_mode, socket_id);
-               return -1;
+               RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode "
+                               "%u on socket %u.\n", name, bonding_mode,
+                               socket_id);
+               goto parse_error;
        }
        internals = rte_eth_devices[port_id].data->dev_private;
        internals->kvlist = kvlist;
 
-       RTE_LOG(INFO, EAL,
-                       "Create bonded device %s on port %d in mode %u on socket %u.\n",
-                       name, port_id, bonding_mode, socket_id);
+       RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on "
+                       "socket %u.\n", name, port_id, bonding_mode, socket_id);
        return 0;
+
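+/* Common error exit: free the kvargs list before returning failure */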
+parse_error:
+       rte_kvargs_free(kvlist);
+
+       return -1;
 }
 
 /* this part will resolve the slave portids after all the other pdev and vdev