X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbonding%2Frte_eth_bond_pmd.c;h=6a6ed890a0c34b6bc9ea55b8ec253a6b83452656;hb=168c59cfe42b;hp=6e137a6edc1dae299245638fe50ee706106a0b14;hpb=538da7a1cad25fbdffe298c8ca76fc4dbd262d1b;p=dpdk.git

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 6e137a6edc..6a6ed890a0 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -37,15 +37,15 @@ get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
 {
 	size_t vlan_offset = 0;
 
-	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||
-		rte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {
+	if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
 		struct rte_vlan_hdr *vlan_hdr =
 			(struct rte_vlan_hdr *)(eth_hdr + 1);
 
 		vlan_offset = sizeof(struct rte_vlan_hdr);
 		*proto = vlan_hdr->eth_proto;
 
-		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
 			vlan_hdr = vlan_hdr + 1;
 			*proto = vlan_hdr->eth_proto;
 			vlan_offset += sizeof(struct rte_vlan_hdr);
@@ -108,7 +108,8 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
 static inline uint8_t
 is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
 {
-	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+	const uint16_t ether_type_slow_be =
+		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
 
 	return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
 		(ethertype == ether_type_slow_be &&
@@ -122,7 +123,7 @@ is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
 static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
 	.dst.addr_bytes = { 0 },
 	.src.addr_bytes = { 0 },
-	.type = RTE_BE16(ETHER_TYPE_SLOW),
+	.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
 };
 
 static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
@@ -159,8 +160,7 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
 		uint16_t slave_port) {
 	struct rte_eth_dev_info slave_info;
 	struct rte_flow_error error;
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-			(bond_dev->data->dev_private);
+	struct bond_dev_private *internals = bond_dev->data->dev_private;
 
 	const struct rte_flow_action_queue lacp_queue_conf = {
 		.index = 0,
@@ -200,8 +200,7 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
 int
 bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
 	struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-			(bond_dev->data->dev_private);
+	struct bond_dev_private *internals = bond_dev->data->dev_private;
 	struct rte_eth_dev_info bond_info;
 	uint16_t idx;
 
@@ -226,9 +225,7 @@ int
 bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
 
 	struct rte_flow_error error;
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-			(bond_dev->data->dev_private);
-
+	struct bond_dev_private *internals = bond_dev->data->dev_private;
 	struct rte_flow_action_queue lacp_queue_conf = {
 		.index = internals->mode4.dedicated_queues.rx_qid,
 	};
@@ -398,7 +395,8 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
 	struct rte_ether_hdr *hdr;
 
-	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+	const uint16_t ether_type_slow_be =
+		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
 	uint16_t num_rx_total = 0;	/* Total number of received packets */
 	uint16_t slaves[RTE_MAX_ETHPORTS];
 	uint16_t slave_count, idx;
@@ -589,7 +587,7 @@ mode6_debug(const char __attribute__((unused)) *info,
 		struct rte_ether_hdr *eth_h, uint16_t port,
 		uint32_t __attribute__((unused)) *burstnumber)
 {
-	struct ipv4_hdr *ipv4_h;
+	struct rte_ipv4_hdr *ipv4_h;
 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
 	struct rte_arp_hdr *arp_h;
 	char dst_ip[16];
@@ -605,8 +603,8 @@ mode6_debug(const char __attribute__((unused)) *info,
 	strlcpy(buf, info, 16);
 #endif
 
-	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
-		ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
+	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
 		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
 		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
@@ -615,7 +613,7 @@ mode6_debug(const char __attribute__((unused)) *info,
 		update_client_stats(ipv4_h->src_addr, port, burstnumber);
 	}
 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
-	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
 		arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
 		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
 		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
@@ -644,14 +642,14 @@ bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		ether_type = eth_h->ether_type;
 		offset = get_vlan_offset(eth_h, &ether_type);
 
-		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
 			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
 #endif
 			bond_mode_alb_arp_recv(eth_h, offset, internals);
 		}
 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
-		else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
 			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
 #endif
 	}
@@ -753,13 +751,13 @@ ether_hash(struct rte_ether_hdr *eth_hdr)
 }
 
 static inline uint32_t
-ipv4_hash(struct ipv4_hdr *ipv4_hdr)
+ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
 {
 	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
 }
 
 static inline uint32_t
-ipv6_hash(struct ipv6_hdr *ipv6_hdr)
+ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
 {
 	unaligned_uint32_t *word_src_addr =
 		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
@@ -809,13 +807,13 @@ burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 
 		vlan_offset = get_vlan_offset(eth_hdr, &proto);
 
-		if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
-			struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
+			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
 					((char *)(eth_hdr + 1) + vlan_offset);
 			l3hash = ipv4_hash(ipv4_hdr);
 
-		} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
-			struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
+			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
 					((char *)(eth_hdr + 1) + vlan_offset);
 			l3hash = ipv6_hash(ipv6_hdr);
 		}
@@ -837,8 +835,8 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 	size_t vlan_offset;
 	int i;
 
-	struct udp_hdr *udp_hdr;
-	struct tcp_hdr *tcp_hdr;
+	struct rte_udp_hdr *udp_hdr;
+	struct rte_tcp_hdr *tcp_hdr;
 	uint32_t hash, l3hash, l4hash;
 
 	for (i = 0; i < nb_pkts; i++) {
@@ -849,8 +847,8 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 		l3hash = 0;
 		l4hash = 0;
 
-		if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
-			struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
+			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
 					((char *)(eth_hdr + 1) + vlan_offset);
 			size_t ip_hdr_offset;
 
@@ -860,11 +858,11 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 			if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
 								== 0)) {
 				ip_hdr_offset = (ipv4_hdr->version_ihl
-					& IPV4_HDR_IHL_MASK) *
-					IPV4_IHL_MULTIPLIER;
+					& RTE_IPV4_HDR_IHL_MASK) *
+					RTE_IPV4_IHL_MULTIPLIER;
 
 				if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
-					tcp_hdr = (struct tcp_hdr *)
+					tcp_hdr = (struct rte_tcp_hdr *)
 						((char *)ipv4_hdr +
 							ip_hdr_offset);
 					if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
@@ -872,7 +870,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 						l4hash = HASH_L4_PORTS(tcp_hdr);
 				} else if (ipv4_hdr->next_proto_id ==
 								IPPROTO_UDP) {
-					udp_hdr = (struct udp_hdr *)
+					udp_hdr = (struct rte_udp_hdr *)
 						((char *)ipv4_hdr +
 							ip_hdr_offset);
 					if ((size_t)udp_hdr + sizeof(*udp_hdr)
@@ -880,16 +878,16 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
 						l4hash = HASH_L4_PORTS(udp_hdr);
 				}
 			}
-		} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
-			struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
+			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
 					((char *)(eth_hdr + 1) + vlan_offset);
 			l3hash = ipv6_hash(ipv6_hdr);
 
 			if (ipv6_hdr->proto == IPPROTO_TCP) {
-				tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
+				tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
 				l4hash = HASH_L4_PORTS(tcp_hdr);
 			} else if (ipv6_hdr->proto == IPPROTO_UDP) {
-				udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
+				udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
 				l4hash = HASH_L4_PORTS(udp_hdr);
 			}
 		}
@@ -1107,7 +1105,7 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		ether_type = eth_h->ether_type;
 		offset = get_vlan_offset(eth_h, &ether_type);
 
-		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
 			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
 
 			/* Change src mac in eth header */
@@ -1733,8 +1731,7 @@ slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
 		struct rte_eth_dev *slave_eth_dev)
 {
 	int errval = 0;
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-		bonded_eth_dev->data->dev_private;
+	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
 	struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
 
 	if (port->slow_pool == NULL) {
@@ -1800,8 +1797,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	uint16_t q_id;
 	struct rte_flow_error flow_error;
 
-	struct bond_dev_private *internals = (struct bond_dev_private *)
-		bonded_eth_dev->data->dev_private;
+	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
 
 	/* Stop slave */
 	rte_eth_dev_stop(slave_eth_dev->data->port_id);
@@ -2252,7 +2248,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
 			internals->candidate_max_rx_pktlen :
-			ETHER_MAX_JUMBO_FRAME_LEN;
+			RTE_ETHER_MAX_JUMBO_FRAME_LEN;
 
 	/* Max number of tx/rx queues that the bonded device can support is the
 	 * minimum values of the bonded slaves, as all slaves must be capable
@@ -2419,8 +2415,8 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
 	if (cb_arg == NULL)
 		return;
 
-	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
-	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+	bonded_ethdev = cb_arg;
+	internals = bonded_ethdev->data->dev_private;
 
 	if (!bonded_ethdev->data->dev_started ||
 		!internals->link_status_polling_enabled)
@@ -3084,12 +3080,12 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	eth_dev->data->nb_tx_queues = (uint16_t)1;
 
 	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN *
+	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
 			BOND_MAX_MAC_ADDRS, 0, socket_id);
 	if (eth_dev->data->mac_addrs == NULL) {
 		RTE_BOND_LOG(ERR,
 			     "Failed to allocate %u bytes needed to store MAC addresses",
-			     ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
+			     RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
 		goto err;
 	}
 
@@ -3148,7 +3144,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	}
 
 	vlan_filter_bmp_size =
-		rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
+		rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
 	internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
 						   RTE_CACHE_LINE_SIZE);
 	if (internals->vlan_filter_bmpmem == NULL) {
@@ -3158,7 +3154,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 		goto err;
 	}
 
-	internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
+	internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
 			internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
 	if (internals->vlan_filter_bmp == NULL) {
 		RTE_BOND_LOG(ERR,