static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
static inline size_t
-get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
+get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
size_t vlan_offset = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||
- rte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {
- struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
+ struct rte_vlan_hdr *vlan_hdr =
+ (struct rte_vlan_hdr *)(eth_hdr + 1);
- vlan_offset = sizeof(struct vlan_hdr);
+ vlan_offset = sizeof(struct rte_vlan_hdr);
*proto = vlan_hdr->eth_proto;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
vlan_hdr = vlan_hdr + 1;
*proto = vlan_hdr->eth_proto;
- vlan_offset += sizeof(struct vlan_hdr);
+ vlan_offset += sizeof(struct rte_vlan_hdr);
}
}
return vlan_offset;
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
- const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+ const uint16_t ether_type_slow_be =
+ rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
(ethertype == ether_type_slow_be &&
static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
.dst.addr_bytes = { 0 },
.src.addr_bytes = { 0 },
- .type = RTE_BE16(ETHER_TYPE_SLOW),
+ .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};
static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
uint16_t slave_port) {
struct rte_eth_dev_info slave_info;
struct rte_flow_error error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
const struct rte_flow_action_queue lacp_queue_conf = {
.index = 0,
int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
struct rte_eth_dev_info bond_info;
uint16_t idx;
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
struct rte_flow_error error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
-
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
struct rte_flow_action_queue lacp_queue_conf = {
.index = internals->mode4.dedicated_queues.rx_qid,
};
struct bond_dev_private *internals = bd_rx_q->dev_private;
struct rte_eth_dev *bonded_eth_dev =
&rte_eth_devices[internals->port_id];
- struct ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
- struct ether_hdr *hdr;
+ struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
+ struct rte_ether_hdr *hdr;
- const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+ const uint16_t ether_type_slow_be =
+ rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
uint16_t num_rx_total = 0; /* Total number of received packets */
uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t slave_count, idx;
if (j + 3 < num_rx_total)
rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
- hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
+ hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
/* Remove packet from array if it is slow packet or slave is not
if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
!collecting ||
(!promisc &&
- !is_multicast_ether_addr(&hdr->d_addr) &&
- !is_same_ether_addr(bond_mac,
+ !rte_is_multicast_ether_addr(&hdr->d_addr) &&
+ !rte_is_same_ether_addr(bond_mac,
&hdr->d_addr)))) {
if (hdr->ether_type == ether_type_slow_be) {
arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
switch (arp_op) {
- case ARP_OP_REQUEST:
- snprintf(buf, buf_len, "%s", "ARP Request");
+ case RTE_ARP_OP_REQUEST:
+ strlcpy(buf, "ARP Request", buf_len);
return;
- case ARP_OP_REPLY:
- snprintf(buf, buf_len, "%s", "ARP Reply");
+ case RTE_ARP_OP_REPLY:
+ strlcpy(buf, "ARP Reply", buf_len);
return;
- case ARP_OP_REVREQUEST:
- snprintf(buf, buf_len, "%s", "Reverse ARP Request");
+ case RTE_ARP_OP_REVREQUEST:
+ strlcpy(buf, "Reverse ARP Request", buf_len);
return;
- case ARP_OP_REVREPLY:
- snprintf(buf, buf_len, "%s", "Reverse ARP Reply");
+ case RTE_ARP_OP_REVREPLY:
+ strlcpy(buf, "Reverse ARP Reply", buf_len);
return;
- case ARP_OP_INVREQUEST:
- snprintf(buf, buf_len, "%s", "Peer Identify Request");
+ case RTE_ARP_OP_INVREQUEST:
+ strlcpy(buf, "Peer Identify Request", buf_len);
return;
- case ARP_OP_INVREPLY:
- snprintf(buf, buf_len, "%s", "Peer Identify Reply");
+ case RTE_ARP_OP_INVREPLY:
+ strlcpy(buf, "Peer Identify Reply", buf_len);
return;
default:
break;
}
- snprintf(buf, buf_len, "%s", "Unknown");
+ strlcpy(buf, "Unknown", buf_len);
return;
}
#endif
#endif
static void
-mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
- uint16_t port, uint32_t __attribute__((unused)) *burstnumber)
+mode6_debug(const char __attribute__((unused)) *info,
+ struct rte_ether_hdr *eth_h, uint16_t port,
+ uint32_t __attribute__((unused)) *burstnumber)
{
- struct ipv4_hdr *ipv4_h;
+ struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
- struct arp_hdr *arp_h;
+ struct rte_arp_hdr *arp_h;
char dst_ip[16];
char ArpOp[24];
char buf[16];
strlcpy(buf, info, 16);
#endif
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
- ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+ ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
update_client_stats(ipv4_h->src_addr, port, burstnumber);
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
- arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
+ arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
- arp_op_name(rte_be_to_cpu_16(arp_h->arp_op),
+ arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
ArpOp, sizeof(ArpOp));
MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
}
{
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
struct bond_dev_private *internals = bd_tx_q->dev_private;
- struct ether_hdr *eth_h;
+ struct rte_ether_hdr *eth_h;
uint16_t ether_type, offset;
uint16_t nb_recv_pkts;
int i;
nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
for (i = 0; i < nb_recv_pkts; i++) {
- eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+ eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
ether_type = eth_h->ether_type;
offset = get_vlan_offset(eth_h, ðer_type);
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
bond_mode_alb_arp_recv(eth_h, offset, internals);
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
- else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
}
}
static inline uint16_t
-ether_hash(struct ether_hdr *eth_hdr)
+ether_hash(struct rte_ether_hdr *eth_hdr)
{
unaligned_uint16_t *word_src_addr =
(unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
}
static inline uint32_t
-ipv4_hash(struct ipv4_hdr *ipv4_hdr)
+ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}
static inline uint32_t
-ipv6_hash(struct ipv6_hdr *ipv6_hdr)
+ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
unaligned_uint32_t *word_src_addr =
(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
uint16_t slave_count, uint16_t *slaves)
{
- struct ether_hdr *eth_hdr;
+ struct rte_ether_hdr *eth_hdr;
uint32_t hash;
int i;
for (i = 0; i < nb_pkts; i++) {
- eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
hash = ether_hash(eth_hdr);
uint16_t slave_count, uint16_t *slaves)
{
uint16_t i;
- struct ether_hdr *eth_hdr;
+ struct rte_ether_hdr *eth_hdr;
uint16_t proto;
size_t vlan_offset;
uint32_t hash, l3hash;
for (i = 0; i < nb_pkts; i++) {
- eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
l3hash = 0;
proto = eth_hdr->ether_type;
vlan_offset = get_vlan_offset(eth_hdr, &proto);
- if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
+ struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv4_hash(ipv4_hdr);
- } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
+ struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
}
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
uint16_t slave_count, uint16_t *slaves)
{
- struct ether_hdr *eth_hdr;
+ struct rte_ether_hdr *eth_hdr;
uint16_t proto;
size_t vlan_offset;
int i;
- struct udp_hdr *udp_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_udp_hdr *udp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
uint32_t hash, l3hash, l4hash;
for (i = 0; i < nb_pkts; i++) {
- eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
+ size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
proto = eth_hdr->ether_type;
vlan_offset = get_vlan_offset(eth_hdr, &proto);
l3hash = 0;
l4hash = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
+ struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
size_t ip_hdr_offset;
if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
== 0)) {
ip_hdr_offset = (ipv4_hdr->version_ihl
- & IPV4_HDR_IHL_MASK) *
- IPV4_IHL_MULTIPLIER;
+ & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER;
if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)
+ tcp_hdr = (struct rte_tcp_hdr *)
((char *)ipv4_hdr +
ip_hdr_offset);
- l4hash = HASH_L4_PORTS(tcp_hdr);
+ /* pkt_end is one past the last valid byte,
+  * so a header ending exactly at pkt_end is
+  * still fully inside the mbuf: use <=, not <,
+  * or minimal packets lose their L4 hash.
+  */
+ if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
+ <= pkt_end)
+ l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv4_hdr->next_proto_id ==
IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)
+ udp_hdr = (struct rte_udp_hdr *)
((char *)ipv4_hdr +
ip_hdr_offset);
- l4hash = HASH_L4_PORTS(udp_hdr);
+ if ((size_t)udp_hdr + sizeof(*udp_hdr)
+ <= pkt_end)
+ l4hash = HASH_L4_PORTS(udp_hdr);
}
}
- } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
+ struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
if (ipv6_hdr->proto == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
- l4hash = HASH_L4_PORTS(tcp_hdr);
+ tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
+ /* Mirror the IPv4 path: the L4 header may be
+  * truncated, so bound it against pkt_end
+  * before reading the ports.
+  */
+ if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
+ <= pkt_end)
+ l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv6_hdr->proto == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
- l4hash = HASH_L4_PORTS(udp_hdr);
+ udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
+ if ((size_t)udp_hdr + sizeof(*udp_hdr)
+ <= pkt_end)
+ l4hash = HASH_L4_PORTS(udp_hdr);
}
}
uint16_t num_of_slaves = internals->active_slave_count;
uint16_t slaves[RTE_MAX_ETHPORTS];
- struct ether_hdr *ether_hdr;
- struct ether_addr primary_slave_addr;
- struct ether_addr active_slave_addr;
+ struct rte_ether_hdr *ether_hdr;
+ struct rte_ether_addr primary_slave_addr;
+ struct rte_ether_addr active_slave_addr;
if (num_of_slaves < 1)
return num_tx_total;
sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
- ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
+ rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
if (nb_pkts > 3) {
for (i = 0; i < 3; i++)
if (j + 3 < nb_pkts)
rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
- ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
- if (is_same_ether_addr(ðer_hdr->s_addr, &primary_slave_addr))
- ether_addr_copy(&active_slave_addr, ðer_hdr->s_addr);
+ ether_hdr = rte_pktmbuf_mtod(bufs[j],
+ struct rte_ether_hdr *);
+ if (rte_is_same_ether_addr(ðer_hdr->s_addr,
+ &primary_slave_addr))
+ rte_ether_addr_copy(&active_slave_addr,
+ ðer_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
struct bond_dev_private *internals = bd_tx_q->dev_private;
- struct ether_hdr *eth_h;
+ struct rte_ether_hdr *eth_h;
uint16_t ether_type, offset;
struct client_data *client_info;
/* Search tx buffer for ARP packets and forward them to alb */
for (i = 0; i < nb_pkts; i++) {
- eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+ eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
ether_type = eth_h->ether_type;
offset = get_vlan_offset(eth_h, ðer_type);
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
/* Change src mac in eth header */
"Failed to allocate ARP packet from pool");
continue;
}
- pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
- + client_info->vlan_count * sizeof(struct vlan_hdr);
+ pkt_size = sizeof(struct rte_ether_hdr) +
+ sizeof(struct rte_arp_hdr) +
+ client_info->vlan_count *
+ sizeof(struct rte_vlan_hdr);
upd_pkt->data_len = pkt_size;
upd_pkt->pkt_len = pkt_size;
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
/* Print TX stats including update packets */
for (j = 0; j < slave_bufs_pkts[i]; j++) {
- eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
+ eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
+ struct rte_ether_hdr *);
mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
}
#endif
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
for (j = 0; j < update_bufs_pkts[i]; j++) {
- eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
+ eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
+ struct rte_ether_hdr *);
mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
}
#endif
}
int
-mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
+mac_address_get(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *dst_mac_addr)
{
- struct ether_addr *mac_addr;
+ struct rte_ether_addr *mac_addr;
if (eth_dev == NULL) {
RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
mac_addr = eth_dev->data->mac_addrs;
- ether_addr_copy(mac_addr, dst_mac_addr);
+ rte_ether_addr_copy(mac_addr, dst_mac_addr);
return 0;
}
int
-mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
+mac_address_set(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *new_mac_addr)
{
- struct ether_addr *mac_addr;
+ struct rte_ether_addr *mac_addr;
if (eth_dev == NULL) {
RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
return 0;
}
-static const struct ether_addr null_mac_addr;
+static const struct rte_ether_addr null_mac_addr;
/*
* Add additional MAC addresses to the slave
uint16_t slave_port_id)
{
int i, ret;
- struct ether_addr *mac_addr;
+ struct rte_ether_addr *mac_addr;
for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
mac_addr = &bonded_eth_dev->data->mac_addrs[i];
- if (is_same_ether_addr(mac_addr, &null_mac_addr))
+ if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
break;
ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
uint16_t slave_port_id)
{
int i, rc, ret;
- struct ether_addr *mac_addr;
+ struct rte_ether_addr *mac_addr;
rc = 0;
for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
mac_addr = &bonded_eth_dev->data->mac_addrs[i];
- if (is_same_ether_addr(mac_addr, &null_mac_addr))
+ if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
break;
ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
struct rte_eth_dev *slave_eth_dev)
{
int errval = 0;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
if (port->slow_pool == NULL) {
uint16_t q_id;
struct rte_flow_error flow_error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
/* Stop slave */
rte_eth_dev_stop(slave_eth_dev->data->port_id);
slave_details->link_status_wait_to_complete = 0;
/* clean tlb_last_obytes when adding port for bonding device */
memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
- sizeof(struct ether_addr));
+ sizeof(struct rte_ether_addr));
}
void
}
if (internals->user_defined_mac == 0) {
- struct ether_addr *new_mac_addr = NULL;
+ struct rte_ether_addr *new_mac_addr = NULL;
for (i = 0; i < internals->slave_count; i++)
if (internals->slaves[i].port_id == internals->primary_port)
dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
internals->candidate_max_rx_pktlen :
- ETHER_MAX_JUMBO_FRAME_LEN;
+ RTE_ETHER_MAX_JUMBO_FRAME_LEN;
/* Max number of tx/rx queues that the bonded device can support is the
* minimum values of the bonded slaves, as all slaves must be capable
if (cb_arg == NULL)
return;
- bonded_ethdev = (struct rte_eth_dev *)cb_arg;
- internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+ bonded_ethdev = cb_arg;
+ internals = bonded_ethdev->data->dev_private;
if (!bonded_ethdev->data->dev_started ||
!internals->link_status_polling_enabled)
}
static int
-bond_ethdev_mac_address_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
{
if (mac_address_set(dev, addr)) {
RTE_BOND_LOG(ERR, "Failed to update MAC address");
}
static int
-bond_ethdev_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- __rte_unused uint32_t index, uint32_t vmdq)
+bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ __rte_unused uint32_t index, uint32_t vmdq)
{
struct rte_eth_dev *slave_eth_dev;
struct bond_dev_private *internals = dev->data->dev_private;
goto end;
}
- struct ether_addr *mac_addr = &dev->data->mac_addrs[index];
+ struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
for (i = 0; i < internals->slave_count; i++)
rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
eth_dev->data->nb_tx_queues = (uint16_t)1;
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
BOND_MAX_MAC_ADDRS, 0, socket_id);
if (eth_dev->data->mac_addrs == NULL) {
RTE_BOND_LOG(ERR,
"Failed to allocate %u bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
+ RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
goto err;
}
}
vlan_filter_bmp_size =
- rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
+ rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
RTE_CACHE_LINE_SIZE);
if (internals->vlan_filter_bmpmem == NULL) {
goto err;
}
- internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
+ internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
if (internals->vlan_filter_bmp == NULL) {
RTE_BOND_LOG(ERR,
/* Parse MAC address for bonded device */
arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
if (arg_count == 1) {
- struct ether_addr bond_mac;
+ struct rte_ether_addr bond_mac;
if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {