ethdev: add an argument to internal callback function
[dpdk.git] / drivers / net / bonding / rte_eth_bond_pmd.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"

#define REORDER_PERIOD_MS 10

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];

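/* Return the extra header length contributed by VLAN tags, advancing *proto
 * past up to two stacked (QinQ) VLAN headers so that callers can locate the
 * L3 header. *proto must hold the Ethernet type in network byte order. */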
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
        size_t vlan_offset = 0;

        if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
                struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

                vlan_offset = sizeof(struct vlan_hdr);
                *proto = vlan_hdr->eth_proto;

                if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
                        vlan_hdr = vlan_hdr + 1;
                        *proto = vlan_hdr->eth_proto;
                        vlan_offset += sizeof(struct vlan_hdr);
                }
        }
        return vlan_offset;
}

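/* RX burst used by the round-robin, balance and broadcast modes: drain each
 * active slave in turn into the caller's array until either nb_pkts mbufs
 * have been collected or every slave has been polled once. */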
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        uint16_t num_rx_slave = 0;
        uint16_t num_rx_total = 0;

        int i;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
                /* Offset into *bufs increases as packets are received
                 * from earlier slaves */
                num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
                                bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
                if (num_rx_slave) {
                        num_rx_total += num_rx_slave;
                        nb_pkts -= num_rx_slave;
                }
        }

        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        return rte_eth_rx_burst(internals->current_primary_port,
                        bd_rx_q->queue_id, bufs, nb_pkts);
}

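/* Mode 4 (802.3AD) RX: receive from every active slave, then filter the
 * burst in place. LACP slow-protocol frames are handed to the mode 4 state
 * machine, and frames from slaves that are not collecting, or unicast
 * frames that do not match the bond MAC, are dropped unless promiscuous
 * mode is enabled. */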
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct ether_addr bond_mac;

        struct ether_hdr *hdr;

        const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
        uint16_t num_rx_total = 0;      /* Total number of received packets */
        uint8_t slaves[RTE_MAX_ETHPORTS];
        uint8_t slave_count;

        uint8_t collecting;  /* current slave collecting status */
        const uint8_t promisc = internals->promiscuous_en;
        uint8_t i, j, k;

        rte_eth_macaddr_get(internals->port_id, &bond_mac);
        /* Copy slave list to protect against slave up/down changes during rx
         * bursting */
        slave_count = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * slave_count);

        for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
                j = num_rx_total;
                collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

                /* Read packets from this slave */
                num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
                                &bufs[num_rx_total], nb_pkts - num_rx_total);

                for (k = j; k < 2 && k < num_rx_total; k++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

                /* Handle slow protocol packets. */
                while (j < num_rx_total) {
                        if (j + 3 < num_rx_total)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

                        hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
                        /* Remove the packet from the array if it is a slow packet,
                         * the slave is not in collecting state, or the bonding
                         * interface is not in promiscuous mode and the destination
                         * address does not match. */
                        if (unlikely(hdr->ether_type == ether_type_slow_be ||
                                !collecting || (!promisc &&
                                        !is_multicast_ether_addr(&hdr->d_addr) &&
                                        !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

                                if (hdr->ether_type == ether_type_slow_be) {
                                        bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
                                                bufs[j]);
                                } else
                                        rte_pktmbuf_free(bufs[j]);

                                /* Packet is consumed by mode 4 or dropped, shift the array */
                                num_rx_total--;
                                if (j < num_rx_total) {
                                        memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
                                                (num_rx_total - j));
                                }
                        } else
                                j++;
                }
        }

        return num_rx_total;
}

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf)
{
        switch (arp_op) {
        case ARP_OP_REQUEST:
                snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
                return;
        case ARP_OP_REPLY:
                snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
                return;
        case ARP_OP_REVREQUEST:
                snprintf(buf, sizeof("Reverse ARP Request"), "%s",
                                "Reverse ARP Request");
                return;
        case ARP_OP_REVREPLY:
                snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
                                "Reverse ARP Reply");
                return;
        case ARP_OP_INVREQUEST:
                snprintf(buf, sizeof("Peer Identify Request"), "%s",
                                "Peer Identify Request");
                return;
        case ARP_OP_INVREPLY:
                snprintf(buf, sizeof("Peer Identify Reply"), "%s",
                                "Peer Identify Reply");
                return;
        default:
                break;
        }
        snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
        return;
}
#endif
#define MaxIPv4String   16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
        uint32_t ipv4_addr;

        ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
        snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
                (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
                ipv4_addr & 0xFF);
}

#define MAX_CLIENTS_NUMBER      128
uint8_t active_clients;
struct client_stats_t {
        uint8_t port;
        uint32_t ipv4_addr;
        uint32_t ipv4_rx_packets;
        uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
        int i = 0;

        for (; i < MAX_CLIENTS_NUMBER; i++) {
                if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
                        /* Just update the packet count for this client */
                        if (TXorRXindicator == &burstnumberRX)
                                client_stats[i].ipv4_rx_packets++;
                        else
                                client_stats[i].ipv4_tx_packets++;
                        return;
                }
        }
        /* We have a new client. Insert it into the table and update the stats */
        if (TXorRXindicator == &burstnumberRX)
                client_stats[active_clients].ipv4_rx_packets++;
        else
                client_stats[active_clients].ipv4_tx_packets++;
        client_stats[active_clients].ipv4_addr = addr;
        client_stats[active_clients].port = port;
        active_clients++;
}

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber)     \
                RTE_LOG(DEBUG, PMD, \
                "%s " \
                "port:%d " \
                "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
                "SrcIP:%s " \
                "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
                "DstIP:%s " \
                "%s " \
                "%d\n", \
                info, \
                port, \
                eth_h->s_addr.addr_bytes[0], \
                eth_h->s_addr.addr_bytes[1], \
                eth_h->s_addr.addr_bytes[2], \
                eth_h->s_addr.addr_bytes[3], \
                eth_h->s_addr.addr_bytes[4], \
                eth_h->s_addr.addr_bytes[5], \
                src_ip, \
                eth_h->d_addr.addr_bytes[0], \
                eth_h->d_addr.addr_bytes[1], \
                eth_h->d_addr.addr_bytes[2], \
                eth_h->d_addr.addr_bytes[3], \
                eth_h->d_addr.addr_bytes[4], \
                eth_h->d_addr.addr_bytes[5], \
                dst_ip, \
                arp_op, \
                ++burstnumber)
#endif

static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
                uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
        struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        struct arp_hdr *arp_h;
        char dst_ip[16];
        char ArpOp[24];
        char buf[16];
#endif
        char src_ip[16];

        uint16_t ether_type = eth_h->ether_type;
        uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        snprintf(buf, 16, "%s", info);
#endif

        if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
                ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
                ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
                update_client_stats(ipv4_h->src_addr, port, burstnumber);
        }
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
                arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
                ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
                arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
        }
#endif
}
#endif

static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* This is an RX path, so cast to the RX queue structure */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct ether_hdr *eth_h;
        uint16_t ether_type, offset;
        uint16_t nb_recv_pkts;
        int i;

        nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

        for (i = 0; i < nb_recv_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
                        bond_mode_alb_arp_recv(eth_h, offset, internals);
                }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
                        mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
        }

        return nb_recv_pkts;
}

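/* Mode 0 (round-robin) TX: spread the burst evenly across the active
 * slaves, starting one slave past where the previous burst left off. Note
 * that the starting offset lives in a single static variable shared by all
 * TX queues. Packets a slave could not accept are moved to the tail of bufs
 * so the application can retry them. */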
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave;

        static int slave_idx = 0;
        int i, cslave_idx = 0, tx_fail_total = 0;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate the slave mbuf arrays with the packets to be sent on each slave */
        for (i = 0; i < nb_pkts; i++) {
                cslave_idx = (slave_idx + i) % num_of_slaves;
                slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
        }

        /* increment current slave index so the next call to tx burst starts on the
         * next slave */
        slave_idx = ++cslave_idx;

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* if tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += tx_fail_slave;

                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                                &slave_bufs[i][num_tx_slave],
                                                tx_fail_slave * sizeof(bufs[0]));
                        }
                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
                struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        if (internals->active_slave_count < 1)
                return 0;

        return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
                        bufs, nb_pkts);
}

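/* Transmit hash helpers for the balance and 802.3AD modes. Each xmit_l*
 * policy folds progressively deeper header fields (L2 MACs, L3 addresses,
 * L4 ports) into one hash and reduces it modulo the slave count, so all
 * packets of a flow always map to the same slave. */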
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
        unaligned_uint16_t *word_src_addr =
                (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
        unaligned_uint16_t *word_dst_addr =
                (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
        return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
        unaligned_uint32_t *word_src_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
        unaligned_uint32_t *word_dst_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]) ^
                        (word_src_addr[3] ^ word_dst_addr[3]);
}

uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

        uint32_t hash = ether_hash(eth_hdr);

        hash ^= hash >> 8;

        return hash % slave_count;
}

uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
        uint16_t proto = eth_hdr->ether_type;
        size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
        uint32_t hash, l3hash = 0;

        hash = ether_hash(eth_hdr);

        if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
                struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv4_hash(ipv4_hdr);

        } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
                struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv6_hash(ipv6_hdr);
        }

        hash = hash ^ l3hash;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        return hash % slave_count;
}

uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
        uint16_t proto = eth_hdr->ether_type;
        size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

        struct udp_hdr *udp_hdr = NULL;
        struct tcp_hdr *tcp_hdr = NULL;
        uint32_t hash, l3hash = 0, l4hash = 0;

        if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
                struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                size_t ip_hdr_offset;

                l3hash = ipv4_hash(ipv4_hdr);

                /* there is no L4 header in a fragmented packet */
                if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
                        ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
                                        IPV4_IHL_MULTIPLIER;

                        if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
                                tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
                                                ip_hdr_offset);
                                l4hash = HASH_L4_PORTS(tcp_hdr);
                        } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
                                udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
                                                ip_hdr_offset);
                                l4hash = HASH_L4_PORTS(udp_hdr);
                        }
                }
        } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
                struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv6_hash(ipv6_hdr);

                if (ipv6_hdr->proto == IPPROTO_TCP) {
                        tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
                        l4hash = HASH_L4_PORTS(tcp_hdr);
                } else if (ipv6_hdr->proto == IPPROTO_UDP) {
                        udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
                        l4hash = HASH_L4_PORTS(udp_hdr);
                }
        }

        hash = l3hash ^ l4hash;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        return hash % slave_count;
}

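/* The hash helpers above are not called directly by applications; the
 * policy is chosen through the bonding API. A minimal sketch, assuming
 * "bond_port" was returned by rte_eth_bond_create():
 *
 *      if (rte_eth_bond_xmit_policy_set(bond_port,
 *                      BALANCE_XMIT_POLICY_LAYER34) != 0)
 *              rte_exit(EXIT_FAILURE, "Cannot set balance xmit policy\n");
 *
 * After this call internals->xmit_hash points at xmit_l34_hash, so every
 * TCP/UDP flow is pinned to one slave by its address/port tuple. */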
struct bwg_slave {
        uint64_t bwg_left_int;
        uint64_t bwg_left_remainder;
        uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals)
{
        int i;

        for (i = 0; i < internals->active_slave_count; i++) {
                tlb_last_obytets[internals->active_slaves[i]] = 0;
        }
}

static int
bandwidth_cmp(const void *a, const void *b)
{
        const struct bwg_slave *bwg_a = a;
        const struct bwg_slave *bwg_b = b;
        int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
        int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
                        (int64_t)bwg_a->bwg_left_remainder;
        if (diff > 0)
                return 1;
        else if (diff < 0)
                return -1;
        else if (diff2 > 0)
                return 1;
        else if (diff2 < 0)
                return -1;
        else
                return 0;
}

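/* Estimate the share of a slave's link capacity that is still unused.
 * link_bwg is the link speed converted from Mbps to bytes per second and
 * scaled by the reorder window; the quotient and remainder of the unused
 * share are kept separately so bandwidth_cmp() can order slaves without
 * losing precision. */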
static void
bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
                struct bwg_slave *bwg_slave)
{
        struct rte_eth_link link_status;

        rte_eth_link_get(port_id, &link_status);
        uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
        if (link_bwg == 0)
                return;
        link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
        bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
        bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}

static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
        struct bond_dev_private *internals = arg;
        struct rte_eth_stats slave_stats;
        struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
        uint8_t slave_count;
        uint64_t tx_bytes;

        uint8_t update_stats = 0;
        uint8_t i, slave_id;

        internals->slave_update_idx++;

        if (internals->slave_update_idx >= REORDER_PERIOD_MS)
                update_stats = 1;

        for (i = 0; i < internals->active_slave_count; i++) {
                slave_id = internals->active_slaves[i];
                rte_eth_stats_get(slave_id, &slave_stats);
                tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
                bandwidth_left(slave_id, tx_bytes,
                                internals->slave_update_idx, &bwg_array[i]);
                bwg_array[i].slave = slave_id;

                if (update_stats) {
                        tlb_last_obytets[slave_id] = slave_stats.obytes;
                }
        }

        if (update_stats == 1)
                internals->slave_update_idx = 0;

        slave_count = i;
        qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
        for (i = 0; i < slave_count; i++)
                internals->tlb_slaves_order[i] = bwg_array[i].slave;

        rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
                        internals);
}

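/* Mode 5 (TLB) TX: walk the slaves in the bandwidth order maintained by
 * bond_ethdev_update_tlb_slave_cb(), rewriting the source MAC of packets
 * that still carry the primary slave's address so that each frame leaves
 * with the MAC of the slave that actually transmits it. */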
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_eth_dev *primary_port =
                        &rte_eth_devices[internals->primary_port];
        uint16_t num_tx_total = 0;
        uint8_t i, j;

        uint8_t num_of_slaves = internals->active_slave_count;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        struct ether_hdr *ether_hdr;
        struct ether_addr primary_slave_addr;
        struct ether_addr active_slave_addr;

        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->tlb_slaves_order,
                        sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

        ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

        if (nb_pkts > 3) {
                for (i = 0; i < 3; i++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
        }

        for (i = 0; i < num_of_slaves; i++) {
                rte_eth_macaddr_get(slaves[i], &active_slave_addr);
                for (j = num_tx_total; j < nb_pkts; j++) {
                        if (j + 3 < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

                        ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
                        if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
                                ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
                }

                num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                bufs + num_tx_total, nb_pkts - num_tx_total);

                if (num_tx_total == nb_pkts)
                        break;
        }

        return num_tx_total;
}

void
bond_tlb_disable(struct bond_dev_private *internals)
{
        rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
        bond_ethdev_update_tlb_slave_cb(internals);
}

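/* Mode 6 (ALB) TX: ARP packets are steered through the ALB client table so
 * that each peer stays pinned to one slave, ARP update packets are
 * generated when the table changes (mode6.ntt), and all non-ARP traffic
 * falls through to the TLB transmit path. */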
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct ether_hdr *eth_h;
        uint16_t ether_type, offset;

        struct client_data *client_info;

        /*
         * We create transmit buffers for every slave and one additional to send
         * through tlb. In the worst case every packet will be sent on one port.
         */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
        uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

        /*
         * We create separate transmit buffers for update packets as they won't
         * be counted in num_tx_total.
         */
        struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
        uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

        struct rte_mbuf *upd_pkt;
        size_t pkt_size;

        uint16_t num_send, num_not_send = 0;
        uint16_t num_tx_total = 0;
        uint8_t slave_idx;

        int i, j;

        /* Search the tx buffer for ARP packets and forward them to alb */
        for (i = 0; i < nb_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
                        slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

                        /* Change src mac in eth header */
                        rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

                        /* Add packet to slave tx buffer */
                        slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
                        slave_bufs_pkts[slave_idx]++;
                } else {
                        /* If packet is not ARP, send it with TLB policy */
                        slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
                                        bufs[i];
                        slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
                }
        }

        /* Update connected client ARP tables */
        if (internals->mode6.ntt) {
                for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
                        client_info = &internals->mode6.client_table[i];

                        if (client_info->in_use) {
                                /* Allocate new packet to send ARP update on current slave */
                                upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
                                if (upd_pkt == NULL) {
                                        RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
                                        continue;
                                }
                                pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
                                                + client_info->vlan_count * sizeof(struct vlan_hdr);
                                upd_pkt->data_len = pkt_size;
                                upd_pkt->pkt_len = pkt_size;

                                slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
                                                internals);

                                /* Add packet to update tx buffer */
                                update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
                                update_bufs_pkts[slave_idx]++;
                        }
                }
                internals->mode6.ntt = 0;
        }

        /* Send ARP packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (slave_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
                                        slave_bufs[i], slave_bufs_pkts[i]);
                        /* Move the unsent packets (the tail of slave_bufs[i]) back
                         * to the end of bufs */
                        for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
                                bufs[nb_pkts - 1 - num_not_send - j] =
                                                slave_bufs[i][slave_bufs_pkts[i] - 1 - j];
                        }

                        num_tx_total += num_send;
                        num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        /* Print TX stats including update packets */
                        for (j = 0; j < slave_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
                                mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send update packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (update_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
                                        update_bufs_pkts[i]);
                        for (j = num_send; j < update_bufs_pkts[i]; j++) {
                                rte_pktmbuf_free(update_bufs[i][j]);
                        }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        for (j = 0; j < update_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
                                mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send non-ARP packets using tlb policy */
        if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
                num_send = bond_ethdev_tx_burst_tlb(queue,
                                slave_bufs[RTE_MAX_ETHPORTS],
                                slave_bufs_pkts[RTE_MAX_ETHPORTS]);

                /* As above, only the unsent tail is copied back to bufs */
                for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send; j++) {
                        bufs[nb_pkts - 1 - num_not_send - j] =
                                        slave_bufs[RTE_MAX_ETHPORTS]
                                                [slave_bufs_pkts[RTE_MAX_ETHPORTS] - 1 - j];
                }

                num_tx_total += num_send;
                num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

        int i, op_slave_id;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate the slave mbuf arrays with the packets to be sent on each slave */
        for (i = 0; i < nb_pkts; i++) {
                /* Select output slave using hash based on xmit policy */
                op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

                /* Populate slave mbuf arrays with mbufs for that slave */
                slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* if tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += slave_tx_fail_count;
                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                                &slave_bufs[i][num_tx_slave],
                                                slave_tx_fail_count * sizeof(bufs[0]));
                        }

                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

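/* Mode 4 (802.3AD) TX: before distributing the data burst, drain each
 * slave's tx_ring of the LACPDU/marker frames queued by the mode 4 state
 * machine. These slow packets are always placed first in the per-slave
 * buffers and are not counted in the value returned to the caller. */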
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];
        /* positions in slaves, not ID */
        uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
        uint8_t distributing_count;

        uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
        uint16_t i, j, op_slave_idx;
        const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

        /* Allocate space for the additional slow packets used in 802.3AD mode. */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
        void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

        /* Total amount of packets in slave_bufs */
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
        /* Slow packets placed in each slave */
        uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

        distributing_count = 0;
        for (i = 0; i < num_of_slaves; i++) {
                struct port *port = &mode_8023ad_ports[slaves[i]];

                slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
                                slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
                slave_nb_pkts[i] = slave_slow_nb_pkts[i];

                for (j = 0; j < slave_slow_nb_pkts[i]; j++)
                        slave_bufs[i][j] = slow_pkts[j];

                if (ACTOR_STATE(port, DISTRIBUTING))
                        distributing_offsets[distributing_count++] = i;
        }

        if (likely(distributing_count > 0)) {
                /* Populate the slave mbuf arrays with the packets to be sent on them */
                for (i = 0; i < nb_pkts; i++) {
                        /* Select output slave using hash based on xmit policy */
                        op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

                        /* Populate slave mbuf arrays with mbufs for that slave. Use only
                         * slaves that are currently distributing. */
                        uint8_t slave_offset = distributing_offsets[op_slave_idx];
                        slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
                        slave_nb_pkts[slave_offset]++;
                }
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] == 0)
                        continue;

                num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                slave_bufs[i], slave_nb_pkts[i]);

                /* If tx burst fails drop slow packets */
                for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
                        rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

                num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
                num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

                /* If tx burst fails move packets to end of bufs */
                if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                        uint16_t j = nb_pkts - num_tx_fail_total;
                        for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
                                bufs[j] = slave_bufs[i][num_tx_slave];
                }
        }

        return num_tx_total;
}

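/* Mode 3 (broadcast) TX: every mbuf is sent on every active slave, so each
 * packet's reference count is first raised by (slave count - 1). On partial
 * failure only the most successful slave's result is reported and the extra
 * references held for the other slaves are released. */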
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t tx_failed_flag = 0, num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t max_nb_of_tx_pkts = 0;

        int slave_tx_total[RTE_MAX_ETHPORTS];
        int i, most_successful_tx_slave = -1;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return 0;

        /* Increment reference count on mbufs */
        for (i = 0; i < nb_pkts; i++)
                rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

        /* Transmit burst on each active slave */
        for (i = 0; i < num_of_slaves; i++) {
                slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        bufs, nb_pkts);

                if (unlikely(slave_tx_total[i] < nb_pkts))
                        tx_failed_flag = 1;

                /* record the value and slave index for the slave which transmits the
                 * maximum number of packets */
                if (slave_tx_total[i] > max_nb_of_tx_pkts) {
                        max_nb_of_tx_pkts = slave_tx_total[i];
                        most_successful_tx_slave = i;
                }
        }

        /* if slaves fail to transmit packets from burst, the calling application
         * is not expected to know about multiple references to packets so we must
         * handle failures of all packets except those of the most successful slave
         */
        if (unlikely(tx_failed_flag))
                for (i = 0; i < num_of_slaves; i++)
                        if (i != most_successful_tx_slave)
                                while (slave_tx_total[i] < nb_pkts)
                                        rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

        return max_nb_of_tx_pkts;
}

void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
                struct rte_eth_link *slave_dev_link)
{
        struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

        if (slave_dev_link->link_status &&
                bonded_eth_dev->data->dev_started) {
                bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
                bonded_dev_link->link_speed = slave_dev_link->link_speed;

                internals->link_props_set = 1;
        }
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

        memset(&(bonded_eth_dev->data->dev_link), 0,
                        sizeof(bonded_eth_dev->data->dev_link));

        internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
                struct rte_eth_link *slave_dev_link)
{
        if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
                bonded_dev_link->link_speed != slave_dev_link->link_speed)
                return -1;

        return 0;
}

int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
        struct ether_addr *mac_addr;

        if (eth_dev == NULL) {
                RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
                return -1;
        }

        if (dst_mac_addr == NULL) {
                RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
                return -1;
        }

        mac_addr = eth_dev->data->mac_addrs;

        ether_addr_copy(mac_addr, dst_mac_addr);
        return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
        struct ether_addr *mac_addr;

        if (eth_dev == NULL) {
                RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
                return -1;
        }

        if (new_mac_addr == NULL) {
                RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
                return -1;
        }

        mac_addr = eth_dev->data->mac_addrs;

        /* If the new MAC is different from the current MAC then update */
        if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
                memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

        return 0;
}

int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
        int i;

        /* Update slave devices MAC addresses */
        if (internals->slave_count < 1)
                return -1;

        switch (internals->mode) {
        case BONDING_MODE_ROUND_ROBIN:
        case BONDING_MODE_BALANCE:
        case BONDING_MODE_BROADCAST:
                for (i = 0; i < internals->slave_count; i++) {
                        if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
                                        bonded_eth_dev->data->mac_addrs)) {
                                RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                internals->slaves[i].port_id);
                                return -1;
                        }
                }
                break;
        case BONDING_MODE_8023AD:
                bond_mode_8023ad_mac_address_update(bonded_eth_dev);
                break;
        case BONDING_MODE_ACTIVE_BACKUP:
        case BONDING_MODE_TLB:
        case BONDING_MODE_ALB:
        default:
                for (i = 0; i < internals->slave_count; i++) {
                        if (internals->slaves[i].port_id ==
                                        internals->current_primary_port) {
                                if (mac_address_set(
                                                &rte_eth_devices[internals->current_primary_port],
                                                bonded_eth_dev->data->mac_addrs)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->current_primary_port);
                                        return -1;
                                }
                        } else {
                                if (mac_address_set(
                                                &rte_eth_devices[internals->slaves[i].port_id],
                                                &internals->slaves[i].persisted_mac_addr)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->slaves[i].port_id);
                                        return -1;
                                }
                        }
                }
        }

        return 0;
}

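/* bond_ethdev_mode_set() below is normally reached through the public
 * bonding API rather than called directly. A minimal sketch, assuming
 * slave ports 0 and 1 already exist and are stopped:
 *
 *      int bond_port = rte_eth_bond_create("bond0", BONDING_MODE_BALANCE,
 *                      rte_socket_id());
 *      if (bond_port < 0)
 *              rte_exit(EXIT_FAILURE, "Cannot create bonded device\n");
 *      rte_eth_bond_slave_add(bond_port, 0);
 *      rte_eth_bond_slave_add(bond_port, 1);
 *
 * The mode argument of rte_eth_bond_create() (or a later call to
 * rte_eth_bond_mode_set()) ends up here, installing the matching rx/tx
 * burst functions on the ethdev. */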
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
        struct bond_dev_private *internals;

        internals = eth_dev->data->dev_private;

        switch (mode) {
        case BONDING_MODE_ROUND_ROBIN:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_ACTIVE_BACKUP:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
                break;
        case BONDING_MODE_BALANCE:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_BROADCAST:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_8023AD:
                if (bond_mode_8023ad_enable(eth_dev) != 0)
                        return -1;

                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
                RTE_LOG(WARNING, PMD,
                                "In mode 4, the application must invoke the TX and RX "
                                "burst functions at least every 100ms.\n");
                break;
        case BONDING_MODE_TLB:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
                break;
        case BONDING_MODE_ALB:
                if (bond_mode_alb_enable(eth_dev) != 0)
                        return -1;

                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
                break;
        default:
                return -1;
        }

        internals->mode = mode;

        return 0;
}

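/* Bring a slave port in line with the bonded device: stop it, mirror the
 * bond's RSS and VLAN-filter configuration onto it, create any RX/TX queues
 * the bond has that the slave does not, restart it and resynchronize its
 * RSS redirection table. */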
1301 int
1302 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1303                 struct rte_eth_dev *slave_eth_dev)
1304 {
1305         struct bond_rx_queue *bd_rx_q;
1306         struct bond_tx_queue *bd_tx_q;
1307
1308         uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
1309         uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
1310         int errval;
1311         uint16_t q_id;
1312
1313         /* Stop slave */
1314         rte_eth_dev_stop(slave_eth_dev->data->port_id);
1315
1316         /* Enable interrupts on slave device if supported */
1317         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1318                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1319
1320         /* If RSS is enabled for bonding, try to enable it for slaves  */
1321         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1322                 if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
1323                                 != 0) {
1324                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1325                                         bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
1326                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1327                                         bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
1328                 } else {
1329                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1330                 }
1331
1332                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1333                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1334                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1335                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1336         }
1337
1338         slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
1339                         bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
1340
1341         /* Configure device */
1342         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1343                         bonded_eth_dev->data->nb_rx_queues,
1344                         bonded_eth_dev->data->nb_tx_queues,
1345                         &(slave_eth_dev->data->dev_conf));
1346         if (errval != 0) {
1347                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1348                                 slave_eth_dev->data->port_id, errval);
1349                 return errval;
1350         }
1351
1352         /* Setup Rx Queues */
1353         /* Re-use the slave's existing queues; only set up newly added ones */
1354         for (q_id = old_nb_rx_queues;
1355              q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1356                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1357
1358                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1359                                 bd_rx_q->nb_rx_desc,
1360                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1361                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1362                 if (errval != 0) {
1363                         RTE_BOND_LOG(ERR,
1364                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1365                                         slave_eth_dev->data->port_id, q_id, errval);
1366                         return errval;
1367                 }
1368         }
1369
1370         /* Setup Tx Queues */
1371         /* Re-use the slave's existing queues; only set up newly added ones */
1372         for (q_id = old_nb_tx_queues;
1373              q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1374                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1375
1376                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1377                                 bd_tx_q->nb_tx_desc,
1378                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1379                                 &bd_tx_q->tx_conf);
1380                 if (errval != 0) {
1381                         RTE_BOND_LOG(ERR,
1382                                         "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1383                                         slave_eth_dev->data->port_id, q_id, errval);
1384                         return errval;
1385                 }
1386         }
1387
1388         /* Start device */
1389         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1390         if (errval != 0) {
1391                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1392                                 slave_eth_dev->data->port_id, errval);
1393                 return -1;
1394         }
1395
1396         /* If RSS is enabled for bonding, synchronize RETA */
1397         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1398                 int i;
1399                 struct bond_dev_private *internals;
1400
1401                 internals = bonded_eth_dev->data->dev_private;
1402
1403                 for (i = 0; i < internals->slave_count; i++) {
1404                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1405                                 errval = rte_eth_dev_rss_reta_update(
1406                                                 slave_eth_dev->data->port_id,
1407                                                 &internals->reta_conf[0],
1408                                                 internals->slaves[i].reta_size);
1409                                 if (errval != 0) {
1410                                         RTE_LOG(WARNING, PMD,
1411                                                         "rte_eth_dev_rss_reta_update on slave port %d failed (err %d)."
1412                                                         " RSS configuration for bonding may be inconsistent.\n",
1413                                                         slave_eth_dev->data->port_id, errval);
1414                                 }
1415                                 break;
1416                         }
1417                 }
1418         }
1419
1420         /* If lsc interrupt is set, check initial slave's link status */
1421         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1422                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1423                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
1424
1425         return 0;
1426 }
1427
1428 void
1429 slave_remove(struct bond_dev_private *internals,
1430                 struct rte_eth_dev *slave_eth_dev)
1431 {
1432         uint8_t i;
1433
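             /* Find the index of the departing slave in the slave table */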
1434         for (i = 0; i < internals->slave_count; i++)
1435                 if (internals->slaves[i].port_id ==
1436                                 slave_eth_dev->data->port_id)
1437                         break;
1438
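             /* If it was not the last entry, shift the remaining slaves down
              * one slot to keep the table contiguous */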
1439         if (i < (internals->slave_count - 1))
1440                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1441                                 sizeof(internals->slaves[0]) *
1442                                 (internals->slave_count - i - 1));
1443
1444         internals->slave_count--;
1445 }
1446
1447 static void
1448 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1449
1450 void
1451 slave_add(struct bond_dev_private *internals,
1452                 struct rte_eth_dev *slave_eth_dev)
1453 {
1454         struct bond_slave_details *slave_details =
1455                         &internals->slaves[internals->slave_count];
1456
1457         slave_details->port_id = slave_eth_dev->data->port_id;
1458         slave_details->last_link_status = 0;
1459
1460         /* Mark slave devices that don't support interrupts so we can
1461          * compensate when we start the bond
1462          */
1463         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1464                 slave_details->link_status_poll_enabled = 1;
1465         }
1466
1467         slave_details->link_status_wait_to_complete = 0;
1468         /* Persist the slave's current MAC address so it can be restored later */
1469         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1470                         sizeof(struct ether_addr));
1471 }
1472
1473 void
1474 bond_ethdev_primary_set(struct bond_dev_private *internals,
1475                 uint8_t slave_port_id)
1476 {
1477         int i;
1478
1479         if (internals->active_slave_count < 1)
1480                 internals->current_primary_port = slave_port_id;
1481         else
1482                 /* Search bonded device slave ports for new proposed primary port */
1483                 for (i = 0; i < internals->active_slave_count; i++) {
1484                         if (internals->active_slaves[i] == slave_port_id)
1485                                 internals->current_primary_port = slave_port_id;
1486                 }
1487 }
1488
1489 static void
1490 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1491
1492 static int
1493 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1494 {
1495         struct bond_dev_private *internals;
1496         int i;
1497
1498         /* slave eth dev will be started by bonded device */
1499         if (check_for_bonded_ethdev(eth_dev)) {
1500                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1501                                 eth_dev->data->port_id);
1502                 return -1;
1503         }
1504
1505         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1506         eth_dev->data->dev_started = 1;
1507
1508         internals = eth_dev->data->dev_private;
1509
1510         if (internals->slave_count == 0) {
1511                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1512                 return -1;
1513         }
1514
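             /* Without a user-defined MAC, adopt the primary slave's persisted
              * MAC address as the bonded device's address */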
1515         if (internals->user_defined_mac == 0) {
1516                 struct ether_addr *new_mac_addr = NULL;
1517
1518                 for (i = 0; i < internals->slave_count; i++)
1519                         if (internals->slaves[i].port_id == internals->primary_port)
1520                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1521
1522                 if (new_mac_addr == NULL)
1523                         return -1;
1524
1525                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1526                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1527                                         eth_dev->data->port_id);
1528                         return -1;
1529                 }
1530         }
1531
1532         /* Update all slave devices' MACs */
1533         if (mac_address_slaves_update(eth_dev) != 0)
1534                 return -1;
1535
1536         /* If bonded device is configured in promiscuous mode then re-apply it */
1537         if (internals->promiscuous_en)
1538                 bond_ethdev_promiscuous_enable(eth_dev);
1539
1540         /* Reconfigure each slave device if starting bonded device */
1541         for (i = 0; i < internals->slave_count; i++) {
1542                 if (slave_configure(eth_dev,
1543                                 &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
1544                         RTE_BOND_LOG(ERR,
1545                                         "bonded port (%d) failed to reconfigure slave device (%d)",
1546                                         eth_dev->data->port_id, internals->slaves[i].port_id);
1547                         return -1;
1548                 }
1549                 /* We will need to poll for link status if any slave doesn't
1550                  * support interrupts
1551                  */
1552                 if (internals->slaves[i].link_status_poll_enabled)
1553                         internals->link_status_polling_enabled = 1;
1554         }
1555         /* start polling if needed */
1556         if (internals->link_status_polling_enabled) {
1557                 rte_eal_alarm_set(
1558                         internals->link_status_polling_interval_ms * 1000,
1559                         bond_ethdev_slave_link_status_change_monitor,
1560                         (void *)&rte_eth_devices[internals->port_id]);
1561         }
1562
1563         if (internals->user_defined_primary_port)
1564                 bond_ethdev_primary_set(internals, internals->primary_port);
1565
1566         if (internals->mode == BONDING_MODE_8023AD)
1567                 bond_mode_8023ad_start(eth_dev);
1568
1569         if (internals->mode == BONDING_MODE_TLB ||
1570                         internals->mode == BONDING_MODE_ALB)
1571                 bond_tlb_enable(internals);
1572
1573         return 0;
1574 }
1575
1576 static void
1577 bond_ethdev_free_queues(struct rte_eth_dev *dev)
1578 {
1579         uint8_t i;
1580
1581         if (dev->data->rx_queues != NULL) {
1582                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1583                         rte_free(dev->data->rx_queues[i]);
1584                         dev->data->rx_queues[i] = NULL;
1585                 }
1586                 dev->data->nb_rx_queues = 0;
1587         }
1588
1589         if (dev->data->tx_queues != NULL) {
1590                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1591                         rte_free(dev->data->tx_queues[i]);
1592                         dev->data->tx_queues[i] = NULL;
1593                 }
1594                 dev->data->nb_tx_queues = 0;
1595         }
1596 }
1597
1598 void
1599 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
1600 {
1601         struct bond_dev_private *internals = eth_dev->data->dev_private;
1602         uint8_t i;
1603
1604         if (internals->mode == BONDING_MODE_8023AD) {
1605                 struct port *port;
1606                 void *pkt = NULL;
1607
1608                 bond_mode_8023ad_stop(eth_dev);
1609
1610                 /* Discard all messages to/from mode 4 state machines */
1611                 for (i = 0; i < internals->active_slave_count; i++) {
1612                         port = &mode_8023ad_ports[internals->active_slaves[i]];
1613
1614                         RTE_ASSERT(port->rx_ring != NULL);
1615                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
1616                                 rte_pktmbuf_free(pkt);
1617
1618                         RTE_ASSERT(port->tx_ring != NULL);
1619                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
1620                                 rte_pktmbuf_free(pkt);
1621                 }
1622         }
1623
1624         if (internals->mode == BONDING_MODE_TLB ||
1625                         internals->mode == BONDING_MODE_ALB) {
1626                 bond_tlb_disable(internals);
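                     /* Reset the per-slave TLB byte counters */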
1627                 for (i = 0; i < internals->active_slave_count; i++)
1628                         tlb_last_obytets[internals->active_slaves[i]] = 0;
1629         }
1630
1631         internals->active_slave_count = 0;
1632         internals->link_status_polling_enabled = 0;
1633         for (i = 0; i < internals->slave_count; i++)
1634                 internals->slaves[i].last_link_status = 0;
1635
1636         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1637         eth_dev->data->dev_started = 0;
1638 }
1639
1640 void
1641 bond_ethdev_close(struct rte_eth_dev *dev)
1642 {
1643         struct bond_dev_private *internals = dev->data->dev_private;
1644
1645         bond_ethdev_free_queues(dev);
1646         rte_bitmap_reset(internals->vlan_filter_bmp);
1647 }
1648
1649 /* forward declaration */
1650 static int bond_ethdev_configure(struct rte_eth_dev *dev);
1651
1652 static void
1653 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1654 {
1655         struct bond_dev_private *internals = dev->data->dev_private;
1656
1657         dev_info->max_mac_addrs = 1;
1658
1659         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
1660                                   internals->candidate_max_rx_pktlen : 2048;
1661
1662         dev_info->max_rx_queues = (uint16_t)128;
1663         dev_info->max_tx_queues = (uint16_t)512;
1664
1665         dev_info->min_rx_bufsize = 0;
1666         dev_info->pci_dev = NULL;
1667
1668         dev_info->rx_offload_capa = internals->rx_offload_capa;
1669         dev_info->tx_offload_capa = internals->tx_offload_capa;
1670         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
1671
1672         dev_info->reta_size = internals->reta_size;
1673 }
1674
1675 static int
1676 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1677 {
1678         int res;
1679         uint8_t i;
1680         struct bond_dev_private *internals = dev->data->dev_private;
1681
1682         /* don't do this while a slave is being added */
1683         rte_spinlock_lock(&internals->lock);
1684
1685         if (on)
1686                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
1687         else
1688                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
1689
1690         for (i = 0; i < internals->slave_count; i++) {
1691                 uint8_t port_id = internals->slaves[i].port_id;
1692
1693                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1694                 if (res == ENOTSUP)
1695                         RTE_LOG(WARNING, PMD,
1696                                 "Setting VLAN filter on slave port %u not supported.\n",
1697                                 port_id);
1698         }
1699
1700         rte_spinlock_unlock(&internals->lock);
1701         return 0;
1702 }
1703
1704 static int
1705 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1706                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
1707                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
1708 {
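             /* Allocate the bond's Rx queue structure on the device's NUMA node;
              * the caller-supplied socket_id is intentionally unused */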
1709         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
1710                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
1711                                         0, dev->data->numa_node);
1712         if (bd_rx_q == NULL)
1713                 return -1;
1714
1715         bd_rx_q->queue_id = rx_queue_id;
1716         bd_rx_q->dev_private = dev->data->dev_private;
1717
1718         bd_rx_q->nb_rx_desc = nb_rx_desc;
1719
1720         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
1721         bd_rx_q->mb_pool = mb_pool;
1722
1723         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
1724
1725         return 0;
1726 }
1727
1728 static int
1729 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1730                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
1731                 const struct rte_eth_txconf *tx_conf)
1732 {
1733         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
1734                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
1735                                         0, dev->data->numa_node);
1736
1737         if (bd_tx_q == NULL)
1738                 return -1;
1739
1740         bd_tx_q->queue_id = tx_queue_id;
1741         bd_tx_q->dev_private = dev->data->dev_private;
1742
1743         bd_tx_q->nb_tx_desc = nb_tx_desc;
1744         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
1745
1746         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
1747
1748         return 0;
1749 }
1750
1751 static void
1752 bond_ethdev_rx_queue_release(void *queue)
1753 {
1754         if (queue == NULL)
1755                 return;
1756
1757         rte_free(queue);
1758 }
1759
1760 static void
1761 bond_ethdev_tx_queue_release(void *queue)
1762 {
1763         if (queue == NULL)
1764                 return;
1765
1766         rte_free(queue);
1767 }
1768
1769 static void
1770 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
1771 {
1772         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
1773         struct bond_dev_private *internals;
1774
1775         /* polling_slave_found defaults to true so that polling is not
1776          * disabled if we cannot get the lock */
1777         int i, polling_slave_found = 1;
1778
1779         if (cb_arg == NULL)
1780                 return;
1781
1782         bonded_ethdev = (struct rte_eth_dev *)cb_arg;
1783         internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
1784
1785         if (!bonded_ethdev->data->dev_started ||
1786                 !internals->link_status_polling_enabled)
1787                 return;
1788
1789         /* If device is currently being configured then don't check slaves'
1790          * link status; wait until the next period */
1791         if (rte_spinlock_trylock(&internals->lock)) {
1792                 if (internals->slave_count > 0)
1793                         polling_slave_found = 0;
1794
1795                 for (i = 0; i < internals->slave_count; i++) {
1796                         if (!internals->slaves[i].link_status_poll_enabled)
1797                                 continue;
1798
1799                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
1800                         polling_slave_found = 1;
1801
1802                         /* Update slave link status */
1803                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
1804                                         internals->slaves[i].link_status_wait_to_complete);
1805
1806                         /* if link status has changed since last checked then call lsc
1807                          * event callback */
1808                         if (slave_ethdev->data->dev_link.link_status !=
1809                                         internals->slaves[i].last_link_status) {
1810                                 internals->slaves[i].last_link_status =
1811                                                 slave_ethdev->data->dev_link.link_status;
1812
1813                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
1814                                                 RTE_ETH_EVENT_INTR_LSC,
1815                                                 &bonded_ethdev->data->port_id);
1816                         }
1817                 }
1818                 rte_spinlock_unlock(&internals->lock);
1819         }
1820
1821         if (polling_slave_found)
1822                 /* Set alarm to continue monitoring link status of slave ethdevs */
1823                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
1824                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
1825 }
1826
1827 static int
1828 bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
1829                 int wait_to_complete)
1830 {
1831         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1832
1833         if (!bonded_eth_dev->data->dev_started ||
1834                 internals->active_slave_count == 0) {
1835                 bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1836                 return 0;
1837         } else {
1838                 struct rte_eth_dev *slave_eth_dev;
1839                 int i, link_up = 0;
1840
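                     /* The bond reports link up if any active slave has link up */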
1841                 for (i = 0; i < internals->active_slave_count; i++) {
1842                         slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
1843
1844                         (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
1845                                         wait_to_complete);
1846                         if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
1847                                 link_up = 1;
1848                                 break;
1849                         }
1850                 }
1851
1852                 bonded_eth_dev->data->dev_link.link_status = link_up;
1853         }
1854
1855         return 0;
1856 }
1857
1858 static void
1859 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1860 {
1861         struct bond_dev_private *internals = dev->data->dev_private;
1862         struct rte_eth_stats slave_stats;
1863         int i, j;
1864
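             /* Sum each slave's counters, including the per-queue statistics,
              * into the bonded device's totals */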
1865         for (i = 0; i < internals->slave_count; i++) {
1866                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
1867
1868                 stats->ipackets += slave_stats.ipackets;
1869                 stats->opackets += slave_stats.opackets;
1870                 stats->ibytes += slave_stats.ibytes;
1871                 stats->obytes += slave_stats.obytes;
1872                 stats->imissed += slave_stats.imissed;
1873                 stats->ierrors += slave_stats.ierrors;
1874                 stats->oerrors += slave_stats.oerrors;
1875                 stats->rx_nombuf += slave_stats.rx_nombuf;
1876
1877                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1878                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
1879                         stats->q_opackets[j] += slave_stats.q_opackets[j];
1880                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
1881                         stats->q_obytes[j] += slave_stats.q_obytes[j];
1882                         stats->q_errors[j] += slave_stats.q_errors[j];
1883                 }
1884
1885         }
1886 }
1887
1888 static void
1889 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
1890 {
1891         struct bond_dev_private *internals = dev->data->dev_private;
1892         int i;
1893
1894         for (i = 0; i < internals->slave_count; i++)
1895                 rte_eth_stats_reset(internals->slaves[i].port_id);
1896 }
1897
1898 static void
1899 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1900 {
1901         struct bond_dev_private *internals = eth_dev->data->dev_private;
1902         int i;
1903
1904         internals->promiscuous_en = 1;
1905
1906         switch (internals->mode) {
1907         /* Promiscuous mode is propagated to all slaves */
1908         case BONDING_MODE_ROUND_ROBIN:
1909         case BONDING_MODE_BALANCE:
1910         case BONDING_MODE_BROADCAST:
1911                 for (i = 0; i < internals->slave_count; i++)
1912                         rte_eth_promiscuous_enable(internals->slaves[i].port_id);
1913                 break;
1914         /* In mode 4, promiscuous mode is managed when a slave is added/removed */
1915         case BONDING_MODE_8023AD:
1916                 break;
1917         /* Promiscuous mode is propagated only to primary slave */
1918         case BONDING_MODE_ACTIVE_BACKUP:
1919         case BONDING_MODE_TLB:
1920         case BONDING_MODE_ALB:
1921         default:
1922                 rte_eth_promiscuous_enable(internals->current_primary_port);
1923         }
1924 }
1925
1926 static void
1927 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
1928 {
1929         struct bond_dev_private *internals = dev->data->dev_private;
1930         int i;
1931
1932         internals->promiscuous_en = 0;
1933
1934         switch (internals->mode) {
1935         /* Promiscuous mode is propagated to all slaves */
1936         case BONDING_MODE_ROUND_ROBIN:
1937         case BONDING_MODE_BALANCE:
1938         case BONDING_MODE_BROADCAST:
1939                 for (i = 0; i < internals->slave_count; i++)
1940                         rte_eth_promiscuous_disable(internals->slaves[i].port_id);
1941                 break;
1942         /* In mode 4, promiscuous mode is managed when a slave is added/removed */
1943         case BONDING_MODE_8023AD:
1944                 break;
1945         /* Promiscuous mode is propagated only to primary slave */
1946         case BONDING_MODE_ACTIVE_BACKUP:
1947         case BONDING_MODE_TLB:
1948         case BONDING_MODE_ALB:
1949         default:
1950                 rte_eth_promiscuous_disable(internals->current_primary_port);
1951         }
1952 }
1953
1954 static void
1955 bond_ethdev_delayed_lsc_propagation(void *arg)
1956 {
1957         if (arg == NULL)
1958                 return;
1959
1960         _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
1961                         RTE_ETH_EVENT_INTR_LSC, NULL);
1962 }
1963
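/*
 * Handles link status change events from slave ports: activates or
 * deactivates the slave, updates the bond's primary port and link state,
 * and propagates the LSC event, optionally after a configured delay.
 */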
1964 void
1965 bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
1966                 void *param)
1967 {
1968         struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
1969         struct bond_dev_private *internals;
1970         struct rte_eth_link link;
1971
1972         int i, valid_slave = 0;
1973         uint8_t active_pos;
1974         uint8_t lsc_flag = 0;
1975
1976         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
1977                 return;
1978
1979         bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
1980         slave_eth_dev = &rte_eth_devices[port_id];
1981
1982         if (check_for_bonded_ethdev(bonded_eth_dev))
1983                 return;
1984
1985         internals = bonded_eth_dev->data->dev_private;
1986
1987         /* If the device isn't started don't handle interrupts */
1988         if (!bonded_eth_dev->data->dev_started)
1989                 return;
1990
1991         /* verify that port_id is a valid slave of bonded port */
1992         for (i = 0; i < internals->slave_count; i++) {
1993                 if (internals->slaves[i].port_id == port_id) {
1994                         valid_slave = 1;
1995                         break;
1996                 }
1997         }
1998
1999         if (!valid_slave)
2000                 return;
2001
2002         /* Search for port in active port list */
2003         active_pos = find_slave_by_id(internals->active_slaves,
2004                         internals->active_slave_count, port_id);
2005
2006         rte_eth_link_get_nowait(port_id, &link);
2007         if (link.link_status) {
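                     /* Slave is already in the active list: nothing more to do */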
2008                 if (active_pos < internals->active_slave_count)
2009                         return;
2010
2011                 /* if no active slave ports then set this port to be primary port */
2012                 if (internals->active_slave_count < 1) {
2013                         /* If first active slave, then change link status */
2014                         bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
2015                         internals->current_primary_port = port_id;
2016                         lsc_flag = 1;
2017
2018                         mac_address_slaves_update(bonded_eth_dev);
2019
2020                         /* Inherit eth dev link properties from first active slave */
2021                         link_properties_set(bonded_eth_dev,
2022                                         &(slave_eth_dev->data->dev_link));
2023                 } else {
2024                         if (link_properties_valid(
2025                                 &bonded_eth_dev->data->dev_link, &link) != 0) {
2026                                 slave_eth_dev->data->dev_flags &=
2027                                         (~RTE_ETH_DEV_BONDED_SLAVE);
2028                                 RTE_LOG(ERR, PMD,
2029                                         "port %u invalid speed/duplex\n",
2030                                         port_id);
2031                                 return;
2032                         }
2033                 }
2034
2035                 activate_slave(bonded_eth_dev, port_id);
2036
2037                 /* If user has defined the primary port then default to using it */
2038                 if (internals->user_defined_primary_port &&
2039                                 internals->primary_port == port_id)
2040                         bond_ethdev_primary_set(internals, port_id);
2041         } else {
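                     /* Slave was not in the active list: nothing to do */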
2042                 if (active_pos == internals->active_slave_count)
2043                         return;
2044
2045                 /* Remove from active slave list */
2046                 deactivate_slave(bonded_eth_dev, port_id);
2047
2048                 /* No active slaves, change link status to down and reset other
2049                  * link properties */
2050                 if (internals->active_slave_count < 1) {
2051                         lsc_flag = 1;
2052                         bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2053
2054                         link_properties_reset(bonded_eth_dev);
2055                 }
2056
2057         /* Update primary id: take the first active slave from the list or,
2058          * if none is available, fall back to the configured primary port */
2059                 if (port_id == internals->current_primary_port) {
2060                         if (internals->active_slave_count > 0)
2061                                 bond_ethdev_primary_set(internals,
2062                                                 internals->active_slaves[0]);
2063                         else
2064                                 internals->current_primary_port = internals->primary_port;
2065                 }
2066         }
2067
2068         if (lsc_flag) {
2069                 /* Cancel any outstanding delayed LSC propagation alarm if delays are enabled */
2070                 if (internals->link_up_delay_ms > 0 ||
2071                         internals->link_down_delay_ms > 0)
2072                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2073                                         bonded_eth_dev);
2074
2075                 if (bonded_eth_dev->data->dev_link.link_status) {
2076                         if (internals->link_up_delay_ms > 0)
2077                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2078                                                 bond_ethdev_delayed_lsc_propagation,
2079                                                 (void *)bonded_eth_dev);
2080                         else
2081                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2082                                                 RTE_ETH_EVENT_INTR_LSC, NULL);
2083
2084                 } else {
2085                         if (internals->link_down_delay_ms > 0)
2086                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2087                                                 bond_ethdev_delayed_lsc_propagation,
2088                                                 (void *)bonded_eth_dev);
2089                         else
2090                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2091                                                 RTE_ETH_EVENT_INTR_LSC, NULL);
2092                 }
2093         }
2094 }
2095
2096 static int
2097 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2098                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2099 {
2100         unsigned i, j;
2101         int result = 0;
2102         int slave_reta_size;
2103         unsigned reta_count;
2104         struct bond_dev_private *internals = dev->data->dev_private;
2105
2106         if (reta_size != internals->reta_size)
2107                 return -EINVAL;
2108
2109         /* Copy RETA table */
2110         reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2111
2112         for (i = 0; i < reta_count; i++) {
2113                 internals->reta_conf[i].mask = reta_conf[i].mask;
2114                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2115                         if ((reta_conf[i].mask >> j) & 0x01)
2116                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2117         }
2118
2119         /* Replicate the supplied entries across the rest of the internal RETA */
2120         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2121                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2122                                 sizeof(internals->reta_conf[0]) * reta_count);
2123
2124         /* Propagate RETA over slaves */
2125         for (i = 0; i < internals->slave_count; i++) {
2126                 slave_reta_size = internals->slaves[i].reta_size;
2127                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2128                                 &internals->reta_conf[0], slave_reta_size);
2129                 if (result < 0)
2130                         return result;
2131         }
2132
2133         return 0;
2134 }
2135
2136 static int
2137 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2138                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2139 {
2140         int i, j;
2141         struct bond_dev_private *internals = dev->data->dev_private;
2142
2143         if (reta_size != internals->reta_size)
2144                 return -EINVAL;
2145
2146         /* Copy RETA table */
2147         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2148                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2149                         if ((reta_conf[i].mask >> j) & 0x01)
2150                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2151
2152         return 0;
2153 }
2154
2155 static int
2156 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2157                 struct rte_eth_rss_conf *rss_conf)
2158 {
2159         int i, result = 0;
2160         struct bond_dev_private *internals = dev->data->dev_private;
2161         struct rte_eth_rss_conf bond_rss_conf;
2162
2163         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2164
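             /* Restrict the requested hash functions to those the bond supports */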
2165         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2166
2167         if (bond_rss_conf.rss_hf != 0)
2168                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2169
2170         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2171                         sizeof(internals->rss_key)) {
2172                 if (bond_rss_conf.rss_key_len == 0)
2173                         bond_rss_conf.rss_key_len = 40;
2174                 internals->rss_key_len = bond_rss_conf.rss_key_len;
2175                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
2176                                 internals->rss_key_len);
2177         }
2178
2179         for (i = 0; i < internals->slave_count; i++) {
2180                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
2181                                 &bond_rss_conf);
2182                 if (result < 0)
2183                         return result;
2184         }
2185
2186         return 0;
2187 }
2188
2189 static int
2190 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
2191                 struct rte_eth_rss_conf *rss_conf)
2192 {
2193         struct bond_dev_private *internals = dev->data->dev_private;
2194
2195         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
2196         rss_conf->rss_key_len = internals->rss_key_len;
2197         if (rss_conf->rss_key)
2198                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
2199
2200         return 0;
2201 }
2202
2203 const struct eth_dev_ops default_dev_ops = {
2204         .dev_start            = bond_ethdev_start,
2205         .dev_stop             = bond_ethdev_stop,
2206         .dev_close            = bond_ethdev_close,
2207         .dev_configure        = bond_ethdev_configure,
2208         .dev_infos_get        = bond_ethdev_info,
2209         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
2210         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
2211         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
2212         .rx_queue_release     = bond_ethdev_rx_queue_release,
2213         .tx_queue_release     = bond_ethdev_tx_queue_release,
2214         .link_update          = bond_ethdev_link_update,
2215         .stats_get            = bond_ethdev_stats_get,
2216         .stats_reset          = bond_ethdev_stats_reset,
2217         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
2218         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
2219         .reta_update          = bond_ethdev_rss_reta_update,
2220         .reta_query           = bond_ethdev_rss_reta_query,
2221         .rss_hash_update      = bond_ethdev_rss_hash_update,
2222         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get
2223 };
2224
2225 static int
2226 bond_probe(const char *name, const char *params)
2227 {
2228         struct bond_dev_private *internals;
2229         struct rte_kvargs *kvlist;
2230         uint8_t bonding_mode, socket_id;
2231         int  arg_count, port_id;
2232
2233         RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
2234
2235         kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
2236         if (kvlist == NULL)
2237                 return -1;
2238
2239         /* Parse link bonding mode */
2240         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
2241                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
2242                                 &bond_ethdev_parse_slave_mode_kvarg,
2243                                 &bonding_mode) != 0) {
2244                         RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
2245                                         name);
2246                         goto parse_error;
2247                 }
2248         } else {
2249                 RTE_LOG(ERR, EAL, "Mode must be specified exactly once for bonded "
2250                                 "device %s\n", name);
2251                 goto parse_error;
2252         }
2253
2254         /* Parse socket id to create bonding device on */
2255         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
2256         if (arg_count == 1) {
2257                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
2258                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
2259                                 != 0) {
2260                         RTE_LOG(ERR, EAL, "Invalid socket id specified for "
2261                                         "bonded device %s\n", name);
2262                         goto parse_error;
2263                 }
2264         } else if (arg_count > 1) {
2265                 RTE_LOG(ERR, EAL, "Socket id can be specified only once for "
2266                                 "bonded device %s\n", name);
2267                 goto parse_error;
2268         } else {
2269                 socket_id = rte_socket_id();
2270         }
2271
2272         /* Create link bonding eth device */
2273         port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
2274         if (port_id < 0) {
2275                 RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
2276                                 "socket %u.\n", name, bonding_mode, socket_id);
2277                 goto parse_error;
2278         }
2279         internals = rte_eth_devices[port_id].data->dev_private;
2280         internals->kvlist = kvlist;
2281
2282         RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on "
2283                         "socket %u.\n", name, port_id, bonding_mode, socket_id);
2284         return 0;
2285
2286 parse_error:
2287         rte_kvargs_free(kvlist);
2288
2289         return -1;
2290 }
2291
2292 static int
2293 bond_remove(const char *name)
2294 {
2295         int  ret;
2296
2297         if (name == NULL)
2298                 return -EINVAL;
2299
2300         RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
2301
2302         /* free link bonding eth device */
2303         ret = rte_eth_bond_free(name);
2304         if (ret < 0)
2305                 RTE_LOG(ERR, EAL, "Failed to free %s\n", name);
2306
2307         return ret;
2308 }
2309
2310 /* This function resolves the slave port ids after all the other physical
2311  * and virtual devices have been allocated */
2312 static int
2313 bond_ethdev_configure(struct rte_eth_dev *dev)
2314 {
2315         char *name = dev->data->name;
2316         struct bond_dev_private *internals = dev->data->dev_private;
2317         struct rte_kvargs *kvlist = internals->kvlist;
2318         int arg_count;
2319         uint8_t port_id = dev - rte_eth_devices;
2320
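             /* Default 40-byte hash key used to initialise the bond's RSS key
              * when RSS is enabled at configure time */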
2321         static const uint8_t default_rss_key[40] = {
2322                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
2323                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2324                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
2325                 0xBE, 0xAC, 0x01, 0xFA
2326         };
2327
2328         unsigned i, j;
2329
2330         /* If RSS is enabled, fill table and key with default values */
2331         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
2332                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
2333                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
2334                 memcpy(internals->rss_key, default_rss_key, 40);
2335
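                     /* Spread RETA entries round-robin across the configured Rx queues */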
2336                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
2337                         internals->reta_conf[i].mask = ~0LL;
2338                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2339                                 internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
2340                 }
2341         }
2342
2343         /* set the max_rx_pktlen */
2344         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
2345
2346         /*
2347          * If no kvlist, this bonded device was created through the
2348          * bonding API.
2349          */
2350         if (!kvlist)
2351                 return 0;
2352
2353         /* Parse MAC address for bonded device */
2354         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
2355         if (arg_count == 1) {
2356                 struct ether_addr bond_mac;
2357
2358                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
2359                                 &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
2360                         RTE_LOG(INFO, EAL, "Invalid MAC address for bonded device %s\n",
2361                                         name);
2362                         return -1;
2363                 }
2364
2365                 /* Set MAC address */
2366                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
2367                         RTE_LOG(ERR, EAL,
2368                                         "Failed to set MAC address on bonded device %s\n",
2369                                         name);
2370                         return -1;
2371                 }
2372         } else if (arg_count > 1) {
2373                 RTE_LOG(ERR, EAL,
2374                                 "MAC address can be specified only once for bonded device %s\n",
2375                                 name);
2376                 return -1;
2377         }
2378
2379         /* Parse/set balance mode transmit policy */
2380         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
2381         if (arg_count == 1) {
2382                 uint8_t xmit_policy;
2383
2384                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
2385                                 &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
2386                                                 0) {
2387                         RTE_LOG(INFO, EAL,
2388                                         "Invalid xmit policy specified for bonded device %s\n",
2389                                         name);
2390                         return -1;
2391                 }
2392
2393                 /* Set balance mode transmit policy */
2394                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
2395                         RTE_LOG(ERR, EAL,
2396                                         "Failed to set balance xmit policy on bonded device %s\n",
2397                                         name);
2398                         return -1;
2399                 }
2400         } else if (arg_count > 1) {
2401                 RTE_LOG(ERR, EAL,
2402                                 "Transmit policy can be specified only once for bonded device"
2403                                 " %s\n", name);
2404                 return -1;
2405         }
2406
2407         /* Parse/add slave ports to bonded device */
2408         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
2409                 struct bond_ethdev_slave_ports slave_ports;
2410                 unsigned i;
2411
2412                 memset(&slave_ports, 0, sizeof(slave_ports));
2413
2414                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
2415                                 &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
2416                         RTE_LOG(ERR, EAL,
2417                                         "Failed to parse slave ports for bonded device %s\n",
2418                                         name);
2419                         return -1;
2420                 }
2421
2422                 for (i = 0; i < slave_ports.slave_count; i++) {
2423                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
2424                                 RTE_LOG(ERR, EAL,
2425                                                 "Failed to add port %d as slave to bonded device %s\n",
2426                                                 slave_ports.slaves[i], name);
2427                         }
2428                 }
2429
2430         } else {
2431                 RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
2432                 return -1;
2433         }
2434
2435         /* Parse/set primary slave port id*/
2436         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
2437         if (arg_count == 1) {
2438                 uint8_t primary_slave_port_id;
2439
2440                 if (rte_kvargs_process(kvlist,
2441                                 PMD_BOND_PRIMARY_SLAVE_KVARG,
2442                                 &bond_ethdev_parse_primary_slave_port_id_kvarg,
2443                                 &primary_slave_port_id) < 0) {
2444                         RTE_LOG(INFO, EAL,
2445                                         "Invalid primary slave port id specified for bonded device"
2446                                         " %s\n", name);
2447                         return -1;
2448                 }
2449
2450                 /* Set primary slave port id */
2451                 if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
2452                                 != 0) {
2453                         RTE_LOG(ERR, EAL,
2454                                         "Failed to set primary slave port %d on bonded device %s\n",
2455                                         primary_slave_port_id, name);
2456                         return -1;
2457                 }
2458         } else if (arg_count > 1) {
2459                 RTE_LOG(INFO, EAL,
2460                                 "Primary slave can be specified only once for bonded device"
2461                                 " %s\n", name);
2462                 return -1;
2463         }
2464
2465         /* Parse link status monitor polling interval */
2466         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
2467         if (arg_count == 1) {
2468                 uint32_t lsc_poll_interval_ms;
2469
2470                 if (rte_kvargs_process(kvlist,
2471                                 PMD_BOND_LSC_POLL_PERIOD_KVARG,
2472                                 &bond_ethdev_parse_time_ms_kvarg,
2473                                 &lsc_poll_interval_ms) < 0) {
2474                         RTE_LOG(INFO, EAL,
2475                                         "Invalid lsc polling interval value specified for bonded"
2476                                         " device %s\n", name);
2477                         return -1;
2478                 }
2479
2480                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
2481                                 != 0) {
2482                         RTE_LOG(ERR, EAL,
2483                                         "Failed to set lsc monitor polling interval (%u ms) on"
2484                                         " bonded device %s\n", lsc_poll_interval_ms, name);
2485                         return -1;
2486                 }
2487         } else if (arg_count > 1) {
2488                 RTE_LOG(INFO, EAL,
2489                                 "LSC polling interval can be specified only once for bonded"
2490                                 " device %s\n", name);
2491                 return -1;
2492         }
2493
2494         /* Parse link up interrupt propagation delay */
2495         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
2496         if (arg_count == 1) {
2497                 uint32_t link_up_delay_ms;
2498
2499                 if (rte_kvargs_process(kvlist,
2500                                 PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
2501                                 &bond_ethdev_parse_time_ms_kvarg,
2502                                 &link_up_delay_ms) < 0) {
2503                         RTE_LOG(INFO, EAL,
2504                                         "Invalid link up propagation delay value specified for"
2505                                         " bonded device %s\n", name);
2506                         return -1;
2507                 }
2508
2509                 /* Set link up propagation delay */
2510                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
2511                                 != 0) {
2512                         RTE_LOG(ERR, EAL,
2513                                         "Failed to set link up propagation delay (%u ms) on bonded"
2514                                         " device %s\n", link_up_delay_ms, name);
2515                         return -1;
2516                 }
2517         } else if (arg_count > 1) {
2518                 RTE_LOG(INFO, EAL,
2519                                 "Link up propagation delay can be specified only once for"
2520                                 " bonded device %s\n", name);
2521                 return -1;
2522         }
2523
2524         /* Parse link down interrupt propagation delay */
2525         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
2526         if (arg_count == 1) {
2527                 uint32_t link_down_delay_ms;
2528
2529                 if (rte_kvargs_process(kvlist,
2530                                 PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
2531                                 &bond_ethdev_parse_time_ms_kvarg,
2532                                 &link_down_delay_ms) < 0) {
2533                         RTE_LOG(INFO, EAL,
2534                                         "Invalid link down propagation delay value specified for"
2535                                         " bonded device %s\n", name);
2536                         return -1;
2537                 }
2538
2539                 /* Set link down propagation delay */
2540                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
2541                                 != 0) {
2542                         RTE_LOG(ERR, EAL,
2543                                         "Failed to set link down propagation delay (%u ms) on"
2544                                         " bonded device %s\n", link_down_delay_ms, name);
2545                         return -1;
2546                 }
2547         } else if (arg_count > 1) {
2548                 RTE_LOG(INFO, EAL,
2549                                 "Link down propagation delay can be specified only once for"
2550                                 " bonded device %s\n", name);
2551                 return -1;
2552         }
2553
2554         return 0;
2555 }
2556
2557 static struct rte_vdev_driver bond_drv = {
2558         .probe = bond_probe,
2559         .remove = bond_remove,
2560 };
2561
2562 RTE_PMD_REGISTER_VDEV(net_bonding, bond_drv);
2563
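/*
 * Illustrative EAL vdev string for the parameters registered below (the
 * device name and slave PCI addresses are placeholders):
 *   --vdev 'net_bonding0,mode=2,slave=0000:01:00.0,slave=0000:01:00.1,xmit_policy=l34'
 */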
2564 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
2565         "slave=<ifc> "
2566         "primary=<ifc> "
2567         "mode=[0-6] "
2568         "xmit_policy=[l2 | l23 | l34] "
2569         "socket_id=<int> "
2570         "mac=<mac addr> "
2571         "lsc_poll_period_ms=<int> "
2572         "up_delay=<int> "
2573         "down_delay=<int>");