bonding: use existing enslaved device queues
[dpdk.git] / drivers/net/bonding/rte_eth_bond_pmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 #include <stdlib.h>
34 #include <netinet/in.h>
35
36 #include <rte_mbuf.h>
37 #include <rte_malloc.h>
38 #include <rte_ethdev.h>
39 #include <rte_tcp.h>
40 #include <rte_udp.h>
41 #include <rte_ip.h>
42 #include <rte_ip_frag.h>
43 #include <rte_devargs.h>
44 #include <rte_kvargs.h>
45 #include <rte_dev.h>
46 #include <rte_alarm.h>
47 #include <rte_cycles.h>
48
49 #include "rte_eth_bond.h"
50 #include "rte_eth_bond_private.h"
51 #include "rte_eth_bond_8023ad_private.h"
52
53 #define REORDER_PERIOD_MS 10
54
55 #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
56
57 /* Table for statistics in mode 5 TLB */
58 static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
59
60 static inline size_t
61 get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
62 {
63         size_t vlan_offset = 0;
64
65         if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
66                 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
67
68                 vlan_offset = sizeof(struct vlan_hdr);
69                 *proto = vlan_hdr->eth_proto;
70
71                 if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
72                         vlan_hdr = vlan_hdr + 1;
73                         *proto = vlan_hdr->eth_proto;
74                         vlan_offset += sizeof(struct vlan_hdr);
75                 }
76         }
77         return vlan_offset;
78 }
79
80 static uint16_t
81 bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
82 {
83         struct bond_dev_private *internals;
84
85         uint16_t num_rx_slave = 0;
86         uint16_t num_rx_total = 0;
87
88         int i;
89
90         /* Cast to structure containing the bonded device's port id and queue id */
91         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
92
93         internals = bd_rx_q->dev_private;
94
95
96         for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
97                 /* Offset of pointer to *bufs increases as packets are received
98                  * from other slaves */
99                 num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
100                                 bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
101                 if (num_rx_slave) {
102                         num_rx_total += num_rx_slave;
103                         nb_pkts -= num_rx_slave;
104                 }
105         }
106
107         return num_rx_total;
108 }
109
110 static uint16_t
111 bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
112                 uint16_t nb_pkts)
113 {
114         struct bond_dev_private *internals;
115
116         /* Cast to structure containing the bonded device's port id and queue id */
117         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
118
119         internals = bd_rx_q->dev_private;
120
121         return rte_eth_rx_burst(internals->current_primary_port,
122                         bd_rx_q->queue_id, bufs, nb_pkts);
123 }
124
125 static uint16_t
126 bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
127                 uint16_t nb_pkts)
128 {
129         /* Cast to structure containing the bonded device's port id and queue id */
130         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
131         struct bond_dev_private *internals = bd_rx_q->dev_private;
132         struct ether_addr bond_mac;
133
134         struct ether_hdr *hdr;
135
136         const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
137         uint16_t num_rx_total = 0;      /* Total number of received packets */
138         uint8_t slaves[RTE_MAX_ETHPORTS];
139         uint8_t slave_count;
140
141         uint8_t collecting;  /* current slave collecting status */
142         const uint8_t promisc = internals->promiscuous_en;
143         uint8_t i, j, k;
144
145         rte_eth_macaddr_get(internals->port_id, &bond_mac);
146         /* Copy slave list to protect against slave up/down changes during rx
147          * bursting */
148         slave_count = internals->active_slave_count;
149         memcpy(slaves, internals->active_slaves,
150                         sizeof(internals->active_slaves[0]) * slave_count);
151
152         for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
153                 j = num_rx_total;
154                 collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
155
156                 /* Read packets from this slave */
157                 num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
158                                 &bufs[num_rx_total], nb_pkts - num_rx_total);
159
160                 for (k = j; k < 2 && k < num_rx_total; k++)
161                         rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
162
163                 /* Handle slow protocol packets. */
164                 while (j < num_rx_total) {
165                         if (j + 3 < num_rx_total)
166                                 rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
167
168                         hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
169                         /* Remove packet from array if it is a slow packet or the slave is not
170                          * in collecting state, or the bonding interface is not in promiscuous
171                          * mode and the packet destination address does not match. */
172                         if (unlikely(hdr->ether_type == ether_type_slow_be ||
173                                 !collecting || (!promisc &&
174                                         !is_multicast_ether_addr(&hdr->d_addr) &&
175                                         !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
176
177                                 if (hdr->ether_type == ether_type_slow_be) {
178                                         bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
179                                                 bufs[j]);
180                                 } else
181                                         rte_pktmbuf_free(bufs[j]);
182
183                                 /* Packet is managed by mode 4 or dropped, shift the array */
184                                 num_rx_total--;
185                                 if (j < num_rx_total) {
186                                         memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
187                                                 (num_rx_total - j));
188                                 }
189                         } else
190                                 j++;
191                 }
192         }
193
194         return num_rx_total;
195 }
196
197 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
198 uint32_t burstnumberRX;
199 uint32_t burstnumberTX;
200
201 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
202
203 static void
204 arp_op_name(uint16_t arp_op, char *buf)
205 {
206         switch (arp_op) {
207         case ARP_OP_REQUEST:
208                 snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
209                 return;
210         case ARP_OP_REPLY:
211                 snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
212                 return;
213         case ARP_OP_REVREQUEST:
214                 snprintf(buf, sizeof("Reverse ARP Request"), "%s",
215                                 "Reverse ARP Request");
216                 return;
217         case ARP_OP_REVREPLY:
218                 snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
219                                 "Reverse ARP Reply");
220                 return;
221         case ARP_OP_INVREQUEST:
222                 snprintf(buf, sizeof("Peer Identify Request"), "%s",
223                                 "Peer Identify Request");
224                 return;
225         case ARP_OP_INVREPLY:
226                 snprintf(buf, sizeof("Peer Identify Reply"), "%s",
227                                 "Peer Identify Reply");
228                 return;
229         default:
230                 break;
231         }
232         snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
233         return;
234 }
235 #endif
236 #define MaxIPv4String   16
237 static void
238 ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
239 {
240         uint32_t ipv4_addr;
241
242         ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
243         snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
244                 (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
245                 ipv4_addr & 0xFF);
246 }
247
248 #define MAX_CLIENTS_NUMBER      128
249 uint8_t active_clients;
250 struct client_stats_t {
251         uint8_t port;
252         uint32_t ipv4_addr;
253         uint32_t ipv4_rx_packets;
254         uint32_t ipv4_tx_packets;
255 };
256 struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
257
258 static void
259 update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
260 {
261         int i = 0;
262
263         for (; i < MAX_CLIENTS_NUMBER; i++)     {
264                 if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port))      {
265                         /* Existing client, just update its RX or TX packet count */
266                         if (TXorRXindicator == &burstnumberRX)
267                                 client_stats[i].ipv4_rx_packets++;
268                         else
269                                 client_stats[i].ipv4_tx_packets++;
270                         return;
271                 }
272         }
273         /* We have a new client. Insert it into the table and update its stats */
274         if (TXorRXindicator == &burstnumberRX)
275                 client_stats[active_clients].ipv4_rx_packets++;
276         else
277                 client_stats[active_clients].ipv4_tx_packets++;
278         client_stats[active_clients].ipv4_addr = addr;
279         client_stats[active_clients].port = port;
280         active_clients++;
281
282 }
283
284 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
285 #define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber)     \
286                 RTE_LOG(DEBUG, PMD, \
287                 "%s " \
288                 "port:%d " \
289                 "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
290                 "SrcIP:%s " \
291                 "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
292                 "DstIP:%s " \
293                 "%s " \
294                 "%d\n", \
295                 info, \
296                 port, \
297                 eth_h->s_addr.addr_bytes[0], \
298                 eth_h->s_addr.addr_bytes[1], \
299                 eth_h->s_addr.addr_bytes[2], \
300                 eth_h->s_addr.addr_bytes[3], \
301                 eth_h->s_addr.addr_bytes[4], \
302                 eth_h->s_addr.addr_bytes[5], \
303                 src_ip, \
304                 eth_h->d_addr.addr_bytes[0], \
305                 eth_h->d_addr.addr_bytes[1], \
306                 eth_h->d_addr.addr_bytes[2], \
307                 eth_h->d_addr.addr_bytes[3], \
308                 eth_h->d_addr.addr_bytes[4], \
309                 eth_h->d_addr.addr_bytes[5], \
310                 dst_ip, \
311                 arp_op, \
312                 ++burstnumber)
313 #endif
314
315 static void
316 mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
317                 uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
318 {
319         struct ipv4_hdr *ipv4_h;
320 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
321         struct arp_hdr *arp_h;
322         char dst_ip[16];
323         char ArpOp[24];
324         char buf[16];
325 #endif
326         char src_ip[16];
327
328         uint16_t ether_type = eth_h->ether_type;
329         uint16_t offset = get_vlan_offset(eth_h, &ether_type);
330
331 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
332         snprintf(buf, 16, "%s", info);
333 #endif
334
335         if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
336                 ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
337                 ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
338 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
339                 ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
340                 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
341 #endif
342                 update_client_stats(ipv4_h->src_addr, port, burstnumber);
343         }
344 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
345         else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
346                 arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
347                 ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
348                 ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
349                 arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
350                 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
351         }
352 #endif
353 }
354 #endif
355
356 static uint16_t
357 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
358 {
359         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
360         struct bond_dev_private *internals = bd_tx_q->dev_private;
361         struct ether_hdr *eth_h;
362         uint16_t ether_type, offset;
363         uint16_t nb_recv_pkts;
364         int i;
365
366         nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
367
368         for (i = 0; i < nb_recv_pkts; i++) {
369                 eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
370                 ether_type = eth_h->ether_type;
371                 offset = get_vlan_offset(eth_h, &ether_type);
372
373                 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
374 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
375                         mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
376 #endif
377                         bond_mode_alb_arp_recv(eth_h, offset, internals);
378                 }
379 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
380                 else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
381                         mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
382 #endif
383         }
384
385         return nb_recv_pkts;
386 }
387
388 static uint16_t
389 bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
390                 uint16_t nb_pkts)
391 {
392         struct bond_dev_private *internals;
393         struct bond_tx_queue *bd_tx_q;
394
395         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
396         uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
397
398         uint8_t num_of_slaves;
399         uint8_t slaves[RTE_MAX_ETHPORTS];
400
401         uint16_t num_tx_total = 0, num_tx_slave;
402
403         static int slave_idx = 0;
404         int i, cslave_idx = 0, tx_fail_total = 0;
405
406         bd_tx_q = (struct bond_tx_queue *)queue;
407         internals = bd_tx_q->dev_private;
408
409         /* Copy slave list to protect against slave up/down changes during tx
410          * bursting */
411         num_of_slaves = internals->active_slave_count;
412         memcpy(slaves, internals->active_slaves,
413                         sizeof(internals->active_slaves[0]) * num_of_slaves);
414
415         if (num_of_slaves < 1)
416                 return num_tx_total;
417
418         /* Populate each slave's mbuf array with the packets to be sent on it */
419         for (i = 0; i < nb_pkts; i++) {
420                 cslave_idx = (slave_idx + i) % num_of_slaves;
421                 slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
422         }
423
424         /* increment current slave index so the next call to tx burst starts on the
425          * next slave */
426         slave_idx = ++cslave_idx;
427
428         /* Send packet burst on each slave device */
429         for (i = 0; i < num_of_slaves; i++) {
430                 if (slave_nb_pkts[i] > 0) {
431                         num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
432                                         slave_bufs[i], slave_nb_pkts[i]);
433
434                         /* if tx burst fails move packets to end of bufs */
435                         if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
436                                 int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
437
438                                 tx_fail_total += tx_fail_slave;
439
440                                 memcpy(&bufs[nb_pkts - tx_fail_total],
441                                                 &slave_bufs[i][num_tx_slave],
442                                                 tx_fail_slave * sizeof(bufs[0]));
443                         }
444                         num_tx_total += num_tx_slave;
445                 }
446         }
447
448         return num_tx_total;
449 }
450
451 static uint16_t
452 bond_ethdev_tx_burst_active_backup(void *queue,
453                 struct rte_mbuf **bufs, uint16_t nb_pkts)
454 {
455         struct bond_dev_private *internals;
456         struct bond_tx_queue *bd_tx_q;
457
458         bd_tx_q = (struct bond_tx_queue *)queue;
459         internals = bd_tx_q->dev_private;
460
461         if (internals->active_slave_count < 1)
462                 return 0;
463
464         return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
465                         bufs, nb_pkts);
466 }
467
468 static inline uint16_t
469 ether_hash(struct ether_hdr *eth_hdr)
470 {
471         unaligned_uint16_t *word_src_addr =
472                 (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
473         unaligned_uint16_t *word_dst_addr =
474                 (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
475
476         return (word_src_addr[0] ^ word_dst_addr[0]) ^
477                         (word_src_addr[1] ^ word_dst_addr[1]) ^
478                         (word_src_addr[2] ^ word_dst_addr[2]);
479 }
480
481 static inline uint32_t
482 ipv4_hash(struct ipv4_hdr *ipv4_hdr)
483 {
484         return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
485 }
486
487 static inline uint32_t
488 ipv6_hash(struct ipv6_hdr *ipv6_hdr)
489 {
490         unaligned_uint32_t *word_src_addr =
491                 (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
492         unaligned_uint32_t *word_dst_addr =
493                 (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
494
495         return (word_src_addr[0] ^ word_dst_addr[0]) ^
496                         (word_src_addr[1] ^ word_dst_addr[1]) ^
497                         (word_src_addr[2] ^ word_dst_addr[2]) ^
498                         (word_src_addr[3] ^ word_dst_addr[3]);
499 }
500
501 uint16_t
502 xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
503 {
504         struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
505
506         uint32_t hash = ether_hash(eth_hdr);
507
508         return (hash ^= hash >> 8) % slave_count;
509 }
510
511 uint16_t
512 xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
513 {
514         struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
515         uint16_t proto = eth_hdr->ether_type;
516         size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
517         uint32_t hash, l3hash = 0;
518
519         hash = ether_hash(eth_hdr);
520
521         if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
522                 struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
523                                 ((char *)(eth_hdr + 1) + vlan_offset);
524                 l3hash = ipv4_hash(ipv4_hdr);
525
526         } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
527                 struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
528                                 ((char *)(eth_hdr + 1) + vlan_offset);
529                 l3hash = ipv6_hash(ipv6_hdr);
530         }
531
532         hash = hash ^ l3hash;
533         hash ^= hash >> 16;
534         hash ^= hash >> 8;
535
536         return hash % slave_count;
537 }
538
539 uint16_t
540 xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
541 {
542         struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
543         uint16_t proto = eth_hdr->ether_type;
544         size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
545
546         struct udp_hdr *udp_hdr = NULL;
547         struct tcp_hdr *tcp_hdr = NULL;
548         uint32_t hash, l3hash = 0, l4hash = 0;
549
550         if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
551                 struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
552                                 ((char *)(eth_hdr + 1) + vlan_offset);
553                 size_t ip_hdr_offset;
554
555                 l3hash = ipv4_hash(ipv4_hdr);
556
557                 /* there is no L4 header in fragmented packet */
558                 if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
559                         ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
560                                         IPV4_IHL_MULTIPLIER;
561
562                         if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
563                                 tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
564                                                 ip_hdr_offset);
565                                 l4hash = HASH_L4_PORTS(tcp_hdr);
566                         } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
567                                 udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
568                                                 ip_hdr_offset);
569                                 l4hash = HASH_L4_PORTS(udp_hdr);
570                         }
571                 }
572         } else if  (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
573                 struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
574                                 ((char *)(eth_hdr + 1) + vlan_offset);
575                 l3hash = ipv6_hash(ipv6_hdr);
576
577                 if (ipv6_hdr->proto == IPPROTO_TCP) {
578                         tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
579                         l4hash = HASH_L4_PORTS(tcp_hdr);
580                 } else if (ipv6_hdr->proto == IPPROTO_UDP) {
581                         udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
582                         l4hash = HASH_L4_PORTS(udp_hdr);
583                 }
584         }
585
586         hash = l3hash ^ l4hash;
587         hash ^= hash >> 16;
588         hash ^= hash >> 8;
589
590         return hash % slave_count;
591 }
592
593 struct bwg_slave {
594         uint64_t bwg_left_int;
595         uint64_t bwg_left_remainder;
596         uint8_t slave;
597 };
598
599 void
600 bond_tlb_activate_slave(struct bond_dev_private *internals) {
601         int i;
602
603         for (i = 0; i < internals->active_slave_count; i++) {
604                 tlb_last_obytets[internals->active_slaves[i]] = 0;
605         }
606 }
607
608 static int
609 bandwidth_cmp(const void *a, const void *b)
610 {
611         const struct bwg_slave *bwg_a = a;
612         const struct bwg_slave *bwg_b = b;
613         int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
614         int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
615                         (int64_t)bwg_a->bwg_left_remainder;
616         if (diff > 0)
617                 return 1;
618         else if (diff < 0)
619                 return -1;
620         else if (diff2 > 0)
621                 return 1;
622         else if (diff2 < 0)
623                 return -1;
624         else
625                 return 0;
626 }
627
628 static void
629 bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
630                 struct bwg_slave *bwg_slave)
631 {
632         struct rte_eth_link link_status;
633
634         rte_eth_link_get(port_id, &link_status);
635         uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
636         if (link_bwg == 0)
637                 return;
638         link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
639         bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
640         bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
641 }
642
643 static void
644 bond_ethdev_update_tlb_slave_cb(void *arg)
645 {
646         struct bond_dev_private *internals = arg;
647         struct rte_eth_stats slave_stats;
648         struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
649         uint8_t slave_count;
650         uint64_t tx_bytes;
651
652         uint8_t update_stats = 0;
653         uint8_t i, slave_id;
654
655         internals->slave_update_idx++;
656
657
658         if (internals->slave_update_idx >= REORDER_PERIOD_MS)
659                 update_stats = 1;
660
661         for (i = 0; i < internals->active_slave_count; i++) {
662                 slave_id = internals->active_slaves[i];
663                 rte_eth_stats_get(slave_id, &slave_stats);
664                 tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
665                 bandwidth_left(slave_id, tx_bytes,
666                                 internals->slave_update_idx, &bwg_array[i]);
667                 bwg_array[i].slave = slave_id;
668
669                 if (update_stats) {
670                         tlb_last_obytets[slave_id] = slave_stats.obytes;
671                 }
672         }
673
674         if (update_stats == 1)
675                 internals->slave_update_idx = 0;
676
677         slave_count = i;
678         qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
679         for (i = 0; i < slave_count; i++)
680                 internals->tlb_slaves_order[i] = bwg_array[i].slave;
681
682         rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
683                         (struct bond_dev_private *)internals);
684 }
685
686 static uint16_t
687 bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
688 {
689         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
690         struct bond_dev_private *internals = bd_tx_q->dev_private;
691
692         struct rte_eth_dev *primary_port =
693                         &rte_eth_devices[internals->primary_port];
694         uint16_t num_tx_total = 0;
695         uint8_t i, j;
696
697         uint8_t num_of_slaves = internals->active_slave_count;
698         uint8_t slaves[RTE_MAX_ETHPORTS];
699
700         struct ether_hdr *ether_hdr;
701         struct ether_addr primary_slave_addr;
702         struct ether_addr active_slave_addr;
703
704         if (num_of_slaves < 1)
705                 return num_tx_total;
706
707         memcpy(slaves, internals->tlb_slaves_order,
708                                 sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
709
710
711         ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
712
713         if (nb_pkts > 3) {
714                 for (i = 0; i < 3; i++)
715                         rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
716         }
717
718         for (i = 0; i < num_of_slaves; i++) {
719                 rte_eth_macaddr_get(slaves[i], &active_slave_addr);
720                 for (j = num_tx_total; j < nb_pkts; j++) {
721                         if (j + 3 < nb_pkts)
722                                 rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
723
724                         ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
725                         if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
726                                 ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
727 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
728                         mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
729 #endif
730                 }
731
732                 num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
733                                 bufs + num_tx_total, nb_pkts - num_tx_total);
734
735                 if (num_tx_total == nb_pkts)
736                         break;
737         }
738
739         return num_tx_total;
740 }
741
742 void
743 bond_tlb_disable(struct bond_dev_private *internals)
744 {
745         rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
746 }
747
748 void
749 bond_tlb_enable(struct bond_dev_private *internals)
750 {
751         bond_ethdev_update_tlb_slave_cb(internals);
752 }
753
754 static uint16_t
755 bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
756 {
757         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
758         struct bond_dev_private *internals = bd_tx_q->dev_private;
759
760         struct ether_hdr *eth_h;
761         uint16_t ether_type, offset;
762
763         struct client_data *client_info;
764
765         /*
766          * We create transmit buffers for every slave and one additional to send
767          * through TLB. In the worst case every packet will be sent on one port.
768          */
769         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
770         uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
771
772         /*
773          * We create separate transmit buffers for update packets as they won't be
774          * counted in num_tx_total.
775          */
776         struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
777         uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
778
779         struct rte_mbuf *upd_pkt;
780         size_t pkt_size;
781
782         uint16_t num_send, num_not_send = 0;
783         uint16_t num_tx_total = 0;
784         uint8_t slave_idx;
785
786         int i, j;
787
788         /* Search tx buffer for ARP packets and forward them to ALB */
789         for (i = 0; i < nb_pkts; i++) {
790                 eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
791                 ether_type = eth_h->ether_type;
792                 offset = get_vlan_offset(eth_h, &ether_type);
793
794                 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
795                         slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
796
797                         /* Change src mac in eth header */
798                         rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
799
800                         /* Add packet to slave tx buffer */
801                         slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
802                         slave_bufs_pkts[slave_idx]++;
803                 } else {
804                         /* If packet is not ARP, send it with TLB policy */
805                         slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
806                                         bufs[i];
807                         slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
808                 }
809         }
810
811         /* Update connected client ARP tables */
812         if (internals->mode6.ntt) {
813                 for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
814                         client_info = &internals->mode6.client_table[i];
815
816                         if (client_info->in_use) {
817                                 /* Allocate new packet to send ARP update on current slave */
818                                 upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
819                                 if (upd_pkt == NULL) {
820                                         RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
821                                         continue;
822                                 }
823                                 pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
824                                                 + client_info->vlan_count * sizeof(struct vlan_hdr);
825                                 upd_pkt->data_len = pkt_size;
826                                 upd_pkt->pkt_len = pkt_size;
827
828                                 slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
829                                                 internals);
830
831                                 /* Add packet to update tx buffer */
832                                 update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
833                                 update_bufs_pkts[slave_idx]++;
834                         }
835                 }
836                 internals->mode6.ntt = 0;
837         }
838
839         /* Send ARP packets on proper slaves */
840         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
841                 if (slave_bufs_pkts[i] > 0) {
842                         num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
843                                         slave_bufs[i], slave_bufs_pkts[i]);
844                         for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
845                                 bufs[nb_pkts - 1 - num_not_send - j] =
846                                                 slave_bufs[i][nb_pkts - 1 - j];
847                         }
848
849                         num_tx_total += num_send;
850                         num_not_send += slave_bufs_pkts[i] - num_send;
851
852 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
853         /* Print TX stats including update packets */
854                         for (j = 0; j < slave_bufs_pkts[i]; j++) {
855                                 eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
856                                 mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
857                         }
858 #endif
859                 }
860         }
861
862         /* Send update packets on proper slaves */
863         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
864                 if (update_bufs_pkts[i] > 0) {
865                         num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
866                                         update_bufs_pkts[i]);
867                         for (j = num_send; j < update_bufs_pkts[i]; j++) {
868                                 rte_pktmbuf_free(update_bufs[i][j]);
869                         }
870 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
871                         for (j = 0; j < update_bufs_pkts[i]; j++) {
872                                 eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
873                                 mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
874                         }
875 #endif
876                 }
877         }
878
879         /* Send non-ARP packets using tlb policy */
880         if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
881                 num_send = bond_ethdev_tx_burst_tlb(queue,
882                                 slave_bufs[RTE_MAX_ETHPORTS],
883                                 slave_bufs_pkts[RTE_MAX_ETHPORTS]);
884
885                 for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
886                         bufs[nb_pkts - 1 - num_not_send - j] =
887                                         slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
888                 }
889
890                 num_tx_total += num_send;
891                 num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
892         }
893
894         return num_tx_total;
895 }
896
897 static uint16_t
898 bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
899                 uint16_t nb_pkts)
900 {
901         struct bond_dev_private *internals;
902         struct bond_tx_queue *bd_tx_q;
903
904         uint8_t num_of_slaves;
905         uint8_t slaves[RTE_MAX_ETHPORTS];
906
907         uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
908
909         int i, op_slave_id;
910
911         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
912         uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
913
914         bd_tx_q = (struct bond_tx_queue *)queue;
915         internals = bd_tx_q->dev_private;
916
917         /* Copy slave list to protect against slave up/down changes during tx
918          * bursting */
919         num_of_slaves = internals->active_slave_count;
920         memcpy(slaves, internals->active_slaves,
921                         sizeof(internals->active_slaves[0]) * num_of_slaves);
922
923         if (num_of_slaves < 1)
924                 return num_tx_total;
925
926         /* Populate each slave's mbuf array with the packets to be sent on it */
927         for (i = 0; i < nb_pkts; i++) {
928                 /* Select output slave using hash based on xmit policy */
929                 op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);
930
931                 /* Populate slave mbuf arrays with mbufs for that slave */
932                 slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
933         }
934
935         /* Send packet burst on each slave device */
936         for (i = 0; i < num_of_slaves; i++) {
937                 if (slave_nb_pkts[i] > 0) {
938                         num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
939                                         slave_bufs[i], slave_nb_pkts[i]);
940
941                         /* if tx burst fails move packets to end of bufs */
942                         if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
943                                 int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;
944
945                                 tx_fail_total += slave_tx_fail_count;
946                                 memcpy(&bufs[nb_pkts - tx_fail_total],
947                                                 &slave_bufs[i][num_tx_slave],
948                                                 slave_tx_fail_count * sizeof(bufs[0]));
949                         }
950
951                         num_tx_total += num_tx_slave;
952                 }
953         }
954
955         return num_tx_total;
956 }
957
958 static uint16_t
959 bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
960                 uint16_t nb_pkts)
961 {
962         struct bond_dev_private *internals;
963         struct bond_tx_queue *bd_tx_q;
964
965         uint8_t num_of_slaves;
966         uint8_t slaves[RTE_MAX_ETHPORTS];
967         /* Positions in the slaves array, not port IDs */
968         uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
969         uint8_t distributing_count;
970
971         uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
972         uint16_t i, j, op_slave_idx;
973         const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;
974
975         /* Allocate room for additional slow protocol packets in 802.3ad mode. */
976         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
977         void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };
978
979         /* Total number of packets in slave_bufs */
980         uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
981         /* Slow packets placed in each slave */
982         uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
983
984         bd_tx_q = (struct bond_tx_queue *)queue;
985         internals = bd_tx_q->dev_private;
986
987         /* Copy slave list to protect against slave up/down changes during tx
988          * bursting */
989         num_of_slaves = internals->active_slave_count;
990         if (num_of_slaves < 1)
991                 return num_tx_total;
992
993         memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
994
995         distributing_count = 0;
996         for (i = 0; i < num_of_slaves; i++) {
997                 struct port *port = &mode_8023ad_ports[slaves[i]];
998
999                 slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
1000                                 slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
1001                 slave_nb_pkts[i] = slave_slow_nb_pkts[i];
1002
1003                 for (j = 0; j < slave_slow_nb_pkts[i]; j++)
1004                         slave_bufs[i][j] = slow_pkts[j];
1005
1006                 if (ACTOR_STATE(port, DISTRIBUTING))
1007                         distributing_offsets[distributing_count++] = i;
1008         }
1009
1010         if (likely(distributing_count > 0)) {
1011                 /* Populate each slave's mbuf array with the packets to be sent on it */
1012                 for (i = 0; i < nb_pkts; i++) {
1013                         /* Select output slave using hash based on xmit policy */
1014                         op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);
1015
1016                         /* Populate slave mbuf arrays with mbufs for that slave. Use only
1017                          * slaves that are currently distributing. */
1018                         uint8_t slave_offset = distributing_offsets[op_slave_idx];
1019                         slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
1020                         slave_nb_pkts[slave_offset]++;
1021                 }
1022         }
1023
1024         /* Send packet burst on each slave device */
1025         for (i = 0; i < num_of_slaves; i++) {
1026                 if (slave_nb_pkts[i] == 0)
1027                         continue;
1028
1029                 num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1030                                 slave_bufs[i], slave_nb_pkts[i]);
1031
1032                 /* If tx burst fails drop slow packets */
1033                 for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
1034                         rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);
1035
1036                 num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
1037                 num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
1038
1039                 /* If tx burst fails move packets to end of bufs */
1040                 if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
1041                         uint16_t j = nb_pkts - num_tx_fail_total;
1042                         for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
1043                                 bufs[j] = slave_bufs[i][num_tx_slave];
1044                 }
1045         }
1046
1047         return num_tx_total;
1048 }
1049
1050 static uint16_t
1051 bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
1052                 uint16_t nb_pkts)
1053 {
1054         struct bond_dev_private *internals;
1055         struct bond_tx_queue *bd_tx_q;
1056
1057         uint8_t tx_failed_flag = 0, num_of_slaves;
1058         uint8_t slaves[RTE_MAX_ETHPORTS];
1059
1060         uint16_t max_nb_of_tx_pkts = 0;
1061
1062         int slave_tx_total[RTE_MAX_ETHPORTS];
1063         int i, most_successful_tx_slave = -1;
1064
1065         bd_tx_q = (struct bond_tx_queue *)queue;
1066         internals = bd_tx_q->dev_private;
1067
1068         /* Copy slave list to protect against slave up/down changes during tx
1069          * bursting */
1070         num_of_slaves = internals->active_slave_count;
1071         memcpy(slaves, internals->active_slaves,
1072                         sizeof(internals->active_slaves[0]) * num_of_slaves);
1073
1074         if (num_of_slaves < 1)
1075                 return 0;
1076
1077         /* Increment reference count on mbufs */
1078         for (i = 0; i < nb_pkts; i++)
1079                 rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
1080
1081         /* Transmit burst on each active slave */
1082         for (i = 0; i < num_of_slaves; i++) {
1083                 slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1084                                         bufs, nb_pkts);
1085
1086                 if (unlikely(slave_tx_total[i] < nb_pkts))
1087                         tx_failed_flag = 1;
1088
1089                 /* record the value and slave index for the slave which transmits the
1090                  * maximum number of packets */
1091                 if (slave_tx_total[i] > max_nb_of_tx_pkts) {
1092                         max_nb_of_tx_pkts = slave_tx_total[i];
1093                         most_successful_tx_slave = i;
1094                 }
1095         }
1096
1097         /* if slaves fail to transmit packets from burst, the calling application
1098          * is not expected to know about multiple references to packets so we must
1099          * handle failures of all packets except those of the most successful slave
1100          */
1101         if (unlikely(tx_failed_flag))
1102                 for (i = 0; i < num_of_slaves; i++)
1103                         if (i != most_successful_tx_slave)
1104                                 while (slave_tx_total[i] < nb_pkts)
1105                                         rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
1106
1107         return max_nb_of_tx_pkts;
1108 }
1109
1110 void
1111 link_properties_set(struct rte_eth_dev *bonded_eth_dev,
1112                 struct rte_eth_link *slave_dev_link)
1113 {
1114         struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
1115         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1116
1117         if (slave_dev_link->link_status &&
1118                 bonded_eth_dev->data->dev_started) {
1119                 bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
1120                 bonded_dev_link->link_speed = slave_dev_link->link_speed;
1121
1122                 internals->link_props_set = 1;
1123         }
1124 }
1125
1126 void
1127 link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
1128 {
1129         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1130
1131         memset(&(bonded_eth_dev->data->dev_link), 0,
1132                         sizeof(bonded_eth_dev->data->dev_link));
1133
1134         internals->link_props_set = 0;
1135 }
1136
1137 int
1138 link_properties_valid(struct rte_eth_link *bonded_dev_link,
1139                 struct rte_eth_link *slave_dev_link)
1140 {
1141         if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
1142                 bonded_dev_link->link_speed !=  slave_dev_link->link_speed)
1143                 return -1;
1144
1145         return 0;
1146 }
1147
1148 int
1149 mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
1150 {
1151         struct ether_addr *mac_addr;
1152
1153         if (eth_dev == NULL) {
1154                 RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
1155                 return -1;
1156         }
1157
1158         if (dst_mac_addr == NULL) {
1159                 RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
1160                 return -1;
1161         }
1162
1163         mac_addr = eth_dev->data->mac_addrs;
1164
1165         ether_addr_copy(mac_addr, dst_mac_addr);
1166         return 0;
1167 }
1168
1169 int
1170 mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
1171 {
1172         struct ether_addr *mac_addr;
1173
1174         if (eth_dev == NULL) {
1175                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1176                 return -1;
1177         }
1178
1179         if (new_mac_addr == NULL) {
1180                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1181                 return -1;
1182         }
1183
1184         mac_addr = eth_dev->data->mac_addrs;
1185
1186         /* If new MAC is different to current MAC then update */
1187         if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1188                 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1189
1190         return 0;
1191 }
1192
1193 int
1194 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1195 {
1196         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1197         int i;
1198
1199         /* Update slave devices MAC addresses */
1200         if (internals->slave_count < 1)
1201                 return -1;
1202
1203         switch (internals->mode) {
1204         case BONDING_MODE_ROUND_ROBIN:
1205         case BONDING_MODE_BALANCE:
1206         case BONDING_MODE_BROADCAST:
1207                 for (i = 0; i < internals->slave_count; i++) {
1208                         if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
1209                                         bonded_eth_dev->data->mac_addrs)) {
1210                                 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1211                                                 internals->slaves[i].port_id);
1212                                 return -1;
1213                         }
1214                 }
1215                 break;
1216         case BONDING_MODE_8023AD:
1217                 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1218                 break;
1219         case BONDING_MODE_ACTIVE_BACKUP:
1220         case BONDING_MODE_TLB:
1221         case BONDING_MODE_ALB:
1222         default:
1223                 for (i = 0; i < internals->slave_count; i++) {
1224                         if (internals->slaves[i].port_id ==
1225                                         internals->current_primary_port) {
1226                                 if (mac_address_set(&rte_eth_devices[internals->primary_port],
1227                                                 bonded_eth_dev->data->mac_addrs)) {
1228                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1229                                                         internals->current_primary_port);
1230                                         return -1;
1231                                 }
1232                         } else {
1233                                 if (mac_address_set(
1234                                                 &rte_eth_devices[internals->slaves[i].port_id],
1235                                                 &internals->slaves[i].persisted_mac_addr)) {
1236                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1237                                                         internals->slaves[i].port_id);
1238                                         return -1;
1239                                 }
1240                         }
1241                 }
1242         }
1243
1244         return 0;
1245 }
1246
1247 int
1248 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1249 {
1250         struct bond_dev_private *internals;
1251
1252         internals = eth_dev->data->dev_private;
1253
1254         switch (mode) {
1255         case BONDING_MODE_ROUND_ROBIN:
1256                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1257                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1258                 break;
1259         case BONDING_MODE_ACTIVE_BACKUP:
1260                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1261                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1262                 break;
1263         case BONDING_MODE_BALANCE:
1264                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1265                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1266                 break;
1267         case BONDING_MODE_BROADCAST:
1268                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1269                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1270                 break;
1271         case BONDING_MODE_8023AD:
1272                 if (bond_mode_8023ad_enable(eth_dev) != 0)
1273                         return -1;
1274
1275                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1276                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1277                 RTE_LOG(WARNING, PMD,
1278                                 "Using mode 4, it is necessary to do TX burst and RX burst "
1279                                 "at least every 100ms.\n");
1280                 break;
1281         case BONDING_MODE_TLB:
1282                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1283                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1284                 break;
1285         case BONDING_MODE_ALB:
1286                 if (bond_mode_alb_enable(eth_dev) != 0)
1287                         return -1;
1288
1289                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1290                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1291                 break;
1292         default:
1293                 return -1;
1294         }
1295
1296         internals->mode = mode;
1297
1298         return 0;
1299 }
1300
1301 int
1302 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1303                 struct rte_eth_dev *slave_eth_dev)
1304 {
1305         struct bond_rx_queue *bd_rx_q;
1306         struct bond_tx_queue *bd_tx_q;
1307
1308         uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
1309         uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
1310         int errval;
1311         uint16_t q_id;
1312
1313         /* Stop slave */
1314         rte_eth_dev_stop(slave_eth_dev->data->port_id);
1315
1316         /* Enable interrupts on slave device if supported */
1317         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1318                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1319
1320         /* If RSS is enabled for bonding, try to enable it for slaves  */
1321         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1322                 if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
1323                                 != 0) {
1324                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1325                                         bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
1326                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1327                                         bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
1328                 } else {
1329                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1330                 }
1331
1332                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1333                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1334                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1335                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1336         }
1337
1338         /* Configure device */
1339         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1340                         bonded_eth_dev->data->nb_rx_queues,
1341                         bonded_eth_dev->data->nb_tx_queues,
1342                         &(slave_eth_dev->data->dev_conf));
1343         if (errval != 0) {
1344                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1345                                 slave_eth_dev->data->port_id, errval);
1346                 return errval;
1347         }
1348
1349         /* Setup Rx Queues */
1350         /* Reuse queues already set up on the slave; only set up the newly added ones */
1351         for (q_id = old_nb_rx_queues;
1352              q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1353                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1354
1355                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1356                                 bd_rx_q->nb_rx_desc,
1357                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1358                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1359                 if (errval != 0) {
1360                         RTE_BOND_LOG(ERR,
1361                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1362                                         slave_eth_dev->data->port_id, q_id, errval);
1363                         return errval;
1364                 }
1365         }
1366
1367         /* Setup Tx Queues */
1368         /* Reuse queues already set up on the slave; only set up the newly added ones */
1369         for (q_id = old_nb_tx_queues;
1370              q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1371                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1372
1373                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1374                                 bd_tx_q->nb_tx_desc,
1375                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1376                                 &bd_tx_q->tx_conf);
1377                 if (errval != 0) {
1378                         RTE_BOND_LOG(ERR,
1379                                         "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1380                                         slave_eth_dev->data->port_id, q_id, errval);
1381                         return errval;
1382                 }
1383         }
1384
1385         /* Start device */
1386         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1387         if (errval != 0) {
1388                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1389                                 slave_eth_dev->data->port_id, errval);
1390                 return -1;
1391         }
1392
1393         /* If RSS is enabled for bonding, synchronize RETA */
1394         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1395                 int i;
1396                 struct bond_dev_private *internals;
1397
1398                 internals = bonded_eth_dev->data->dev_private;
1399
1400                 for (i = 0; i < internals->slave_count; i++) {
1401                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1402                                 errval = rte_eth_dev_rss_reta_update(
1403                                                 slave_eth_dev->data->port_id,
1404                                                 &internals->reta_conf[0],
1405                                                 internals->slaves[i].reta_size);
1406                                 if (errval != 0) {
1407                                         RTE_LOG(WARNING, PMD,
1408                                                         "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1409                                                         " RSS Configuration for bonding may be inconsistent.\n",
1410                                                         slave_eth_dev->data->port_id, errval);
1411                                 }
1412                                 break;
1413                         }
1414                 }
1415         }
1416
1417         /* If lsc interrupt is set, check initial slave's link status */
1418         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1419                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1420                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
1421
1422         return 0;
1423 }
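/*
 * Illustrative sketch (not part of the driver): the RSS block above simply
 * mirrors whatever the application configured on the bonded port onto each
 * slave. A hedged application-side sketch, assuming a bonded port
 * `bond_port_id`, a 40-byte key `rss_key` and `nb_queues` queue pairs, might
 * look like:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = {
 *			.rss_key = rss_key,
 *			.rss_key_len = 40,
 *			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
 *		},
 *	};
 *
 *	rte_eth_dev_configure(bond_port_id, nb_queues, nb_queues, &conf);
 */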
1424
1425 void
1426 slave_remove(struct bond_dev_private *internals,
1427                 struct rte_eth_dev *slave_eth_dev)
1428 {
1429         uint8_t i;
1430
1431         for (i = 0; i < internals->slave_count; i++)
1432                 if (internals->slaves[i].port_id ==
1433                                 slave_eth_dev->data->port_id)
1434                         break;
1435
1436         if (i < (internals->slave_count - 1))
1437                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1438                                 sizeof(internals->slaves[0]) *
1439                                 (internals->slave_count - i - 1));
1440
1441         internals->slave_count--;
1442 }
1443
1444 static void
1445 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1446
1447 void
1448 slave_add(struct bond_dev_private *internals,
1449                 struct rte_eth_dev *slave_eth_dev)
1450 {
1451         struct bond_slave_details *slave_details =
1452                         &internals->slaves[internals->slave_count];
1453
1454         slave_details->port_id = slave_eth_dev->data->port_id;
1455         slave_details->last_link_status = 0;
1456
1457         /* If the slave device doesn't support link status interrupts then we need
1458          * to enable polling to monitor its link status */
1459         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1460                 slave_details->link_status_poll_enabled = 1;
1461
1462                 if (!internals->link_status_polling_enabled) {
1463                         internals->link_status_polling_enabled = 1;
1464
1465                         rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
1466                                         bond_ethdev_slave_link_status_change_monitor,
1467                                         (void *)&rte_eth_devices[internals->port_id]);
1468                 }
1469         }
1470
1471         slave_details->link_status_wait_to_complete = 0;
1472         /* Save the slave's original MAC address so it can be restored on removal */
1473         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1474                         sizeof(struct ether_addr));
1475 }
1476
1477 void
1478 bond_ethdev_primary_set(struct bond_dev_private *internals,
1479                 uint8_t slave_port_id)
1480 {
1481         int i;
1482
1483         if (internals->active_slave_count < 1)
1484                 internals->current_primary_port = slave_port_id;
1485         else
1486                 /* Search bonded device slave ports for new proposed primary port */
1487                 for (i = 0; i < internals->active_slave_count; i++) {
1488                         if (internals->active_slaves[i] == slave_port_id)
1489                                 internals->current_primary_port = slave_port_id;
1490                 }
1491 }
1492
1493 static void
1494 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1495
1496 static int
1497 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1498 {
1499         struct bond_dev_private *internals;
1500         int i;
1501
1502         /* slave eth dev will be started by bonded device */
1503         if (check_for_bonded_ethdev(eth_dev)) {
1504                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1505                                 eth_dev->data->port_id);
1506                 return -1;
1507         }
1508
1509         eth_dev->data->dev_link.link_status = 0;
1510         eth_dev->data->dev_started = 1;
1511
1512         internals = eth_dev->data->dev_private;
1513
1514         if (internals->slave_count == 0) {
1515                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1516                 return -1;
1517         }
1518
1519         if (internals->user_defined_mac == 0) {
1520                 struct ether_addr *new_mac_addr = NULL;
1521
1522                 for (i = 0; i < internals->slave_count; i++)
1523                         if (internals->slaves[i].port_id == internals->primary_port)
1524                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1525
1526                 if (new_mac_addr == NULL)
1527                         return -1;
1528
1529                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1530                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1531                                         eth_dev->data->port_id);
1532                         return -1;
1533                 }
1534         }
1535
1536         /* Update all slave devices MACs*/
1537         if (mac_address_slaves_update(eth_dev) != 0)
1538                 return -1;
1539
1540         /* If bonded device is configured in promiscuous mode then re-apply config */
1541         if (internals->promiscuous_en)
1542                 bond_ethdev_promiscuous_enable(eth_dev);
1543
1544         /* Reconfigure each slave device if starting bonded device */
1545         for (i = 0; i < internals->slave_count; i++) {
1546                 if (slave_configure(eth_dev,
1547                                 &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
1548                         RTE_BOND_LOG(ERR,
1549                                         "bonded port (%d) failed to reconfigure slave device (%d)",
1550                                         eth_dev->data->port_id, internals->slaves[i].port_id);
1551                         return -1;
1552                 }
1553         }
1554
1555         if (internals->user_defined_primary_port)
1556                 bond_ethdev_primary_set(internals, internals->primary_port);
1557
1558         if (internals->mode == BONDING_MODE_8023AD)
1559                 bond_mode_8023ad_start(eth_dev);
1560
1561         if (internals->mode == BONDING_MODE_TLB ||
1562                         internals->mode == BONDING_MODE_ALB)
1563                 bond_tlb_enable(internals);
1564
1565         return 0;
1566 }
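/*
 * Illustrative sketch (not part of the driver): the callbacks above are driven
 * by the usual ethdev bring-up sequence on the bonded port. A hedged
 * application-side sketch, assuming a port `bond_port_id`, a device
 * configuration `conf` and a mempool `mb_pool`, might look like:
 *
 *	rte_eth_dev_configure(bond_port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(bond_port_id, 0, 128, rte_socket_id(),
 *			NULL, mb_pool);
 *	rte_eth_tx_queue_setup(bond_port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(bond_port_id);	// reconfigures and starts each slave
 */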
1567
1568 static void
1569 bond_ethdev_free_queues(struct rte_eth_dev *dev)
1570 {
1571         uint8_t i;
1572
1573         if (dev->data->rx_queues != NULL) {
1574                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1575                         rte_free(dev->data->rx_queues[i]);
1576                         dev->data->rx_queues[i] = NULL;
1577                 }
1578                 dev->data->nb_rx_queues = 0;
1579         }
1580
1581         if (dev->data->tx_queues != NULL) {
1582                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1583                         rte_free(dev->data->tx_queues[i]);
1584                         dev->data->tx_queues[i] = NULL;
1585                 }
1586                 dev->data->nb_tx_queues = 0;
1587         }
1588 }
1589
1590 void
1591 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
1592 {
1593         struct bond_dev_private *internals = eth_dev->data->dev_private;
1594         uint8_t i;
1595
1596         if (internals->mode == BONDING_MODE_8023AD) {
1597                 struct port *port;
1598                 void *pkt = NULL;
1599
1600                 bond_mode_8023ad_stop(eth_dev);
1601
1602                 /* Discard all messages to/from mode 4 state machines */
1603                 for (i = 0; i < internals->active_slave_count; i++) {
1604                         port = &mode_8023ad_ports[internals->active_slaves[i]];
1605
1606                         RTE_VERIFY(port->rx_ring != NULL);
1607                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
1608                                 rte_pktmbuf_free(pkt);
1609
1610                         RTE_VERIFY(port->tx_ring != NULL);
1611                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
1612                                 rte_pktmbuf_free(pkt);
1613                 }
1614         }
1615
1616         if (internals->mode == BONDING_MODE_TLB ||
1617                         internals->mode == BONDING_MODE_ALB) {
1618                 bond_tlb_disable(internals);
1619                 for (i = 0; i < internals->active_slave_count; i++)
1620                         tlb_last_obytets[internals->active_slaves[i]] = 0;
1621         }
1622
1623         internals->active_slave_count = 0;
1624         internals->link_status_polling_enabled = 0;
1625
1626         eth_dev->data->dev_link.link_status = 0;
1627         eth_dev->data->dev_started = 0;
1628 }
1629
1630 void
1631 bond_ethdev_close(struct rte_eth_dev *dev)
1632 {
1633         bond_ethdev_free_queues(dev);
1634 }
1635
1636 /* forward declaration */
1637 static int bond_ethdev_configure(struct rte_eth_dev *dev);
1638
1639 static void
1640 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1641 {
1642         struct bond_dev_private *internals = dev->data->dev_private;
1643
1644         dev_info->max_mac_addrs = 1;
1645
1646         dev_info->max_rx_pktlen = (uint32_t)2048;
1647
1648         dev_info->max_rx_queues = (uint16_t)128;
1649         dev_info->max_tx_queues = (uint16_t)512;
1650
1651         dev_info->min_rx_bufsize = 0;
1652         dev_info->pci_dev = NULL;
1653
1654         dev_info->rx_offload_capa = internals->rx_offload_capa;
1655         dev_info->tx_offload_capa = internals->tx_offload_capa;
1656         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
1657
1658         dev_info->reta_size = internals->reta_size;
1659 }
1660
1661 static int
1662 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1663                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
1664                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
1665 {
1666         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
1667                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
1668                                         0, dev->data->numa_node);
1669         if (bd_rx_q == NULL)
1670                 return -1;
1671
1672         bd_rx_q->queue_id = rx_queue_id;
1673         bd_rx_q->dev_private = dev->data->dev_private;
1674
1675         bd_rx_q->nb_rx_desc = nb_rx_desc;
1676
1677         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
1678         bd_rx_q->mb_pool = mb_pool;
1679
1680         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
1681
1682         return 0;
1683 }
1684
1685 static int
1686 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1687                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
1688                 const struct rte_eth_txconf *tx_conf)
1689 {
1690         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
1691                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
1692                                         0, dev->data->numa_node);
1693
1694         if (bd_tx_q == NULL)
1695                 return -1;
1696
1697         bd_tx_q->queue_id = tx_queue_id;
1698         bd_tx_q->dev_private = dev->data->dev_private;
1699
1700         bd_tx_q->nb_tx_desc = nb_tx_desc;
1701         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
1702
1703         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
1704
1705         return 0;
1706 }
1707
1708 static void
1709 bond_ethdev_rx_queue_release(void *queue)
1710 {
1711         if (queue == NULL)
1712                 return;
1713
1714         rte_free(queue);
1715 }
1716
1717 static void
1718 bond_ethdev_tx_queue_release(void *queue)
1719 {
1720         if (queue == NULL)
1721                 return;
1722
1723         rte_free(queue);
1724 }
1725
1726 static void
1727 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
1728 {
1729         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
1730         struct bond_dev_private *internals;
1731
1732         /* Default value for polling slave found is true as we don't want to
1733          * disable the polling thread if we cannot get the lock */
1734         int i, polling_slave_found = 1;
1735
1736         if (cb_arg == NULL)
1737                 return;
1738
1739         bonded_ethdev = (struct rte_eth_dev *)cb_arg;
1740         internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
1741
1742         if (!bonded_ethdev->data->dev_started ||
1743                 !internals->link_status_polling_enabled)
1744                 return;
1745
1746         /* If the device is currently being configured then don't check the slaves'
1747          * link status; wait until the next period */
1748         if (rte_spinlock_trylock(&internals->lock)) {
1749                 if (internals->slave_count > 0)
1750                         polling_slave_found = 0;
1751
1752                 for (i = 0; i < internals->slave_count; i++) {
1753                         if (!internals->slaves[i].link_status_poll_enabled)
1754                                 continue;
1755
1756                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
1757                         polling_slave_found = 1;
1758
1759                         /* Update slave link status */
1760                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
1761                                         internals->slaves[i].link_status_wait_to_complete);
1762
1763                         /* if link status has changed since last checked then call lsc
1764                          * event callback */
1765                         if (slave_ethdev->data->dev_link.link_status !=
1766                                         internals->slaves[i].last_link_status) {
1767                                 internals->slaves[i].last_link_status =
1768                                                 slave_ethdev->data->dev_link.link_status;
1769
1770                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
1771                                                 RTE_ETH_EVENT_INTR_LSC,
1772                                                 &bonded_ethdev->data->port_id);
1773                         }
1774                 }
1775                 rte_spinlock_unlock(&internals->lock);
1776         }
1777
1778         if (polling_slave_found)
1779                 /* Set alarm to continue monitoring link status of slave ethdevs */
1780                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
1781                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
1782 }
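/*
 * Illustrative note (not part of the driver): the polling interval used by the
 * alarm above can be tuned from the application, e.g. (hedged sketch):
 *
 *	rte_eth_bond_link_monitoring_set(bond_port_id, 100);	// poll every 100 ms
 *
 * or, for a vdev-created bonded device, through the devargs parsed with
 * PMD_BOND_LSC_POLL_PERIOD_KVARG in bond_ethdev_configure() below.
 */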
1783
1784 static int
1785 bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
1786                 int wait_to_complete)
1787 {
1788         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1789
1790         if (!bonded_eth_dev->data->dev_started ||
1791                 internals->active_slave_count == 0) {
1792                 bonded_eth_dev->data->dev_link.link_status = 0;
1793                 return 0;
1794         } else {
1795                 struct rte_eth_dev *slave_eth_dev;
1796                 int i, link_up = 0;
1797
1798                 for (i = 0; i < internals->active_slave_count; i++) {
1799                         slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
1800
1801                         (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
1802                                         wait_to_complete);
1803                         if (slave_eth_dev->data->dev_link.link_status == 1) {
1804                                 link_up = 1;
1805                                 break;
1806                         }
1807                 }
1808
1809                 bonded_eth_dev->data->dev_link.link_status = link_up;
1810         }
1811
1812         return 0;
1813 }
1814
1815 static void
1816 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1817 {
1818         struct bond_dev_private *internals = dev->data->dev_private;
1819         struct rte_eth_stats slave_stats;
1820         int i, j;
1821
1822         for (i = 0; i < internals->slave_count; i++) {
1823                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
1824
1825                 stats->ipackets += slave_stats.ipackets;
1826                 stats->opackets += slave_stats.opackets;
1827                 stats->ibytes += slave_stats.ibytes;
1828                 stats->obytes += slave_stats.obytes;
1829                 stats->imissed += slave_stats.imissed;
1830                 stats->ierrors += slave_stats.ierrors;
1831                 stats->oerrors += slave_stats.oerrors;
1832                 stats->imcasts += slave_stats.imcasts;
1833                 stats->rx_nombuf += slave_stats.rx_nombuf;
1834
1835                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1836                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
1837                         stats->q_opackets[j] += slave_stats.q_opackets[j];
1838                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
1839                         stats->q_obytes[j] += slave_stats.q_obytes[j];
1840                         stats->q_errors[j] += slave_stats.q_errors[j];
1841                 }
1842
1843         }
1844 }
1845
1846 static void
1847 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
1848 {
1849         struct bond_dev_private *internals = dev->data->dev_private;
1850         int i;
1851
1852         for (i = 0; i < internals->slave_count; i++)
1853                 rte_eth_stats_reset(internals->slaves[i].port_id);
1854 }
1855
1856 static void
1857 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1858 {
1859         struct bond_dev_private *internals = eth_dev->data->dev_private;
1860         int i;
1861
1862         internals->promiscuous_en = 1;
1863
1864         switch (internals->mode) {
1865         /* Promiscuous mode is propagated to all slaves */
1866         case BONDING_MODE_ROUND_ROBIN:
1867         case BONDING_MODE_BALANCE:
1868         case BONDING_MODE_BROADCAST:
1869                 for (i = 0; i < internals->slave_count; i++)
1870                         rte_eth_promiscuous_enable(internals->slaves[i].port_id);
1871                 break;
1872         /* In mode 4 promiscuous mode is managed when a slave is added/removed */
1873         case BONDING_MODE_8023AD:
1874                 break;
1875         /* Promiscuous mode is propagated only to primary slave */
1876         case BONDING_MODE_ACTIVE_BACKUP:
1877         case BONDING_MODE_TLB:
1878         case BONDING_MODE_ALB:
1879         default:
1880                 rte_eth_promiscuous_enable(internals->current_primary_port);
1881         }
1882 }
1883
1884 static void
1885 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
1886 {
1887         struct bond_dev_private *internals = dev->data->dev_private;
1888         int i;
1889
1890         internals->promiscuous_en = 0;
1891
1892         switch (internals->mode) {
1893         /* Promiscuous mode is propagated to all slaves */
1894         case BONDING_MODE_ROUND_ROBIN:
1895         case BONDING_MODE_BALANCE:
1896         case BONDING_MODE_BROADCAST:
1897                 for (i = 0; i < internals->slave_count; i++)
1898                         rte_eth_promiscuous_disable(internals->slaves[i].port_id);
1899                 break;
1900         /* In mode 4 promiscuous mode is managed when a slave is added/removed */
1901         case BONDING_MODE_8023AD:
1902                 break;
1903         /* Promiscuous mode is propagated only to primary slave */
1904         case BONDING_MODE_ACTIVE_BACKUP:
1905         case BONDING_MODE_TLB:
1906         case BONDING_MODE_ALB:
1907         default:
1908                 rte_eth_promiscuous_disable(internals->current_primary_port);
1909         }
1910 }
1911
1912 static void
1913 bond_ethdev_delayed_lsc_propagation(void *arg)
1914 {
1915         if (arg == NULL)
1916                 return;
1917
1918         _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
1919                         RTE_ETH_EVENT_INTR_LSC);
1920 }
1921
1922 void
1923 bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
1924                 void *param)
1925 {
1926         struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
1927         struct bond_dev_private *internals;
1928         struct rte_eth_link link;
1929
1930         int i, valid_slave = 0;
1931         uint8_t active_pos;
1932         uint8_t lsc_flag = 0;
1933
1934         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
1935                 return;
1936
1937         bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
1938         slave_eth_dev = &rte_eth_devices[port_id];
1939
1940         if (check_for_bonded_ethdev(bonded_eth_dev))
1941                 return;
1942
1943         internals = bonded_eth_dev->data->dev_private;
1944
1945         /* If the device isn't started don't handle interrupts */
1946         if (!bonded_eth_dev->data->dev_started)
1947                 return;
1948
1949         /* verify that port_id is a valid slave of bonded port */
1950         for (i = 0; i < internals->slave_count; i++) {
1951                 if (internals->slaves[i].port_id == port_id) {
1952                         valid_slave = 1;
1953                         break;
1954                 }
1955         }
1956
1957         if (!valid_slave)
1958                 return;
1959
1960         /* Search for port in active port list */
1961         active_pos = find_slave_by_id(internals->active_slaves,
1962                         internals->active_slave_count, port_id);
1963
1964         rte_eth_link_get_nowait(port_id, &link);
1965         if (link.link_status) {
1966                 if (active_pos < internals->active_slave_count)
1967                         return;
1968
1969                 /* if no active slave ports then set this port to be primary port */
1970                 if (internals->active_slave_count < 1) {
1971                         /* If first active slave, then change link status */
1972                         bonded_eth_dev->data->dev_link.link_status = 1;
1973                         internals->current_primary_port = port_id;
1974                         lsc_flag = 1;
1975
1976                         mac_address_slaves_update(bonded_eth_dev);
1977
1978                         /* Inherit eth dev link properties from first active slave */
1979                         link_properties_set(bonded_eth_dev,
1980                                         &(slave_eth_dev->data->dev_link));
1981                 }
1982
1983                 activate_slave(bonded_eth_dev, port_id);
1984
1985                 /* If user has defined the primary port then default to using it */
1986                 if (internals->user_defined_primary_port &&
1987                                 internals->primary_port == port_id)
1988                         bond_ethdev_primary_set(internals, port_id);
1989         } else {
1990                 if (active_pos == internals->active_slave_count)
1991                         return;
1992
1993                 /* Remove from active slave list */
1994                 deactivate_slave(bonded_eth_dev, port_id);
1995
1996                 /* No active slaves, change link status to down and reset other
1997                  * link properties */
1998                 if (internals->active_slave_count < 1) {
1999                         lsc_flag = 1;
2000                         bonded_eth_dev->data->dev_link.link_status = 0;
2001
2002                         link_properties_reset(bonded_eth_dev);
2003                 }
2004
2005                 /* Update primary id: take the first active slave from the list or, if
2006                  * none is available, fall back to the configured primary port */
2007                 if (port_id == internals->current_primary_port) {
2008                         if (internals->active_slave_count > 0)
2009                                 bond_ethdev_primary_set(internals,
2010                                                 internals->active_slaves[0]);
2011                         else
2012                                 internals->current_primary_port = internals->primary_port;
2013                 }
2014         }
2015
2016         if (lsc_flag) {
2017                 /* Cancel any possible outstanding interrupts if delays are enabled */
2018                 if (internals->link_up_delay_ms > 0 ||
2019                         internals->link_down_delay_ms > 0)
2020                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2021                                         bonded_eth_dev);
2022
2023                 if (bonded_eth_dev->data->dev_link.link_status) {
2024                         if (internals->link_up_delay_ms > 0)
2025                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2026                                                 bond_ethdev_delayed_lsc_propagation,
2027                                                 (void *)bonded_eth_dev);
2028                         else
2029                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2030                                                 RTE_ETH_EVENT_INTR_LSC);
2031
2032                 } else {
2033                         if (internals->link_down_delay_ms > 0)
2034                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2035                                                 bond_ethdev_delayed_lsc_propagation,
2036                                                 (void *)bonded_eth_dev);
2037                         else
2038                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2039                                                 RTE_ETH_EVENT_INTR_LSC);
2040                 }
2041         }
2042 }
2043
2044 static int
2045 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2046                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2047 {
2048         unsigned i, j;
2049         int result = 0;
2050         int slave_reta_size;
2051         unsigned reta_count;
2052         struct bond_dev_private *internals = dev->data->dev_private;
2053
2054         if (reta_size != internals->reta_size)
2055                 return -EINVAL;
2056
2057          /* Copy RETA table */
2058         reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2059
2060         for (i = 0; i < reta_count; i++) {
2061                 internals->reta_conf[i].mask = reta_conf[i].mask;
2062                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2063                         if ((reta_conf[i].mask >> j) & 0x01)
2064                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2065         }
2066
2067         /* Fill rest of array */
2068         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2069                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2070                                 sizeof(internals->reta_conf[0]) * reta_count);
2071
2072         /* Propagate RETA over slaves */
2073         for (i = 0; i < internals->slave_count; i++) {
2074                 slave_reta_size = internals->slaves[i].reta_size;
2075                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2076                                 &internals->reta_conf[0], slave_reta_size);
2077                 if (result < 0)
2078                         return result;
2079         }
2080
2081         return 0;
2082 }
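/*
 * Illustrative sketch (not part of the driver): a hedged application-side
 * example of updating the redirection table propagated above, assuming a
 * bonded port `bond_port_id` with a 128-entry RETA spread over two RX queues,
 * might look like:
 *
 *	struct rte_eth_rss_reta_entry64 reta[128 / RTE_RETA_GROUP_SIZE];
 *	unsigned i, j;
 *
 *	for (i = 0; i < 128 / RTE_RETA_GROUP_SIZE; i++) {
 *		reta[i].mask = ~0ULL;	// update every entry in this group
 *		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
 *			reta[i].reta[j] = (i * RTE_RETA_GROUP_SIZE + j) % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(bond_port_id, reta, 128);
 */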
2083
2084 static int
2085 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2086                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2087 {
2088         int i, j;
2089         struct bond_dev_private *internals = dev->data->dev_private;
2090
2091         if (reta_size != internals->reta_size)
2092                 return -EINVAL;
2093
2094          /* Copy RETA table */
2095         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2096                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2097                         if ((reta_conf[i].mask >> j) & 0x01)
2098                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2099
2100         return 0;
2101 }
2102
2103 static int
2104 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2105                 struct rte_eth_rss_conf *rss_conf)
2106 {
2107         int i, result = 0;
2108         struct bond_dev_private *internals = dev->data->dev_private;
2109         struct rte_eth_rss_conf bond_rss_conf;
2110
2111         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2112
2113         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2114
2115         if (bond_rss_conf.rss_hf != 0)
2116                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2117
2118         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2119                         sizeof(internals->rss_key)) {
2120                 if (bond_rss_conf.rss_key_len == 0)
2121                         bond_rss_conf.rss_key_len = 40; /* default RSS key length */
2122                 internals->rss_key_len = bond_rss_conf.rss_key_len;
2123                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
2124                                 internals->rss_key_len);
2125         }
2126
2127         for (i = 0; i < internals->slave_count; i++) {
2128                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
2129                                 &bond_rss_conf);
2130                 if (result < 0)
2131                         return result;
2132         }
2133
2134         return 0;
2135 }
2136
2137 static int
2138 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
2139                 struct rte_eth_rss_conf *rss_conf)
2140 {
2141         struct bond_dev_private *internals = dev->data->dev_private;
2142
2143         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
2144         rss_conf->rss_key_len = internals->rss_key_len;
2145         if (rss_conf->rss_key)
2146                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
2147
2148         return 0;
2149 }
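/*
 * Illustrative sketch (not part of the driver): the two callbacks above back
 * rte_eth_dev_rss_hash_update()/rte_eth_dev_rss_hash_conf_get() for the bonded
 * port. A hedged example of narrowing the hash functions at runtime, assuming
 * a bonded port `bond_port_id`, might look like:
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,	// keep the current key
 *		.rss_hf = ETH_RSS_IP,	// hash on IP fields only
 *	};
 *
 *	rte_eth_dev_rss_hash_update(bond_port_id, &rss_conf);
 *	rte_eth_dev_rss_hash_conf_get(bond_port_id, &rss_conf);	// read back
 */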
2150
2151 const struct eth_dev_ops default_dev_ops = {
2152         .dev_start            = bond_ethdev_start,
2153         .dev_stop             = bond_ethdev_stop,
2154         .dev_close            = bond_ethdev_close,
2155         .dev_configure        = bond_ethdev_configure,
2156         .dev_infos_get        = bond_ethdev_info,
2157         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
2158         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
2159         .rx_queue_release     = bond_ethdev_rx_queue_release,
2160         .tx_queue_release     = bond_ethdev_tx_queue_release,
2161         .link_update          = bond_ethdev_link_update,
2162         .stats_get            = bond_ethdev_stats_get,
2163         .stats_reset          = bond_ethdev_stats_reset,
2164         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
2165         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
2166         .reta_update          = bond_ethdev_rss_reta_update,
2167         .reta_query           = bond_ethdev_rss_reta_query,
2168         .rss_hash_update      = bond_ethdev_rss_hash_update,
2169         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get
2170 };
2171
2172 static int
2173 bond_init(const char *name, const char *params)
2174 {
2175         struct bond_dev_private *internals;
2176         struct rte_kvargs *kvlist;
2177         uint8_t bonding_mode, socket_id;
2178         int  arg_count, port_id;
2179
2180         RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
2181
2182         kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
2183         if (kvlist == NULL)
2184                 return -1;
2185
2186         /* Parse link bonding mode */
2187         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
2188                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
2189                                 &bond_ethdev_parse_slave_mode_kvarg,
2190                                 &bonding_mode) != 0) {
2191                         RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
2192                                         name);
2193                         goto parse_error;
2194                 }
2195         } else {
2196                 RTE_LOG(ERR, EAL, "Mode must be specified exactly once for bonded "
2197                                 "device %s\n", name);
2198                 goto parse_error;
2199         }
2200
2201         /* Parse socket id to create bonding device on */
2202         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
2203         if (arg_count == 1) {
2204                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
2205                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
2206                                 != 0) {
2207                         RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
2208                                         "bonded device %s\n", name);
2209                         goto parse_error;
2210                 }
2211         } else if (arg_count > 1) {
2212                 RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
2213                                 "bonded device %s\n", name);
2214                 goto parse_error;
2215         } else {
2216                 socket_id = rte_socket_id();
2217         }
2218
2219         /* Create link bonding eth device */
2220         port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
2221         if (port_id < 0) {
2222                 RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
2223                                 "socket %u.\n", name, bonding_mode, socket_id);
2224                 goto parse_error;
2225         }
2226         internals = rte_eth_devices[port_id].data->dev_private;
2227         internals->kvlist = kvlist;
2228
2229         RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on "
2230                         "socket %u.\n", name, port_id, bonding_mode, socket_id);
2231         return 0;
2232
2233 parse_error:
2234         rte_kvargs_free(kvlist);
2235
2236         return -1;
2237 }
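/*
 * Illustrative sketch (not part of the driver): besides the vdev path handled
 * by bond_init() above, a bonded device can also be created at runtime through
 * the bonding API. A hedged example, assuming two already-probed slave ports 0
 * and 1, might look like:
 *
 *	int bond_port_id = rte_eth_bond_create("eth_bond0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 *	if (bond_port_id >= 0) {
 *		rte_eth_bond_slave_add(bond_port_id, 0);
 *		rte_eth_bond_slave_add(bond_port_id, 1);
 *		rte_eth_bond_primary_set(bond_port_id, 0);
 *	}
 */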
2238
2239 static int
2240 bond_uninit(const char *name)
2241 {
2242         int  ret;
2243
2244         if (name == NULL)
2245                 return -EINVAL;
2246
2247         RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
2248
2249         /* free link bonding eth device */
2250         ret = rte_eth_bond_free(name);
2251         if (ret < 0)
2252                 RTE_LOG(ERR, EAL, "Failed to free %s\n", name);
2253
2254         return ret;
2255 }
2256
2257 /* This part resolves the slave port ids after all the other physical and
2258  * virtual devices have been allocated */
2259 static int
2260 bond_ethdev_configure(struct rte_eth_dev *dev)
2261 {
2262         char *name = dev->data->name;
2263         struct bond_dev_private *internals = dev->data->dev_private;
2264         struct rte_kvargs *kvlist = internals->kvlist;
2265         int arg_count;
2266         uint8_t port_id = dev - rte_eth_devices;
2267
2268         static const uint8_t default_rss_key[40] = {
2269                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
2270                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2271                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
2272                 0xBE, 0xAC, 0x01, 0xFA
2273         };
2274
2275         unsigned i, j;
2276
2277         /* If RSS is enabled, fill table and key with default values */
2278         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
2279                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
2280                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
2281                 memcpy(internals->rss_key, default_rss_key, 40);
2282
2283                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
2284                         internals->reta_conf[i].mask = ~0LL;
2285                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2286                                 internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
2287                 }
2288         }
2289
2290         /*
2291          * if no kvlist, it means that this bonded device has been created
2292          * through the bonding api.
2293          */
2294         if (!kvlist)
2295                 return 0;
2296
2297         /* Parse MAC address for bonded device */
2298         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
2299         if (arg_count == 1) {
2300                 struct ether_addr bond_mac;
2301
2302                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
2303                                 &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
2304                         RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
2305                                         name);
2306                         return -1;
2307                 }
2308
2309                 /* Set MAC address */
2310                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
2311                         RTE_LOG(ERR, EAL,
2312                                         "Failed to set mac address on bonded device %s\n",
2313                                         name);
2314                         return -1;
2315                 }
2316         } else if (arg_count > 1) {
2317                 RTE_LOG(ERR, EAL,
2318                                 "MAC address can be specified only once for bonded device %s\n",
2319                                 name);
2320                 return -1;
2321         }
2322
2323         /* Parse/set balance mode transmit policy */
2324         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
2325         if (arg_count == 1) {
2326                 uint8_t xmit_policy;
2327
2328                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
2329                                 &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
2330                                                 0) {
2331                         RTE_LOG(INFO, EAL,
2332                                         "Invalid xmit policy specified for bonded device %s\n",
2333                                         name);
2334                         return -1;
2335                 }
2336
2337                 /* Set balance mode transmit policy */
2338                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
2339                         RTE_LOG(ERR, EAL,
2340                                         "Failed to set balance xmit policy on bonded device %s\n",
2341                                         name);
2342                         return -1;
2343                 }
2344         } else if (arg_count > 1) {
2345                 RTE_LOG(ERR, EAL,
2346                                 "Transmit policy can be specified only once for bonded device"
2347                                 " %s\n", name);
2348                 return -1;
2349         }
2350
2351         /* Parse/add slave ports to bonded device */
2352         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
2353                 struct bond_ethdev_slave_ports slave_ports;
2354                 unsigned i;
2355
2356                 memset(&slave_ports, 0, sizeof(slave_ports));
2357
2358                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
2359                                 &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
2360                         RTE_LOG(ERR, EAL,
2361                                         "Failed to parse slave ports for bonded device %s\n",
2362                                         name);
2363                         return -1;
2364                 }
2365
2366                 for (i = 0; i < slave_ports.slave_count; i++) {
2367                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
2368                                 RTE_LOG(ERR, EAL,
2369                                                 "Failed to add port %d as slave to bonded device %s\n",
2370                                                 slave_ports.slaves[i], name);
2371                         }
2372                 }
2373
2374         } else {
2375                 RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
2376                 return -1;
2377         }
2378
2379         /* Parse/set primary slave port id*/
2380         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
2381         if (arg_count == 1) {
2382                 uint8_t primary_slave_port_id;
2383
2384                 if (rte_kvargs_process(kvlist,
2385                                 PMD_BOND_PRIMARY_SLAVE_KVARG,
2386                                 &bond_ethdev_parse_primary_slave_port_id_kvarg,
2387                                 &primary_slave_port_id) < 0) {
2388                         RTE_LOG(INFO, EAL,
2389                                         "Invalid primary slave port id specified for bonded device"
2390                                         " %s\n", name);
2391                         return -1;
2392                 }
2393
2394                 /* Set primary slave port id */
2395                 if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
2396                                 != 0) {
2397                         RTE_LOG(ERR, EAL,
2398                                         "Failed to set primary slave port %d on bonded device %s\n",
2399                                         primary_slave_port_id, name);
2400                         return -1;
2401                 }
2402         } else if (arg_count > 1) {
2403                 RTE_LOG(INFO, EAL,
2404                                 "Primary slave can be specified only once for bonded device"
2405                                 " %s\n", name);
2406                 return -1;
2407         }
2408
2409         /* Parse link status monitor polling interval */
2410         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
2411         if (arg_count == 1) {
2412                 uint32_t lsc_poll_interval_ms;
2413
2414                 if (rte_kvargs_process(kvlist,
2415                                 PMD_BOND_LSC_POLL_PERIOD_KVARG,
2416                                 &bond_ethdev_parse_time_ms_kvarg,
2417                                 &lsc_poll_interval_ms) < 0) {
2418                         RTE_LOG(INFO, EAL,
2419                                         "Invalid lsc polling interval value specified for bonded"
2420                                         " device %s\n", name);
2421                         return -1;
2422                 }
2423
2424                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
2425                                 != 0) {
2426                         RTE_LOG(ERR, EAL,
2427                                         "Failed to set lsc monitor polling interval (%u ms) on"
2428                                         " bonded device %s\n", lsc_poll_interval_ms, name);
2429                         return -1;
2430                 }
2431         } else if (arg_count > 1) {
2432                 RTE_LOG(INFO, EAL,
2433                                 "LSC polling interval can be specified only once for bonded"
2434                                 " device %s\n", name);
2435                 return -1;
2436         }
2437
2438         /* Parse link up interrupt propagation delay */
2439         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
2440         if (arg_count == 1) {
2441                 uint32_t link_up_delay_ms;
2442
2443                 if (rte_kvargs_process(kvlist,
2444                                 PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
2445                                 &bond_ethdev_parse_time_ms_kvarg,
2446                                 &link_up_delay_ms) < 0) {
2447                         RTE_LOG(INFO, EAL,
2448                                         "Invalid link up propagation delay value specified for"
2449                                         " bonded device %s\n", name);
2450                         return -1;
2451                 }
2452
2453                 /* Set link up propagation delay */
2454                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
2455                                 != 0) {
2456                         RTE_LOG(ERR, EAL,
2457                                         "Failed to set link up propagation delay (%u ms) on bonded"
2458                                         " device %s\n", link_up_delay_ms, name);
2459                         return -1;
2460                 }
2461         } else if (arg_count > 1) {
2462                 RTE_LOG(INFO, EAL,
2463                                 "Link up propagation delay can be specified only once for"
2464                                 " bonded device %s\n", name);
2465                 return -1;
2466         }
2467
2468         /* Parse link down interrupt propagation delay */
2469         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
2470         if (arg_count == 1) {
2471                 uint32_t link_down_delay_ms;
2472
2473                 if (rte_kvargs_process(kvlist,
2474                                 PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
2475                                 &bond_ethdev_parse_time_ms_kvarg,
2476                                 &link_down_delay_ms) < 0) {
2477                         RTE_LOG(INFO, EAL,
2478                                         "Invalid link down propagation delay value specified for"
2479                                         " bonded device %s\n", name);
2480                         return -1;
2481                 }
2482
2483                 /* Set link down propagation delay */
2484                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
2485                                 != 0) {
2486                         RTE_LOG(ERR, EAL,
2487                                         "Failed to set link down propagation delay (%u ms) on"
2488                                         " bonded device %s\n", link_down_delay_ms, name);
2489                         return -1;
2490                 }
2491         } else if (arg_count > 1) {
2492                 RTE_LOG(INFO, EAL,
2493                                 "Link down propagation delay can be specified only once for"
2494                                 " bonded device %s\n", name);
2495                 return -1;
2496         }
2497
2498         return 0;
2499 }
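/*
 * Illustrative note (not part of the driver): the kvargs handled above are the
 * same ones accepted on the EAL command line when the bonded device is created
 * as a vdev. A hedged example (argument names taken from the PMD_BOND_*_KVARG
 * macros; slaves given as PCI device addresses) might look like:
 *
 *	--vdev 'eth_bond0,mode=1,slave=0000:02:00.0,slave=0000:02:00.1,primary=0000:02:00.0,lsc_poll_period_ms=100'
 *
 * The MAC address, transmit policy and the link up/down propagation delays can
 * be supplied the same way with their respective kvargs.
 */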
2500
2501 static struct rte_driver bond_drv = {
2502         .name = "eth_bond",
2503         .type = PMD_VDEV,
2504         .init = bond_init,
2505         .uninit = bond_uninit,
2506 };
2507
2508 PMD_REGISTER_DRIVER(bond_drv);