net/bonding: fix number of bonding Tx/Rx queues
drivers/net/bonding/rte_eth_bond_pmd.c (dpdk.git)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 #include <stdlib.h>
34 #include <netinet/in.h>
35
36 #include <rte_mbuf.h>
37 #include <rte_malloc.h>
38 #include <rte_ethdev.h>
39 #include <rte_ethdev_vdev.h>
40 #include <rte_tcp.h>
41 #include <rte_udp.h>
42 #include <rte_ip.h>
43 #include <rte_ip_frag.h>
44 #include <rte_devargs.h>
45 #include <rte_kvargs.h>
46 #include <rte_vdev.h>
47 #include <rte_alarm.h>
48 #include <rte_cycles.h>
49
50 #include "rte_eth_bond.h"
51 #include "rte_eth_bond_private.h"
52 #include "rte_eth_bond_8023ad_private.h"
53
54 #define REORDER_PERIOD_MS 10
55 #define DEFAULT_POLLING_INTERVAL_10_MS (10)
56
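/* Fold the L4 source and destination ports (network byte order) into a
 * single value for the transmit hash policies below. */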
57 #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
58
59 /* Table for statistics in mode 5 TLB */
60 static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
61
62 static inline size_t
63 get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
64 {
65         size_t vlan_offset = 0;
66
67         if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
68                 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
69
70                 vlan_offset = sizeof(struct vlan_hdr);
71                 *proto = vlan_hdr->eth_proto;
72
73                 if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
74                         vlan_hdr = vlan_hdr + 1;
75                         *proto = vlan_hdr->eth_proto;
76                         vlan_offset += sizeof(struct vlan_hdr);
77                 }
78         }
79         return vlan_offset;
80 }
81
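/*
 * Usage sketch (illustrative only; it mirrors the xmit_l*_hash helpers
 * further down): skip up to two stacked VLAN tags before parsing L3.
 *
 *     uint16_t proto = eth_hdr->ether_type;
 *     size_t off = get_vlan_offset(eth_hdr, &proto);
 *     if (proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
 *             struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
 *                             ((char *)(eth_hdr + 1) + off);
 *             ...
 *     }
 *
 * On return *proto holds the innermost EtherType, still in big-endian
 * byte order.
 */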
82 static uint16_t
83 bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
84 {
85         struct bond_dev_private *internals;
86
87         uint16_t num_rx_slave = 0;
88         uint16_t num_rx_total = 0;
89
90         int i;
91
92         /* Cast to structure containing the bonded device's port id and queue id */
93         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
94
95         internals = bd_rx_q->dev_private;
96
97
98         for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
99                 /* Advance the offset into *bufs as packets are received
100                  * from successive slaves */
101                 num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
102                                 bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
103                 if (num_rx_slave) {
104                         num_rx_total += num_rx_slave;
105                         nb_pkts -= num_rx_slave;
106                 }
107         }
108
109         return num_rx_total;
110 }
111
112 static uint16_t
113 bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
114                 uint16_t nb_pkts)
115 {
116         struct bond_dev_private *internals;
117
118         /* Cast to structure containing the bonded device's port id and queue id */
119         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
120
121         internals = bd_rx_q->dev_private;
122
123         return rte_eth_rx_burst(internals->current_primary_port,
124                         bd_rx_q->queue_id, bufs, nb_pkts);
125 }
126
127 static inline uint8_t
128 is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
129 {
130         const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
131
132         return !vlan_tci && (ethertype == ether_type_slow_be &&
133                 (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
134 }
135
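/*
 * Mode 4 (802.3AD) receive path. Bursts are drained from the active
 * slaves in round-robin order, starting after the slave serviced on the
 * previous call (internals->active_slave). A packet is removed from the
 * burst when it is a slow (LACP/marker) frame, when its slave is not in
 * COLLECTING state, or when the interface is not promiscuous and the
 * destination MAC is neither multicast nor the bonded MAC; slow frames
 * are handed to the mode 4 state machine instead of being delivered.
 */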
136 static uint16_t
137 bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
138                 uint16_t nb_pkts)
139 {
140         /* Cast to structure containing the bonded device's port id and queue id */
141         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
142         struct bond_dev_private *internals = bd_rx_q->dev_private;
143         struct ether_addr bond_mac;
144
145         struct ether_hdr *hdr;
146
147         const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
148         uint16_t num_rx_total = 0;      /* Total number of received packets */
149         uint8_t slaves[RTE_MAX_ETHPORTS];
150         uint8_t slave_count, idx;
151
152         uint8_t collecting;  /* current slave collecting status */
153         const uint8_t promisc = internals->promiscuous_en;
154         uint8_t i, j, k;
155         uint8_t subtype;
156
157         rte_eth_macaddr_get(internals->port_id, &bond_mac);
158         /* Copy slave list to protect against slave up/down changes during rx
159          * bursting */
160         slave_count = internals->active_slave_count;
161         memcpy(slaves, internals->active_slaves,
162                         sizeof(internals->active_slaves[0]) * slave_count);
163
164         idx = internals->active_slave;
165         if (idx >= slave_count) {
166                 internals->active_slave = 0;
167                 idx = 0;
168         }
169         for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
170                 j = num_rx_total;
171                 collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
172                                          COLLECTING);
173
174                 /* Read packets from this slave */
175                 num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
176                                 &bufs[num_rx_total], nb_pkts - num_rx_total);
177
178                 for (k = j; k < 2 && k < num_rx_total; k++)
179                         rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
180
181                 /* Handle slow protocol packets. */
182                 while (j < num_rx_total) {
183                         if (j + 3 < num_rx_total)
184                                 rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
185
186                         hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
187                         subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
188
189                         /* Remove the packet from the array if it is a slow packet, if the
190                          * slave is not in collecting state, or if the bonding interface is
191                          * not in promiscuous mode and the destination address does not match. */
192                         if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
193                                 !collecting || (!promisc &&
194                                         !is_multicast_ether_addr(&hdr->d_addr) &&
195                                         !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
196
197                                 if (hdr->ether_type == ether_type_slow_be) {
198                                         bond_mode_8023ad_handle_slow_pkt(
199                                             internals, slaves[idx], bufs[j]);
200                                 } else
201                                         rte_pktmbuf_free(bufs[j]);
202
203                                 /* Packet is managed by mode 4 or dropped, shift the array */
204                                 num_rx_total--;
205                                 if (j < num_rx_total) {
206                                         memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
207                                                 (num_rx_total - j));
208                                 }
209                         } else
210                                 j++;
211                 }
212                 if (unlikely(++idx == slave_count))
213                         idx = 0;
214         }
215
216         internals->active_slave = idx;
217         return num_rx_total;
218 }
219
220 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
221 uint32_t burstnumberRX;
222 uint32_t burstnumberTX;
223
224 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
225
226 static void
227 arp_op_name(uint16_t arp_op, char *buf)
228 {
229         switch (arp_op) {
230         case ARP_OP_REQUEST:
231                 snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
232                 return;
233         case ARP_OP_REPLY:
234                 snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
235                 return;
236         case ARP_OP_REVREQUEST:
237                 snprintf(buf, sizeof("Reverse ARP Request"), "%s",
238                                 "Reverse ARP Request");
239                 return;
240         case ARP_OP_REVREPLY:
241                 snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
242                                 "Reverse ARP Reply");
243                 return;
244         case ARP_OP_INVREQUEST:
245                 snprintf(buf, sizeof("Peer Identify Request"), "%s",
246                                 "Peer Identify Request");
247                 return;
248         case ARP_OP_INVREPLY:
249                 snprintf(buf, sizeof("Peer Identify Reply"), "%s",
250                                 "Peer Identify Reply");
251                 return;
252         default:
253                 break;
254         }
255         snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
256         return;
257 }
258 #endif
259 #define MaxIPv4String   16
260 static void
261 ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
262 {
263         uint32_t ipv4_addr;
264
265         ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
266         snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
267                 (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
268                 ipv4_addr & 0xFF);
269 }
270
271 #define MAX_CLIENTS_NUMBER      128
272 uint8_t active_clients;
273 struct client_stats_t {
274         uint8_t port;
275         uint32_t ipv4_addr;
276         uint32_t ipv4_rx_packets;
277         uint32_t ipv4_tx_packets;
278 };
279 struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
280
281 static void
282 update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
283 {
284         int i = 0;
285
286         for (; i < MAX_CLIENTS_NUMBER; i++)     {
287                 if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port))      {
288                         /* Known client: update its RX or TX packet count */
289                         if (TXorRXindicator == &burstnumberRX)
290                                 client_stats[i].ipv4_rx_packets++;
291                         else
292                                 client_stats[i].ipv4_tx_packets++;
293                         return;
294                 }
295         }
296         /* We have a new client. Insert it into the table and update its
            * stats, unless the client table is already full. */
           if (active_clients == MAX_CLIENTS_NUMBER)
                   return;
297         if (TXorRXindicator == &burstnumberRX)
298                 client_stats[active_clients].ipv4_rx_packets++;
299         else
300                 client_stats[active_clients].ipv4_tx_packets++;
301         client_stats[active_clients].ipv4_addr = addr;
302         client_stats[active_clients].port = port;
303         active_clients++;
304
305 }
306
307 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
308 #define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber)     \
309                 RTE_LOG(DEBUG, PMD, \
310                 "%s " \
311                 "port:%d " \
312                 "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
313                 "SrcIP:%s " \
314                 "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
315                 "DstIP:%s " \
316                 "%s " \
317                 "%d\n", \
318                 info, \
319                 port, \
320                 eth_h->s_addr.addr_bytes[0], \
321                 eth_h->s_addr.addr_bytes[1], \
322                 eth_h->s_addr.addr_bytes[2], \
323                 eth_h->s_addr.addr_bytes[3], \
324                 eth_h->s_addr.addr_bytes[4], \
325                 eth_h->s_addr.addr_bytes[5], \
326                 src_ip, \
327                 eth_h->d_addr.addr_bytes[0], \
328                 eth_h->d_addr.addr_bytes[1], \
329                 eth_h->d_addr.addr_bytes[2], \
330                 eth_h->d_addr.addr_bytes[3], \
331                 eth_h->d_addr.addr_bytes[4], \
332                 eth_h->d_addr.addr_bytes[5], \
333                 dst_ip, \
334                 arp_op, \
335                 ++burstnumber)
336 #endif
337
338 static void
339 mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
340                 uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
341 {
342         struct ipv4_hdr *ipv4_h;
343 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
344         struct arp_hdr *arp_h;
345         char dst_ip[16];
346         char ArpOp[24];
347         char buf[16];
348 #endif
349         char src_ip[16];
350
351         uint16_t ether_type = eth_h->ether_type;
352         uint16_t offset = get_vlan_offset(eth_h, &ether_type);
353
354 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
355         snprintf(buf, 16, "%s", info);
356 #endif
357
358         if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
359                 ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
360                 ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
361 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
362                 ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
363                 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
364 #endif
365                 update_client_stats(ipv4_h->src_addr, port, burstnumber);
366         }
367 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
368         else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
369                 arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
370                 ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
371                 ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
372                 arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
373                 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
374         }
375 #endif
376 }
377 #endif
378
379 static uint16_t
380 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
381 {
382         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
383         struct bond_dev_private *internals = bd_tx_q->dev_private;
384         struct ether_hdr *eth_h;
385         uint16_t ether_type, offset;
386         uint16_t nb_recv_pkts;
387         int i;
388
389         nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
390
391         for (i = 0; i < nb_recv_pkts; i++) {
392                 eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
393                 ether_type = eth_h->ether_type;
394                 offset = get_vlan_offset(eth_h, &ether_type);
395
396                 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
397 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
398                         mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
399 #endif
400                         bond_mode_alb_arp_recv(eth_h, offset, internals);
401                 }
402 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
403                 else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
404                         mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
405 #endif
406         }
407
408         return nb_recv_pkts;
409 }
410
411 static uint16_t
412 bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
413                 uint16_t nb_pkts)
414 {
415         struct bond_dev_private *internals;
416         struct bond_tx_queue *bd_tx_q;
417
418         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
419         uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
420
421         uint8_t num_of_slaves;
422         uint8_t slaves[RTE_MAX_ETHPORTS];
423
424         uint16_t num_tx_total = 0, num_tx_slave;
425
426         static int slave_idx = 0;
427         int i, cslave_idx = 0, tx_fail_total = 0;
428
429         bd_tx_q = (struct bond_tx_queue *)queue;
430         internals = bd_tx_q->dev_private;
431
432         /* Copy slave list to protect against slave up/down changes during tx
433          * bursting */
434         num_of_slaves = internals->active_slave_count;
435         memcpy(slaves, internals->active_slaves,
436                         sizeof(internals->active_slaves[0]) * num_of_slaves);
437
438         if (num_of_slaves < 1)
439                 return num_tx_total;
440
441         /* Populate the per-slave mbuf arrays with the packets to be sent on each slave */
442         for (i = 0; i < nb_pkts; i++) {
443                 cslave_idx = (slave_idx + i) % num_of_slaves;
444                 slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
445         }
446
447         /* increment current slave index so the next call to tx burst starts on the
448          * next slave */
449         slave_idx = ++cslave_idx;
450
451         /* Send packet burst on each slave device */
452         for (i = 0; i < num_of_slaves; i++) {
453                 if (slave_nb_pkts[i] > 0) {
454                         num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
455                                         slave_bufs[i], slave_nb_pkts[i]);
456
457                         /* if tx burst fails move packets to end of bufs */
458                         if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
459                                 int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
460
461                                 tx_fail_total += tx_fail_slave;
462
463                                 memcpy(&bufs[nb_pkts - tx_fail_total],
464                                                 &slave_bufs[i][num_tx_slave],
465                                                 tx_fail_slave * sizeof(bufs[0]));
466                         }
467                         num_tx_total += num_tx_slave;
468                 }
469         }
470
471         return num_tx_total;
472 }
473
474 static uint16_t
475 bond_ethdev_tx_burst_active_backup(void *queue,
476                 struct rte_mbuf **bufs, uint16_t nb_pkts)
477 {
478         struct bond_dev_private *internals;
479         struct bond_tx_queue *bd_tx_q;
480
481         bd_tx_q = (struct bond_tx_queue *)queue;
482         internals = bd_tx_q->dev_private;
483
484         if (internals->active_slave_count < 1)
485                 return 0;
486
487         return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
488                         bufs, nb_pkts);
489 }
490
491 static inline uint16_t
492 ether_hash(struct ether_hdr *eth_hdr)
493 {
494         unaligned_uint16_t *word_src_addr =
495                 (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
496         unaligned_uint16_t *word_dst_addr =
497                 (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
498
499         return (word_src_addr[0] ^ word_dst_addr[0]) ^
500                         (word_src_addr[1] ^ word_dst_addr[1]) ^
501                         (word_src_addr[2] ^ word_dst_addr[2]);
502 }
503
504 static inline uint32_t
505 ipv4_hash(struct ipv4_hdr *ipv4_hdr)
506 {
507         return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
508 }
509
510 static inline uint32_t
511 ipv6_hash(struct ipv6_hdr *ipv6_hdr)
512 {
513         unaligned_uint32_t *word_src_addr =
514                 (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
515         unaligned_uint32_t *word_dst_addr =
516                 (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
517
518         return (word_src_addr[0] ^ word_dst_addr[0]) ^
519                         (word_src_addr[1] ^ word_dst_addr[1]) ^
520                         (word_src_addr[2] ^ word_dst_addr[2]) ^
521                         (word_src_addr[3] ^ word_dst_addr[3]);
522 }
523
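/*
 * Transmit hash policies used by the balance (mode 2) and 802.3AD
 * (mode 4) transmit paths to pin a flow to one slave: xmit_l2_hash
 * folds the Ethernet addresses, xmit_l23_hash additionally mixes in the
 * IPv4/IPv6 addresses, and xmit_l34_hash mixes the L3 addresses with
 * the TCP/UDP ports. For example, with 3 active slaves a flow hashing
 * to 0x1234 is always sent on slave 0x1234 % 3 == 1.
 */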
524 uint16_t
525 xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
526 {
527         struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
528
529         uint32_t hash = ether_hash(eth_hdr);
530
531         return (hash ^= hash >> 8) % slave_count;
532 }
533
534 uint16_t
535 xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
536 {
537         struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
538         uint16_t proto = eth_hdr->ether_type;
539         size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
540         uint32_t hash, l3hash = 0;
541
542         hash = ether_hash(eth_hdr);
543
544         if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
545                 struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
546                                 ((char *)(eth_hdr + 1) + vlan_offset);
547                 l3hash = ipv4_hash(ipv4_hdr);
548
549         } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
550                 struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
551                                 ((char *)(eth_hdr + 1) + vlan_offset);
552                 l3hash = ipv6_hash(ipv6_hdr);
553         }
554
555         hash = hash ^ l3hash;
556         hash ^= hash >> 16;
557         hash ^= hash >> 8;
558
559         return hash % slave_count;
560 }
561
562 uint16_t
563 xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
564 {
565         struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
566         uint16_t proto = eth_hdr->ether_type;
567         size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
568
569         struct udp_hdr *udp_hdr = NULL;
570         struct tcp_hdr *tcp_hdr = NULL;
571         uint32_t hash, l3hash = 0, l4hash = 0;
572
573         if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
574                 struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
575                                 ((char *)(eth_hdr + 1) + vlan_offset);
576                 size_t ip_hdr_offset;
577
578                 l3hash = ipv4_hash(ipv4_hdr);
579
580                 /* there is no L4 header in a fragmented packet */
581                 if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
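                        /* IHL counts 32-bit words; multiplying by
                         * IPV4_IHL_MULTIPLIER (4) gives the IPv4 header
                         * length in bytes. */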
582                         ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
583                                         IPV4_IHL_MULTIPLIER;
584
585                         if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
586                                 tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
587                                                 ip_hdr_offset);
588                                 l4hash = HASH_L4_PORTS(tcp_hdr);
589                         } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
590                                 udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
591                                                 ip_hdr_offset);
592                                 l4hash = HASH_L4_PORTS(udp_hdr);
593                         }
594                 }
595         } else if  (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
596                 struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
597                                 ((char *)(eth_hdr + 1) + vlan_offset);
598                 l3hash = ipv6_hash(ipv6_hdr);
599
600                 if (ipv6_hdr->proto == IPPROTO_TCP) {
601                         tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
602                         l4hash = HASH_L4_PORTS(tcp_hdr);
603                 } else if (ipv6_hdr->proto == IPPROTO_UDP) {
604                         udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
605                         l4hash = HASH_L4_PORTS(udp_hdr);
606                 }
607         }
608
609         hash = l3hash ^ l4hash;
610         hash ^= hash >> 16;
611         hash ^= hash >> 8;
612
613         return hash % slave_count;
614 }
615
616 struct bwg_slave {
617         uint64_t bwg_left_int;
618         uint64_t bwg_left_remainder;
619         uint8_t slave;
620 };
621
622 void
623 bond_tlb_activate_slave(struct bond_dev_private *internals)
    {
624         int i;
625
626         for (i = 0; i < internals->active_slave_count; i++) {
627                 tlb_last_obytets[internals->active_slaves[i]] = 0;
628         }
629 }
630
631 static int
632 bandwidth_cmp(const void *a, const void *b)
633 {
634         const struct bwg_slave *bwg_a = a;
635         const struct bwg_slave *bwg_b = b;
636         int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
637         int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
638                         (int64_t)bwg_a->bwg_left_remainder;
639         if (diff > 0)
640                 return 1;
641         else if (diff < 0)
642                 return -1;
643         else if (diff2 > 0)
644                 return 1;
645         else if (diff2 < 0)
646                 return -1;
647         else
648                 return 0;
649 }
650
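/*
 * Estimate how much of a slave's link capacity is still unused. link_bwg
 * starts as the link capacity in bytes/s and, multiplied by the elapsed
 * window ((update_idx + 1) * REORDER_PERIOD_MS), becomes the window
 * capacity scaled by 1000; 1000 * load (bytes sent in the window) is the
 * consumed share in the same scale. The quotient and remainder are kept
 * separately and used only as sort keys by bandwidth_cmp().
 */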
651 static void
652 bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
653                 struct bwg_slave *bwg_slave)
654 {
655         struct rte_eth_link link_status;
656
657         rte_eth_link_get_nowait(port_id, &link_status);
658         uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
659         if (link_bwg == 0)
660                 return;
661         link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
662         bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
663         bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
664 }
665
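/*
 * Periodic alarm callback for TLB (mode 5): samples each active slave's
 * TX byte counter, computes the bandwidth left per slave, sorts the
 * slaves by it into internals->tlb_slaves_order, and re-arms itself to
 * run again in REORDER_PERIOD_MS milliseconds.
 */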
666 static void
667 bond_ethdev_update_tlb_slave_cb(void *arg)
668 {
669         struct bond_dev_private *internals = arg;
670         struct rte_eth_stats slave_stats;
671         struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
672         uint8_t slave_count;
673         uint64_t tx_bytes;
674
675         uint8_t update_stats = 0;
676         uint8_t i, slave_id;
677
678         internals->slave_update_idx++;
679
680
681         if (internals->slave_update_idx >= REORDER_PERIOD_MS)
682                 update_stats = 1;
683
684         for (i = 0; i < internals->active_slave_count; i++) {
685                 slave_id = internals->active_slaves[i];
686                 rte_eth_stats_get(slave_id, &slave_stats);
687                 tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
688                 bandwidth_left(slave_id, tx_bytes,
689                                 internals->slave_update_idx, &bwg_array[i]);
690                 bwg_array[i].slave = slave_id;
691
692                 if (update_stats) {
693                         tlb_last_obytets[slave_id] = slave_stats.obytes;
694                 }
695         }
696
697         if (update_stats == 1)
698                 internals->slave_update_idx = 0;
699
700         slave_count = i;
701         qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
702         for (i = 0; i < slave_count; i++)
703                 internals->tlb_slaves_order[i] = bwg_array[i].slave;
704
705         rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
706                         (struct bond_dev_private *)internals);
707 }
708
709 static uint16_t
710 bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
711 {
712         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
713         struct bond_dev_private *internals = bd_tx_q->dev_private;
714
715         struct rte_eth_dev *primary_port =
716                         &rte_eth_devices[internals->primary_port];
717         uint16_t num_tx_total = 0;
718         uint8_t i, j;
719
720         uint8_t num_of_slaves = internals->active_slave_count;
721         uint8_t slaves[RTE_MAX_ETHPORTS];
722
723         struct ether_hdr *ether_hdr;
724         struct ether_addr primary_slave_addr;
725         struct ether_addr active_slave_addr;
726
727         if (num_of_slaves < 1)
728                 return num_tx_total;
729
730         memcpy(slaves, internals->tlb_slaves_order,
731                                 sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
732
733
734         ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
735
736         if (nb_pkts > 3) {
737                 for (i = 0; i < 3; i++)
738                         rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
739         }
740
741         for (i = 0; i < num_of_slaves; i++) {
742                 rte_eth_macaddr_get(slaves[i], &active_slave_addr);
743                 for (j = num_tx_total; j < nb_pkts; j++) {
744                         if (j + 3 < nb_pkts)
745                                 rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
746
747                         ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
748                         if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
749                                 ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
750 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
751                         mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
752 #endif
753                 }
754
755                 num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
756                                 bufs + num_tx_total, nb_pkts - num_tx_total);
757
758                 if (num_tx_total == nb_pkts)
759                         break;
760         }
761
762         return num_tx_total;
763 }
764
765 void
766 bond_tlb_disable(struct bond_dev_private *internals)
767 {
768         rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
769 }
770
771 void
772 bond_tlb_enable(struct bond_dev_private *internals)
773 {
774         bond_ethdev_update_tlb_slave_cb(internals);
775 }
776
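/*
 * Mode 6 (ALB) transmit path: ARP packets are steered to the slave
 * chosen by the ALB client table (with the source MAC rewritten to that
 * slave's address), ARP update packets are generated for known clients
 * whenever mode6.ntt is set, and all other traffic falls back to the
 * TLB transmit policy.
 */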
777 static uint16_t
778 bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
779 {
780         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
781         struct bond_dev_private *internals = bd_tx_q->dev_private;
782
783         struct ether_hdr *eth_h;
784         uint16_t ether_type, offset;
785
786         struct client_data *client_info;
787
788         /*
789          * We create transmit buffers for every slave, plus one extra for packets
790          * sent with the TLB policy. In the worst case every packet is sent on one port.
791          */
792         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
793         uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
794
795         /*
796          * We create separate transmit buffers for update packets as they won't
797          * be counted in num_tx_total.
798          */
799         struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
800         uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
801
802         struct rte_mbuf *upd_pkt;
803         size_t pkt_size;
804
805         uint16_t num_send, num_not_send = 0;
806         uint16_t num_tx_total = 0;
807         uint8_t slave_idx;
808
809         int i, j;
810
811         /* Scan the tx buffer for ARP packets and hand them to the ALB logic */
812         for (i = 0; i < nb_pkts; i++) {
813                 eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
814                 ether_type = eth_h->ether_type;
815                 offset = get_vlan_offset(eth_h, &ether_type);
816
817                 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
818                         slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
819
820                         /* Change src mac in eth header */
821                         rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
822
823                         /* Add packet to slave tx buffer */
824                         slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
825                         slave_bufs_pkts[slave_idx]++;
826                 } else {
827                         /* If packet is not ARP, send it with TLB policy */
828                         slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
829                                         bufs[i];
830                         slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
831                 }
832         }
833
834         /* Update connected client ARP tables */
835         if (internals->mode6.ntt) {
836                 for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
837                         client_info = &internals->mode6.client_table[i];
838
839                         if (client_info->in_use) {
840                                 /* Allocate new packet to send ARP update on current slave */
841                                 upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
842                                 if (upd_pkt == NULL) {
843                                         RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
844                                         continue;
845                                 }
846                                 pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
847                                                 + client_info->vlan_count * sizeof(struct vlan_hdr);
848                                 upd_pkt->data_len = pkt_size;
849                                 upd_pkt->pkt_len = pkt_size;
850
851                                 slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
852                                                 internals);
853
854                                 /* Add packet to update tx buffer */
855                                 update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
856                                 update_bufs_pkts[slave_idx]++;
857                         }
858                 }
859                 internals->mode6.ntt = 0;
860         }
861
862         /* Send ARP packets on proper slaves */
863         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
864                 if (slave_bufs_pkts[i] > 0) {
865                         num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
866                                         slave_bufs[i], slave_bufs_pkts[i]);
867                         for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
868                                 bufs[nb_pkts - 1 - num_not_send - j] =
869                                                 slave_bufs[i][slave_bufs_pkts[i] - 1 - j];
870                         }
871
872                         num_tx_total += num_send;
873                         num_not_send += slave_bufs_pkts[i] - num_send;
874
875 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
876                         /* Print TX stats including update packets */
877                         for (j = 0; j < slave_bufs_pkts[i]; j++) {
878                                 eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
879                                 mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
880                         }
881 #endif
882                 }
883         }
884
885         /* Send update packets on proper slaves */
886         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
887                 if (update_bufs_pkts[i] > 0) {
888                         num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
889                                         update_bufs_pkts[i]);
890                         for (j = num_send; j < update_bufs_pkts[i]; j++) {
891                                 rte_pktmbuf_free(update_bufs[i][j]);
892                         }
893 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
894                         for (j = 0; j < update_bufs_pkts[i]; j++) {
895                                 eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
896                                 mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
897                         }
898 #endif
899                 }
900         }
901
902         /* Send non-ARP packets using tlb policy */
903         if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
904                 num_send = bond_ethdev_tx_burst_tlb(queue,
905                                 slave_bufs[RTE_MAX_ETHPORTS],
906                                 slave_bufs_pkts[RTE_MAX_ETHPORTS]);
907
908                 for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
909                         bufs[nb_pkts - 1 - num_not_send - j] =
910                                         slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS] - 1 - j];
911                 }
912
913                 num_tx_total += num_send;
914         }
915
916         return num_tx_total;
917 }
918
919 static uint16_t
920 bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
921                 uint16_t nb_pkts)
922 {
923         struct bond_dev_private *internals;
924         struct bond_tx_queue *bd_tx_q;
925
926         uint8_t num_of_slaves;
927         uint8_t slaves[RTE_MAX_ETHPORTS];
928
929         uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
930
931         int i, op_slave_id;
932
933         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
934         uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
935
936         bd_tx_q = (struct bond_tx_queue *)queue;
937         internals = bd_tx_q->dev_private;
938
939         /* Copy slave list to protect against slave up/down changes during tx
940          * bursting */
941         num_of_slaves = internals->active_slave_count;
942         memcpy(slaves, internals->active_slaves,
943                         sizeof(internals->active_slaves[0]) * num_of_slaves);
944
945         if (num_of_slaves < 1)
946                 return num_tx_total;
947
948         /* Populate the per-slave mbuf arrays with the packets to be sent on each slave */
949         for (i = 0; i < nb_pkts; i++) {
950                 /* Select output slave using hash based on xmit policy */
951                 op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);
952
953                 /* Populate slave mbuf arrays with mbufs for that slave */
954                 slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
955         }
956
957         /* Send packet burst on each slave device */
958         for (i = 0; i < num_of_slaves; i++) {
959                 if (slave_nb_pkts[i] > 0) {
960                         num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
961                                         slave_bufs[i], slave_nb_pkts[i]);
962
963                         /* if tx burst fails move packets to end of bufs */
964                         if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
965                                 int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;
966
967                                 tx_fail_total += slave_tx_fail_count;
968                                 memcpy(&bufs[nb_pkts - tx_fail_total],
969                                                 &slave_bufs[i][num_tx_slave],
970                                                 slave_tx_fail_count * sizeof(bufs[0]));
971                         }
972
973                         num_tx_total += num_tx_slave;
974                 }
975         }
976
977         return num_tx_total;
978 }
979
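/*
 * Mode 4 (802.3AD) transmit path: slow protocol frames queued by the
 * state machine on each slave's tx_ring are sent first, then data
 * packets are hashed across the slaves currently in DISTRIBUTING state.
 * Unsent slow frames are dropped; unsent data packets are moved to the
 * end of bufs so the caller can retry or free them.
 */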
980 static uint16_t
981 bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
982                 uint16_t nb_pkts)
983 {
984         struct bond_dev_private *internals;
985         struct bond_tx_queue *bd_tx_q;
986
987         uint8_t num_of_slaves;
988         uint8_t slaves[RTE_MAX_ETHPORTS];
989         /* positions in the slaves array, not port IDs */
990         uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
991         uint8_t distributing_count;
992
993         uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
994         uint16_t i, j, op_slave_idx;
995         const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;
996
997         /* Allocate extra slots for slow protocol (LACP) packets in 802.3AD mode. */
998         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
999         void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };
1000
1001         /* Total amount of packets in slave_bufs */
1002         uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
1003         /* Slow packets placed in each slave */
1004         uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
1005
1006         bd_tx_q = (struct bond_tx_queue *)queue;
1007         internals = bd_tx_q->dev_private;
1008
1009         /* Copy slave list to protect against slave up/down changes during tx
1010          * bursting */
1011         num_of_slaves = internals->active_slave_count;
1012         if (num_of_slaves < 1)
1013                 return num_tx_total;
1014
1015         memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
1016
1017         distributing_count = 0;
1018         for (i = 0; i < num_of_slaves; i++) {
1019                 struct port *port = &mode_8023ad_ports[slaves[i]];
1020
1021                 slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
1022                                 slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
1023                                 NULL);
1024                 slave_nb_pkts[i] = slave_slow_nb_pkts[i];
1025
1026                 for (j = 0; j < slave_slow_nb_pkts[i]; j++)
1027                         slave_bufs[i][j] = slow_pkts[j];
1028
1029                 if (ACTOR_STATE(port, DISTRIBUTING))
1030                         distributing_offsets[distributing_count++] = i;
1031         }
1032
1033         if (likely(distributing_count > 0)) {
1034                 /* Populate the per-slave mbuf arrays with the packets to be sent on each slave */
1035                 for (i = 0; i < nb_pkts; i++) {
1036                         /* Select output slave using hash based on xmit policy */
1037                         op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);
1038
1039                         /* Populate slave mbuf arrays with mbufs for that slave. Use only
1040                          * slaves that are currently distributing. */
1041                         uint8_t slave_offset = distributing_offsets[op_slave_idx];
1042                         slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
1043                         slave_nb_pkts[slave_offset]++;
1044                 }
1045         }
1046
1047         /* Send packet burst on each slave device */
1048         for (i = 0; i < num_of_slaves; i++) {
1049                 if (slave_nb_pkts[i] == 0)
1050                         continue;
1051
1052                 num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1053                                 slave_bufs[i], slave_nb_pkts[i]);
1054
1055                 /* If tx burst fails drop slow packets */
1056                 for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
1057                         rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);
1058
1059                 num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
1060                 num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
1061
1062                 /* If tx burst fails move packets to end of bufs */
1063                 if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
1064                         uint16_t j = nb_pkts - num_tx_fail_total;
1065                         for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
1066                                 bufs[j] = slave_bufs[i][num_tx_slave];
1067                 }
1068         }
1069
1070         return num_tx_total;
1071 }
1072
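/*
 * Broadcast (mode 3) transmit path: each mbuf's reference count is
 * bumped by (number of slaves - 1) so the same packets can be sent on
 * every active slave; on failure the packets are freed on all but the
 * most successful slave, whose TX count is what the caller sees.
 */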
1073 static uint16_t
1074 bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
1075                 uint16_t nb_pkts)
1076 {
1077         struct bond_dev_private *internals;
1078         struct bond_tx_queue *bd_tx_q;
1079
1080         uint8_t tx_failed_flag = 0, num_of_slaves;
1081         uint8_t slaves[RTE_MAX_ETHPORTS];
1082
1083         uint16_t max_nb_of_tx_pkts = 0;
1084
1085         int slave_tx_total[RTE_MAX_ETHPORTS];
1086         int i, most_successful_tx_slave = -1;
1087
1088         bd_tx_q = (struct bond_tx_queue *)queue;
1089         internals = bd_tx_q->dev_private;
1090
1091         /* Copy slave list to protect against slave up/down changes during tx
1092          * bursting */
1093         num_of_slaves = internals->active_slave_count;
1094         memcpy(slaves, internals->active_slaves,
1095                         sizeof(internals->active_slaves[0]) * num_of_slaves);
1096
1097         if (num_of_slaves < 1)
1098                 return 0;
1099
1100         /* Increment reference count on mbufs */
1101         for (i = 0; i < nb_pkts; i++)
1102                 rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
1103
1104         /* Transmit burst on each active slave */
1105         for (i = 0; i < num_of_slaves; i++) {
1106                 slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1107                                         bufs, nb_pkts);
1108
1109                 if (unlikely(slave_tx_total[i] < nb_pkts))
1110                         tx_failed_flag = 1;
1111
1112                 /* record the value and slave index for the slave which transmits the
1113                  * maximum number of packets */
1114                 if (slave_tx_total[i] > max_nb_of_tx_pkts) {
1115                         max_nb_of_tx_pkts = slave_tx_total[i];
1116                         most_successful_tx_slave = i;
1117                 }
1118         }
1119
1120         /* if slaves fail to transmit packets from burst, the calling application
1121          * is not expected to know about multiple references to packets so we must
1122          * handle failures of all packets except those of the most successful slave
1123          */
1124         if (unlikely(tx_failed_flag))
1125                 for (i = 0; i < num_of_slaves; i++)
1126                         if (i != most_successful_tx_slave)
1127                                 while (slave_tx_total[i] < nb_pkts)
1128                                         rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
1129
1130         return max_nb_of_tx_pkts;
1131 }
1132
1133 void
1134 link_properties_set(struct rte_eth_dev *bonded_eth_dev,
1135                 struct rte_eth_link *slave_dev_link)
1136 {
1137         struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
1138         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1139
1140         if (slave_dev_link->link_status &&
1141                 bonded_eth_dev->data->dev_started) {
1142                 bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
1143                 bonded_dev_link->link_speed = slave_dev_link->link_speed;
1144
1145                 internals->link_props_set = 1;
1146         }
1147 }
1148
1149 void
1150 link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
1151 {
1152         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1153
1154         memset(&(bonded_eth_dev->data->dev_link), 0,
1155                         sizeof(bonded_eth_dev->data->dev_link));
1156
1157         internals->link_props_set = 0;
1158 }
1159
1160 int
1161 link_properties_valid(struct rte_eth_link *bonded_dev_link,
1162                 struct rte_eth_link *slave_dev_link)
1163 {
1164         if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
1165                 bonded_dev_link->link_speed !=  slave_dev_link->link_speed)
1166                 return -1;
1167
1168         return 0;
1169 }
1170
1171 int
1172 mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
1173 {
1174         struct ether_addr *mac_addr;
1175
1176         if (eth_dev == NULL) {
1177                 RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
1178                 return -1;
1179         }
1180
1181         if (dst_mac_addr == NULL) {
1182                 RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
1183                 return -1;
1184         }
1185
1186         mac_addr = eth_dev->data->mac_addrs;
1187
1188         ether_addr_copy(mac_addr, dst_mac_addr);
1189         return 0;
1190 }
1191
1192 int
1193 mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
1194 {
1195         struct ether_addr *mac_addr;
1196
1197         if (eth_dev == NULL) {
1198                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1199                 return -1;
1200         }
1201
1202         if (new_mac_addr == NULL) {
1203                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1204                 return -1;
1205         }
1206
1207         mac_addr = eth_dev->data->mac_addrs;
1208
1209         /* If the new MAC is different from the current MAC then update */
1210         if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1211                 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1212
1213         return 0;
1214 }
1215
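/*
 * Push MAC addresses to the slaves according to the bonding mode:
 * round-robin, balance and broadcast slaves all take the bonded MAC;
 * 802.3AD delegates to the mode 4 code; in active-backup, TLB and ALB
 * only the current primary takes the bonded MAC while the other slaves
 * keep their persisted addresses.
 */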
1216 int
1217 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1218 {
1219         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1220         int i;
1221
1222         /* Update slave devices MAC addresses */
1223         if (internals->slave_count < 1)
1224                 return -1;
1225
1226         switch (internals->mode) {
1227         case BONDING_MODE_ROUND_ROBIN:
1228         case BONDING_MODE_BALANCE:
1229         case BONDING_MODE_BROADCAST:
1230                 for (i = 0; i < internals->slave_count; i++) {
1231                         if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
1232                                         bonded_eth_dev->data->mac_addrs)) {
1233                                 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1234                                                 internals->slaves[i].port_id);
1235                                 return -1;
1236                         }
1237                 }
1238                 break;
1239         case BONDING_MODE_8023AD:
1240                 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1241                 break;
1242         case BONDING_MODE_ACTIVE_BACKUP:
1243         case BONDING_MODE_TLB:
1244         case BONDING_MODE_ALB:
1245         default:
1246                 for (i = 0; i < internals->slave_count; i++) {
1247                         if (internals->slaves[i].port_id ==
1248                                         internals->current_primary_port) {
1249                                 if (mac_address_set(&rte_eth_devices[internals->primary_port],
1250                                                 bonded_eth_dev->data->mac_addrs)) {
1251                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1252                                                         internals->current_primary_port);
1253                                         return -1;
1254                                 }
1255                         } else {
1256                                 if (mac_address_set(
1257                                                 &rte_eth_devices[internals->slaves[i].port_id],
1258                                                 &internals->slaves[i].persisted_mac_addr)) {
1259                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1260                                                         internals->slaves[i].port_id);
1261                                         return -1;
1262                                 }
1263                         }
1264                 }
1265         }
1266
1267         return 0;
1268 }
1269
1270 int
1271 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1272 {
1273         struct bond_dev_private *internals;
1274
1275         internals = eth_dev->data->dev_private;
1276
1277         switch (mode) {
1278         case BONDING_MODE_ROUND_ROBIN:
1279                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1280                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1281                 break;
1282         case BONDING_MODE_ACTIVE_BACKUP:
1283                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1284                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1285                 break;
1286         case BONDING_MODE_BALANCE:
1287                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1288                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1289                 break;
1290         case BONDING_MODE_BROADCAST:
1291                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1292                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1293                 break;
1294         case BONDING_MODE_8023AD:
1295                 if (bond_mode_8023ad_enable(eth_dev) != 0)
1296                         return -1;
1297
1298                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1299                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1300                 RTE_LOG(WARNING, PMD,
1301                                 "Using mode 4, it is necessary to do TX burst and RX burst "
1302                                 "at least every 100ms.\n");
1303                 break;
1304         case BONDING_MODE_TLB:
1305                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1306                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1307                 break;
1308         case BONDING_MODE_ALB:
1309                 if (bond_mode_alb_enable(eth_dev) != 0)
1310                         return -1;
1311
1312                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1313                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1314                 break;
1315         default:
1316                 return -1;
1317         }
1318
1319         internals->mode = mode;
1320
1321         return 0;
1322 }
1323
1324 int
1325 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1326                 struct rte_eth_dev *slave_eth_dev)
1327 {
1328         struct bond_rx_queue *bd_rx_q;
1329         struct bond_tx_queue *bd_tx_q;
1330
1331         int errval;
1332         uint16_t q_id;
1333
1334         /* Stop slave */
1335         rte_eth_dev_stop(slave_eth_dev->data->port_id);
1336
1337         /* Enable interrupts on slave device if supported */
1338         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1339                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1340
1341         /* If RSS is enabled for bonding, try to enable it for slaves  */
1342         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1343                 if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
1344                                 != 0) {
1345                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1346                                         bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
1347                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1348                                         bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
1349                 } else {
1350                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1351                 }
1352
1353                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1354                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1355                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1356                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1357         }
1358
1359         slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
1360                         bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
1361
1362         /* Configure device */
1363         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1364                         bonded_eth_dev->data->nb_rx_queues,
1365                         bonded_eth_dev->data->nb_tx_queues,
1366                         &(slave_eth_dev->data->dev_conf));
1367         if (errval != 0) {
1368                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1369                                 slave_eth_dev->data->port_id, errval);
1370                 return errval;
1371         }
1372
1373         /* Setup Rx Queues */
1374         for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1375                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1376
1377                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1378                                 bd_rx_q->nb_rx_desc,
1379                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1380                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1381                 if (errval != 0) {
1382                         RTE_BOND_LOG(ERR,
1383                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1384                                         slave_eth_dev->data->port_id, q_id, errval);
1385                         return errval;
1386                 }
1387         }
1388
1389         /* Setup Tx Queues */
1390         for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1391                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1392
1393                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1394                                 bd_tx_q->nb_tx_desc,
1395                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1396                                 &bd_tx_q->tx_conf);
1397                 if (errval != 0) {
1398                         RTE_BOND_LOG(ERR,
1399                                         "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1400                                         slave_eth_dev->data->port_id, q_id, errval);
1401                         return errval;
1402                 }
1403         }
1404
1405         /* Start device */
1406         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1407         if (errval != 0) {
1408                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1409                                 slave_eth_dev->data->port_id, errval);
1410                 return -1;
1411         }
1412
1413         /* If RSS is enabled for bonding, synchronize RETA */
1414         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1415                 int i;
1416                 struct bond_dev_private *internals;
1417
1418                 internals = bonded_eth_dev->data->dev_private;
1419
1420                 for (i = 0; i < internals->slave_count; i++) {
1421                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1422                                 errval = rte_eth_dev_rss_reta_update(
1423                                                 slave_eth_dev->data->port_id,
1424                                                 &internals->reta_conf[0],
1425                                                 internals->slaves[i].reta_size);
1426                                 if (errval != 0) {
1427                                         RTE_LOG(WARNING, PMD,
1428                                                         "rte_eth_dev_rss_reta_update on slave port %d failed (err %d)."
1429                                                         " RSS configuration for bonding may be inconsistent.\n",
1430                                                         slave_eth_dev->data->port_id, errval);
1431                                 }
1432                                 break;
1433                         }
1434                 }
1435         }
1436
1437         /* If lsc interrupt is set, check initial slave's link status */
1438         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1439                 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1440                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1441                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
1442                         NULL);
1443         }
1444
1445         return 0;
1446 }
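
/*
 * Note: slave_configure() mirrors the standard ethdev bring-up sequence an
 * application would perform (stop, configure, per-queue setup, start), and
 * deliberately configures the slave with the bonded device's own Rx/Tx
 * queue counts so every queue on the bond has a backing queue on each slave.
 */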
1447
1448 void
1449 slave_remove(struct bond_dev_private *internals,
1450                 struct rte_eth_dev *slave_eth_dev)
1451 {
1452         uint8_t i;
1453
1454         for (i = 0; i < internals->slave_count; i++)
1455                 if (internals->slaves[i].port_id ==
1456                                 slave_eth_dev->data->port_id)
1457                         break;
1458
1459         if (i < (internals->slave_count - 1))
1460                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1461                                 sizeof(internals->slaves[0]) *
1462                                 (internals->slave_count - i - 1));
1463
1464         internals->slave_count--;
1465
1466         /* force reconfiguration of slave interfaces */
1467         _rte_eth_dev_reset(slave_eth_dev);
1468 }
1469
1470 static void
1471 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1472
1473 void
1474 slave_add(struct bond_dev_private *internals,
1475                 struct rte_eth_dev *slave_eth_dev)
1476 {
1477         struct bond_slave_details *slave_details =
1478                         &internals->slaves[internals->slave_count];
1479
1480         slave_details->port_id = slave_eth_dev->data->port_id;
1481         slave_details->last_link_status = 0;
1482
1483         /* Mark slave devices that don't support interrupts so we can
1484          * compensate when we start the bond
1485          */
1486         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1487                 slave_details->link_status_poll_enabled = 1;
1488         }
1489
1490         slave_details->link_status_wait_to_complete = 0;
1491         /* Persist the slave's original MAC address so it can be restored later */
1492         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1493                         sizeof(struct ether_addr));
1494 }
1495
1496 void
1497 bond_ethdev_primary_set(struct bond_dev_private *internals,
1498                 uint8_t slave_port_id)
1499 {
1500         int i;
1501
1502         if (internals->active_slave_count < 1)
1503                 internals->current_primary_port = slave_port_id;
1504         else
1505                 /* Search bonded device slave ports for new proposed primary port */
1506                 for (i = 0; i < internals->active_slave_count; i++) {
1507                         if (internals->active_slaves[i] == slave_port_id)
1508                                 internals->current_primary_port = slave_port_id;
1509                 }
1510 }
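
/*
 * Illustrative usage, assuming valid bonded/slave port ids: applications
 * normally select the primary through the public API, which lands here:
 *
 *     rte_eth_bond_primary_set(bonded_port_id, slave_port_id);
 */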
1511
1512 static void
1513 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1514
1515 static int
1516 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1517 {
1518         struct bond_dev_private *internals;
1519         int i;
1520
1521         /* slave eth dev will be started by bonded device */
1522         if (check_for_bonded_ethdev(eth_dev)) {
1523                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1524                                 eth_dev->data->port_id);
1525                 return -1;
1526         }
1527
1528         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1529         eth_dev->data->dev_started = 1;
1530
1531         internals = eth_dev->data->dev_private;
1532
1533         if (internals->slave_count == 0) {
1534                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1535                 return -1;
1536         }
1537
1538         if (internals->user_defined_mac == 0) {
1539                 struct ether_addr *new_mac_addr = NULL;
1540
1541                 for (i = 0; i < internals->slave_count; i++)
1542                         if (internals->slaves[i].port_id == internals->primary_port)
1543                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1544
1545                 if (new_mac_addr == NULL)
1546                         return -1;
1547
1548                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1549                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1550                                         eth_dev->data->port_id);
1551                         return -1;
1552                 }
1553         }
1554
1555         /* Update all slave devices MACs*/
1556         if (mac_address_slaves_update(eth_dev) != 0)
1557                 return -1;
1558
1559         /* If bonded device is configured in promiscuous mode then re-apply config */
1560         if (internals->promiscuous_en)
1561                 bond_ethdev_promiscuous_enable(eth_dev);
1562
1563         /* Reconfigure each slave device if starting bonded device */
1564         for (i = 0; i < internals->slave_count; i++) {
1565                 if (slave_configure(eth_dev,
1566                                 &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
1567                         RTE_BOND_LOG(ERR,
1568                                         "bonded port (%d) failed to reconfigure slave device (%d)",
1569                                         eth_dev->data->port_id, internals->slaves[i].port_id);
1570                         return -1;
1571                 }
1572                 /* We will need to poll for link status if any slave doesn't
1573                  * support interrupts
1574                  */
1575                 if (internals->slaves[i].link_status_poll_enabled)
1576                         internals->link_status_polling_enabled = 1;
1577         }
1578         /* start polling if needed */
1579         if (internals->link_status_polling_enabled) {
1580                 rte_eal_alarm_set(
1581                         internals->link_status_polling_interval_ms * 1000,
1582                         bond_ethdev_slave_link_status_change_monitor,
1583                         (void *)&rte_eth_devices[internals->port_id]);
1584         }
1585
1586         if (internals->user_defined_primary_port)
1587                 bond_ethdev_primary_set(internals, internals->primary_port);
1588
1589         if (internals->mode == BONDING_MODE_8023AD)
1590                 bond_mode_8023ad_start(eth_dev);
1591
1592         if (internals->mode == BONDING_MODE_TLB ||
1593                         internals->mode == BONDING_MODE_ALB)
1594                 bond_tlb_enable(internals);
1595
1596         return 0;
1597 }
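
/*
 * Note: the start order above matters - the bond MAC is resolved and pushed
 * to all slaves before each slave is reconfigured and started, and the link
 * status polling alarm is armed only if at least one slave lacks LSC
 * interrupt support.
 */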
1598
1599 static void
1600 bond_ethdev_free_queues(struct rte_eth_dev *dev)
1601 {
1602         uint8_t i;
1603
1604         if (dev->data->rx_queues != NULL) {
1605                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1606                         rte_free(dev->data->rx_queues[i]);
1607                         dev->data->rx_queues[i] = NULL;
1608                 }
1609                 dev->data->nb_rx_queues = 0;
1610         }
1611
1612         if (dev->data->tx_queues != NULL) {
1613                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1614                         rte_free(dev->data->tx_queues[i]);
1615                         dev->data->tx_queues[i] = NULL;
1616                 }
1617                 dev->data->nb_tx_queues = 0;
1618         }
1619 }
1620
1621 void
1622 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
1623 {
1624         struct bond_dev_private *internals = eth_dev->data->dev_private;
1625         uint8_t i;
1626
1627         if (internals->mode == BONDING_MODE_8023AD) {
1628                 struct port *port;
1629                 void *pkt = NULL;
1630
1631                 bond_mode_8023ad_stop(eth_dev);
1632
1633                 /* Discard all messages to/from mode 4 state machines */
1634                 for (i = 0; i < internals->active_slave_count; i++) {
1635                         port = &mode_8023ad_ports[internals->active_slaves[i]];
1636
1637                         RTE_ASSERT(port->rx_ring != NULL);
1638                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
1639                                 rte_pktmbuf_free(pkt);
1640
1641                         RTE_ASSERT(port->tx_ring != NULL);
1642                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
1643                                 rte_pktmbuf_free(pkt);
1644                 }
1645         }
1646
1647         if (internals->mode == BONDING_MODE_TLB ||
1648                         internals->mode == BONDING_MODE_ALB) {
1649                 bond_tlb_disable(internals);
1650                 for (i = 0; i < internals->active_slave_count; i++)
1651                         tlb_last_obytets[internals->active_slaves[i]] = 0;
1652         }
1653
1654         internals->active_slave_count = 0;
1655         internals->link_status_polling_enabled = 0;
1656         for (i = 0; i < internals->slave_count; i++)
1657                 internals->slaves[i].last_link_status = 0;
1658
1659         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1660         eth_dev->data->dev_started = 0;
1661 }
1662
1663 void
1664 bond_ethdev_close(struct rte_eth_dev *dev)
1665 {
1666         struct bond_dev_private *internals = dev->data->dev_private;
1667         uint8_t bond_port_id = internals->port_id;
1668         int skipped = 0;
1669
1670         RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name);
1671         while (internals->slave_count != skipped) {
1672                 uint8_t port_id = internals->slaves[skipped].port_id;
1673
1674                 rte_eth_dev_stop(port_id);
1675
1676                 if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
1677                         RTE_LOG(ERR, EAL,
1678                                 "Failed to remove port %d from bonded device "
1679                                 "%s\n", port_id, dev->device->name);
1680                         skipped++;
1681                 }
1682         }
1683         bond_ethdev_free_queues(dev);
1684         rte_bitmap_reset(internals->vlan_filter_bmp);
1685 }
1686
1687 /* forward declaration */
1688 static int bond_ethdev_configure(struct rte_eth_dev *dev);
1689
1690 static void
1691 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1692 {
1693         struct bond_dev_private *internals = dev->data->dev_private;
1694         uint16_t max_nb_rx_queues = UINT16_MAX;
1695         uint16_t max_nb_tx_queues = UINT16_MAX;
1696
1697         dev_info->max_mac_addrs = 1;
1698
1699         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
1700                                   ? internals->candidate_max_rx_pktlen
1701                                   : ETHER_MAX_JUMBO_FRAME_LEN;
1702
1703         if (internals->slave_count > 0) {
1704                 /* Max number of tx/rx queues that the bonded device can
1705                  * support is the minimum values of the bonded slaves, as
1706                  * all slaves must be capable of supporting the same number
1707                  * of tx/rx queues.
1708                  */
1709                 struct rte_eth_dev_info slave_info;
1710                 uint8_t idx;
1711
1712                 for (idx = 0; idx < internals->slave_count; idx++) {
1713                         rte_eth_dev_info_get(internals->slaves[idx].port_id,
1714                                         &slave_info);
1715
1716                         if (slave_info.max_rx_queues < max_nb_rx_queues)
1717                                 max_nb_rx_queues = slave_info.max_rx_queues;
1718
1719                         if (slave_info.max_tx_queues < max_nb_tx_queues)
1720                                 max_nb_tx_queues = slave_info.max_tx_queues;
1721                 }
1722         }
1723
1724         dev_info->max_rx_queues = max_nb_rx_queues;
1725         dev_info->max_tx_queues = max_nb_tx_queues;
1726
1727         dev_info->min_rx_bufsize = 0;
1728
1729         dev_info->rx_offload_capa = internals->rx_offload_capa;
1730         dev_info->tx_offload_capa = internals->tx_offload_capa;
1731         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
1732
1733         dev_info->reta_size = internals->reta_size;
1734 }
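
/*
 * Note: with at least one slave attached, the bond advertises the minimum
 * max_rx_queues/max_tx_queues across its slaves, since every slave must be
 * able to back each bonded queue; with no slaves it reports UINT16_MAX.
 */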
1735
1736 static int
1737 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1738 {
1739         int res;
1740         uint8_t i;
1741         struct bond_dev_private *internals = dev->data->dev_private;
1742
1743         /* don't do this while a slave is being added */
1744         rte_spinlock_lock(&internals->lock);
1745
1746         if (on)
1747                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
1748         else
1749                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
1750
1751         for (i = 0; i < internals->slave_count; i++) {
1752                 uint8_t port_id = internals->slaves[i].port_id;
1753
1754                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1755                 if (res == -ENOTSUP)
1756                         RTE_LOG(WARNING, PMD,
1757                                 "Setting VLAN filter on slave port %u not supported.\n",
1758                                 port_id);
1759         }
1760
1761         rte_spinlock_unlock(&internals->lock);
1762         return 0;
1763 }
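
/*
 * Illustrative usage, with a hypothetical bonded port id and VLAN: a filter
 * set on the bond is recorded in the bitmap and pushed to every slave:
 *
 *     rte_eth_dev_vlan_filter(bonded_port_id, 100, 1);
 */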
1764
1765 static int
1766 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1767                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
1768                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
1769 {
1770         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
1771                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
1772                                         0, dev->data->numa_node);
1773         if (bd_rx_q == NULL)
1774                 return -1;
1775
1776         bd_rx_q->queue_id = rx_queue_id;
1777         bd_rx_q->dev_private = dev->data->dev_private;
1778
1779         bd_rx_q->nb_rx_desc = nb_rx_desc;
1780
1781         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
1782         bd_rx_q->mb_pool = mb_pool;
1783
1784         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
1785
1786         return 0;
1787 }
1788
1789 static int
1790 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1791                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
1792                 const struct rte_eth_txconf *tx_conf)
1793 {
1794         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
1795                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
1796                                         0, dev->data->numa_node);
1797
1798         if (bd_tx_q == NULL)
1799                 return -1;
1800
1801         bd_tx_q->queue_id = tx_queue_id;
1802         bd_tx_q->dev_private = dev->data->dev_private;
1803
1804         bd_tx_q->nb_tx_desc = nb_tx_desc;
1805         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
1806
1807         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
1808
1809         return 0;
1810 }
1811
1812 static void
1813 bond_ethdev_rx_queue_release(void *queue)
1814 {
1815         if (queue == NULL)
1816                 return;
1817
1818         rte_free(queue);
1819 }
1820
1821 static void
1822 bond_ethdev_tx_queue_release(void *queue)
1823 {
1824         if (queue == NULL)
1825                 return;
1826
1827         rte_free(queue);
1828 }
1829
1830 static void
1831 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
1832 {
1833         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
1834         struct bond_dev_private *internals;
1835
1836         /* Default value for polling slave found is true as we don't want to
1837          * disable the polling thread if we cannot get the lock */
1838         int i, polling_slave_found = 1;
1839
1840         if (cb_arg == NULL)
1841                 return;
1842
1843         bonded_ethdev = (struct rte_eth_dev *)cb_arg;
1844         internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
1845
1846         if (!bonded_ethdev->data->dev_started ||
1847                 !internals->link_status_polling_enabled)
1848                 return;
1849
1850         /* If the device is currently being configured then don't check the
1851          * slaves' link status; wait until the next period */
1852         if (rte_spinlock_trylock(&internals->lock)) {
1853                 if (internals->slave_count > 0)
1854                         polling_slave_found = 0;
1855
1856                 for (i = 0; i < internals->slave_count; i++) {
1857                         if (!internals->slaves[i].link_status_poll_enabled)
1858                                 continue;
1859
1860                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
1861                         polling_slave_found = 1;
1862
1863                         /* Update slave link status */
1864                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
1865                                         internals->slaves[i].link_status_wait_to_complete);
1866
1867                         /* if link status has changed since last checked then call lsc
1868                          * event callback */
1869                         if (slave_ethdev->data->dev_link.link_status !=
1870                                         internals->slaves[i].last_link_status) {
1871                                 internals->slaves[i].last_link_status =
1872                                                 slave_ethdev->data->dev_link.link_status;
1873
1874                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
1875                                                 RTE_ETH_EVENT_INTR_LSC,
1876                                                 &bonded_ethdev->data->port_id,
1877                                                 NULL);
1878                         }
1879                 }
1880                 rte_spinlock_unlock(&internals->lock);
1881         }
1882
1883         if (polling_slave_found)
1884                 /* Set alarm to continue monitoring link status of slave ethdevs */
1885                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
1886                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
1887 }
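
/*
 * Note: the monitor re-arms itself via rte_eal_alarm_set() for as long as
 * at least one slave still needs software polling; if the device lock is
 * busy it skips this round rather than stall, and polls again next period.
 */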
1888
1889 static int
1890 bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
1891                 int wait_to_complete)
1892 {
1893         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1894
1895         if (!bonded_eth_dev->data->dev_started ||
1896                 internals->active_slave_count == 0) {
1897                 bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1898                 return 0;
1899         } else {
1900                 struct rte_eth_dev *slave_eth_dev;
1901                 int i, link_up = 0;
1902
1903                 for (i = 0; i < internals->active_slave_count; i++) {
1904                         slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
1905
1906                         (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
1907                                         wait_to_complete);
1908                         if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
1909                                 link_up = 1;
1910                                 break;
1911                         }
1912                 }
1913
1914                 bonded_eth_dev->data->dev_link.link_status = link_up;
1915         }
1916
1917         return 0;
1918 }
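
/*
 * Note: the bonded link is reported up if any active slave reports link up;
 * the loop above stops at the first such slave.
 */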
1919
1920 static void
1921 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1922 {
1923         struct bond_dev_private *internals = dev->data->dev_private;
1924         struct rte_eth_stats slave_stats;
1925         int i, j;
1926
1927         for (i = 0; i < internals->slave_count; i++) {
1928                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
1929
1930                 stats->ipackets += slave_stats.ipackets;
1931                 stats->opackets += slave_stats.opackets;
1932                 stats->ibytes += slave_stats.ibytes;
1933                 stats->obytes += slave_stats.obytes;
1934                 stats->imissed += slave_stats.imissed;
1935                 stats->ierrors += slave_stats.ierrors;
1936                 stats->oerrors += slave_stats.oerrors;
1937                 stats->rx_nombuf += slave_stats.rx_nombuf;
1938
1939                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1940                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
1941                         stats->q_opackets[j] += slave_stats.q_opackets[j];
1942                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
1943                         stats->q_obytes[j] += slave_stats.q_obytes[j];
1944                         stats->q_errors[j] += slave_stats.q_errors[j];
1945                 }
1946
1947         }
1948 }
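
/*
 * Note: the bonded device's counters are the sums over all slaves, and the
 * per-queue counters are accumulated by queue index, so q_ipackets[0] is
 * the total for queue 0 across every slave. A hypothetical caller:
 *
 *     struct rte_eth_stats stats;
 *     rte_eth_stats_get(bonded_port_id, &stats);
 */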
1949
1950 static void
1951 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
1952 {
1953         struct bond_dev_private *internals = dev->data->dev_private;
1954         int i;
1955
1956         for (i = 0; i < internals->slave_count; i++)
1957                 rte_eth_stats_reset(internals->slaves[i].port_id);
1958 }
1959
1960 static void
1961 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1962 {
1963         struct bond_dev_private *internals = eth_dev->data->dev_private;
1964         int i;
1965
1966         internals->promiscuous_en = 1;
1967
1968         switch (internals->mode) {
1969         /* Promiscuous mode is propagated to all slaves */
1970         case BONDING_MODE_ROUND_ROBIN:
1971         case BONDING_MODE_BALANCE:
1972         case BONDING_MODE_BROADCAST:
1973                 for (i = 0; i < internals->slave_count; i++)
1974                         rte_eth_promiscuous_enable(internals->slaves[i].port_id);
1975                 break;
1976         /* In mode 4, promiscuous mode is managed when a slave is added/removed */
1977         case BONDING_MODE_8023AD:
1978                 break;
1979         /* Promiscuous mode is propagated only to primary slave */
1980         case BONDING_MODE_ACTIVE_BACKUP:
1981         case BONDING_MODE_TLB:
1982         case BONDING_MODE_ALB:
1983         default:
1984                 rte_eth_promiscuous_enable(internals->current_primary_port);
1985         }
1986 }
1987
1988 static void
1989 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
1990 {
1991         struct bond_dev_private *internals = dev->data->dev_private;
1992         int i;
1993
1994         internals->promiscuous_en = 0;
1995
1996         switch (internals->mode) {
1997         /* Promiscuous mode is propagated to all slaves */
1998         case BONDING_MODE_ROUND_ROBIN:
1999         case BONDING_MODE_BALANCE:
2000         case BONDING_MODE_BROADCAST:
2001                 for (i = 0; i < internals->slave_count; i++)
2002                         rte_eth_promiscuous_disable(internals->slaves[i].port_id);
2003                 break;
2004         /* In mode 4, promiscuous mode is managed when a slave is added/removed */
2005         case BONDING_MODE_8023AD:
2006                 break;
2007         /* Promiscuous mode is propagated only to primary slave */
2008         case BONDING_MODE_ACTIVE_BACKUP:
2009         case BONDING_MODE_TLB:
2010         case BONDING_MODE_ALB:
2011         default:
2012                 rte_eth_promiscuous_disable(internals->current_primary_port);
2013         }
2014 }
2015
2016 static void
2017 bond_ethdev_delayed_lsc_propagation(void *arg)
2018 {
2019         if (arg == NULL)
2020                 return;
2021
2022         _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2023                         RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
2024 }
2025
2026 int
2027 bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
2028                 void *param, void *ret_param __rte_unused)
2029 {
2030         struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
2031         struct bond_dev_private *internals;
2032         struct rte_eth_link link;
2033         int rc = -1;
2034
2035         int i, valid_slave = 0;
2036         uint8_t active_pos;
2037         uint8_t lsc_flag = 0;
2038
2039         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2040                 return rc;
2041
2042         bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
2043         slave_eth_dev = &rte_eth_devices[port_id];
2044
2045         if (check_for_bonded_ethdev(bonded_eth_dev))
2046                 return rc;
2047
2048         internals = bonded_eth_dev->data->dev_private;
2049
2050         /* If the device isn't started don't handle interrupts */
2051         if (!bonded_eth_dev->data->dev_started)
2052                 return rc;
2053
2054         /* verify that port_id is a valid slave of bonded port */
2055         for (i = 0; i < internals->slave_count; i++) {
2056                 if (internals->slaves[i].port_id == port_id) {
2057                         valid_slave = 1;
2058                         break;
2059                 }
2060         }
2061
2062         if (!valid_slave)
2063                 return rc;
2064
2065         /* Search for port in active port list */
2066         active_pos = find_slave_by_id(internals->active_slaves,
2067                         internals->active_slave_count, port_id);
2068
2069         rte_eth_link_get_nowait(port_id, &link);
2070         if (link.link_status) {
2071                 if (active_pos < internals->active_slave_count)
2072                         return rc;
2073
2074                 /* if no active slave ports then set this port to be primary port */
2075                 if (internals->active_slave_count < 1) {
2076                         /* If first active slave, then change link status */
2077                         bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
2078                         internals->current_primary_port = port_id;
2079                         lsc_flag = 1;
2080
2081                         mac_address_slaves_update(bonded_eth_dev);
2082
2083                         /* Inherit eth dev link properties from first active slave */
2084                         link_properties_set(bonded_eth_dev,
2085                                         &(slave_eth_dev->data->dev_link));
2086                 } else {
2087                         if (link_properties_valid(
2088                                 &bonded_eth_dev->data->dev_link, &link) != 0) {
2089                                 slave_eth_dev->data->dev_flags &=
2090                                         (~RTE_ETH_DEV_BONDED_SLAVE);
2091                                 RTE_LOG(ERR, PMD,
2092                                         "port %u invalid speed/duplex\n",
2093                                         port_id);
2094                                 return rc;
2095                         }
2096                 }
2097
2098                 activate_slave(bonded_eth_dev, port_id);
2099
2100                 /* If user has defined the primary port then default to using it */
2101                 if (internals->user_defined_primary_port &&
2102                                 internals->primary_port == port_id)
2103                         bond_ethdev_primary_set(internals, port_id);
2104         } else {
2105                 if (active_pos == internals->active_slave_count)
2106                         return rc;
2107
2108                 /* Remove from active slave list */
2109                 deactivate_slave(bonded_eth_dev, port_id);
2110
2111                 /* No active slaves, change link status to down and reset other
2112                  * link properties */
2113                 if (internals->active_slave_count < 1) {
2114                         lsc_flag = 1;
2115                         bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2116
2117                         link_properties_reset(bonded_eth_dev);
2118                 }
2119
2120                 /* Update primary id, take first active slave from list or, if none
2121                  * are available, fall back to the configured primary port */
2122                 if (port_id == internals->current_primary_port) {
2123                         if (internals->active_slave_count > 0)
2124                                 bond_ethdev_primary_set(internals,
2125                                                 internals->active_slaves[0]);
2126                         else
2127                                 internals->current_primary_port = internals->primary_port;
2128                 }
2129         }
2130
2131         if (lsc_flag) {
2132                 /* Cancel any possible outstanding interrupts if delays are enabled */
2133                 if (internals->link_up_delay_ms > 0 ||
2134                         internals->link_down_delay_ms > 0)
2135                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2136                                         bonded_eth_dev);
2137
2138                 if (bonded_eth_dev->data->dev_link.link_status) {
2139                         if (internals->link_up_delay_ms > 0)
2140                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2141                                                 bond_ethdev_delayed_lsc_propagation,
2142                                                 (void *)bonded_eth_dev);
2143                         else
2144                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2145                                                 RTE_ETH_EVENT_INTR_LSC,
2146                                                 NULL, NULL);
2147
2148                 } else {
2149                         if (internals->link_down_delay_ms > 0)
2150                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2151                                                 bond_ethdev_delayed_lsc_propagation,
2152                                                 (void *)bonded_eth_dev);
2153                         else
2154                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2155                                                 RTE_ETH_EVENT_INTR_LSC,
2156                                                 NULL, NULL);
2157                 }
2158         }
2159         return 0;
2160 }
2161
2162 static int
2163 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2164                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2165 {
2166         unsigned i, j;
2167         int result = 0;
2168         int slave_reta_size;
2169         unsigned reta_count;
2170         struct bond_dev_private *internals = dev->data->dev_private;
2171
2172         if (reta_size != internals->reta_size)
2173                 return -EINVAL;
2174
2175         /* Copy RETA table */
2176         reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2177
2178         for (i = 0; i < reta_count; i++) {
2179                 internals->reta_conf[i].mask = reta_conf[i].mask;
2180                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2181                         if ((reta_conf[i].mask >> j) & 0x01)
2182                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2183         }
2184
2185         /* Fill rest of array */
2186         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2187                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2188                                 sizeof(internals->reta_conf[0]) * reta_count);
2189
2190         /* Propagate RETA over slaves */
2191         for (i = 0; i < internals->slave_count; i++) {
2192                 slave_reta_size = internals->slaves[i].reta_size;
2193                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2194                                 &internals->reta_conf[0], slave_reta_size);
2195                 if (result < 0)
2196                         return result;
2197         }
2198
2199         return 0;
2200 }
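
/*
 * Note: the bond keeps a single RETA image and replicates it to every
 * slave, passing each slave its own reta_size; the loop above first
 * duplicates the user-supplied groups across the rest of the internal
 * table so any slave-sized slice of it is consistent.
 */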
2201
2202 static int
2203 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2204                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2205 {
2206         int i, j;
2207         struct bond_dev_private *internals = dev->data->dev_private;
2208
2209         if (reta_size != internals->reta_size)
2210                 return -EINVAL;
2211
2212         /* Copy RETA table */
2213         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2214                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2215                         if ((reta_conf[i].mask >> j) & 0x01)
2216                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2217
2218         return 0;
2219 }
2220
2221 static int
2222 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2223                 struct rte_eth_rss_conf *rss_conf)
2224 {
2225         int i, result = 0;
2226         struct bond_dev_private *internals = dev->data->dev_private;
2227         struct rte_eth_rss_conf bond_rss_conf;
2228
2229         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2230
2231         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2232
2233         if (bond_rss_conf.rss_hf != 0)
2234                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2235
2236         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2237                         sizeof(internals->rss_key)) {
2238                 if (bond_rss_conf.rss_key_len == 0)
2239                         bond_rss_conf.rss_key_len = 40; /* default RSS hash key length in bytes */
2240                 internals->rss_key_len = bond_rss_conf.rss_key_len;
2241                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
2242                                 internals->rss_key_len);
2243         }
2244
2245         for (i = 0; i < internals->slave_count; i++) {
2246                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
2247                                 &bond_rss_conf);
2248                 if (result < 0)
2249                         return result;
2250         }
2251
2252         return 0;
2253 }
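
/*
 * Illustrative usage, assuming a hypothetical bonded port id: updating the
 * hash functions only (no key) on the bond propagates to all slaves:
 *
 *     struct rte_eth_rss_conf conf = { .rss_key = NULL,
 *                     .rss_hf = ETH_RSS_IP | ETH_RSS_TCP };
 *     rte_eth_dev_rss_hash_update(bonded_port_id, &conf);
 */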
2254
2255 static int
2256 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
2257                 struct rte_eth_rss_conf *rss_conf)
2258 {
2259         struct bond_dev_private *internals = dev->data->dev_private;
2260
2261         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
2262         rss_conf->rss_key_len = internals->rss_key_len;
2263         if (rss_conf->rss_key)
2264                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
2265
2266         return 0;
2267 }
2268
2269 const struct eth_dev_ops default_dev_ops = {
2270         .dev_start            = bond_ethdev_start,
2271         .dev_stop             = bond_ethdev_stop,
2272         .dev_close            = bond_ethdev_close,
2273         .dev_configure        = bond_ethdev_configure,
2274         .dev_infos_get        = bond_ethdev_info,
2275         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
2276         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
2277         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
2278         .rx_queue_release     = bond_ethdev_rx_queue_release,
2279         .tx_queue_release     = bond_ethdev_tx_queue_release,
2280         .link_update          = bond_ethdev_link_update,
2281         .stats_get            = bond_ethdev_stats_get,
2282         .stats_reset          = bond_ethdev_stats_reset,
2283         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
2284         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
2285         .reta_update          = bond_ethdev_rss_reta_update,
2286         .reta_query           = bond_ethdev_rss_reta_query,
2287         .rss_hash_update      = bond_ethdev_rss_hash_update,
2288         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get
2289 };
2290
2291 static int
2292 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
2293 {
2294         const char *name = rte_vdev_device_name(dev);
2295         uint8_t socket_id = dev->device.numa_node;
2296         struct bond_dev_private *internals = NULL;
2297         struct rte_eth_dev *eth_dev = NULL;
2298         uint32_t vlan_filter_bmp_size;
2299
2300         /* now do all data allocation - for the eth_dev structure
2301          * and internal (private) data
2302          */
2303
2304         /* reserve an ethdev entry */
2305         eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
2306         if (eth_dev == NULL) {
2307                 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
2308                 goto err;
2309         }
2310
2311         internals = eth_dev->data->dev_private;
2312         eth_dev->data->nb_rx_queues = (uint16_t)1;
2313         eth_dev->data->nb_tx_queues = (uint16_t)1;
2314
2315         eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
2316                         socket_id);
2317         if (eth_dev->data->mac_addrs == NULL) {
2318                 RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
2319                 goto err;
2320         }
2321
2322         eth_dev->dev_ops = &default_dev_ops;
2323         eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
2324                 RTE_ETH_DEV_DETACHABLE;
2325
2326         rte_spinlock_init(&internals->lock);
2327
2328         internals->port_id = eth_dev->data->port_id;
2329         internals->mode = BONDING_MODE_INVALID;
2330         internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
2331         internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
2332         internals->xmit_hash = xmit_l2_hash;
2333         internals->user_defined_mac = 0;
2334         internals->link_props_set = 0;
2335
2336         internals->link_status_polling_enabled = 0;
2337
2338         internals->link_status_polling_interval_ms =
2339                 DEFAULT_POLLING_INTERVAL_10_MS;
2340         internals->link_down_delay_ms = 0;
2341         internals->link_up_delay_ms = 0;
2342
2343         internals->slave_count = 0;
2344         internals->active_slave_count = 0;
2345         internals->rx_offload_capa = 0;
2346         internals->tx_offload_capa = 0;
2347         internals->candidate_max_rx_pktlen = 0;
2348         internals->max_rx_pktlen = 0;
2349
2350         /* Initially allow to choose any offload type */
2351         internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
2352
2353         memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
2354         memset(internals->slaves, 0, sizeof(internals->slaves));
2355
2356         /* Set mode 4 default configuration */
2357         bond_mode_8023ad_setup(eth_dev, NULL);
2358         if (bond_ethdev_mode_set(eth_dev, mode)) {
2359                 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
2360                                  eth_dev->data->port_id, mode);
2361                 goto err;
2362         }
2363
2364         vlan_filter_bmp_size =
2365                 rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
2366         internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
2367                                                    RTE_CACHE_LINE_SIZE);
2368         if (internals->vlan_filter_bmpmem == NULL) {
2369                 RTE_BOND_LOG(ERR,
2370                              "Failed to allocate vlan bitmap for bonded device %u\n",
2371                              eth_dev->data->port_id);
2372                 goto err;
2373         }
2374
2375         internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
2376                         internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
2377         if (internals->vlan_filter_bmp == NULL) {
2378                 RTE_BOND_LOG(ERR,
2379                              "Failed to init vlan bitmap for bonded device %u\n",
2380                              eth_dev->data->port_id);
2381                 rte_free(internals->vlan_filter_bmpmem);
2382                 goto err;
2383         }
2384
2385         return eth_dev->data->port_id;
2386
2387 err:
2388         rte_free(internals);
2389         if (eth_dev != NULL) {
2390                 rte_free(eth_dev->data->mac_addrs);
2391                 rte_eth_dev_release_port(eth_dev);
2392         }
2393         return -1;
2394 }
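
/*
 * Note: a freshly allocated bond starts with a single Rx/Tx queue; the
 * real queue counts are applied when the application configures the device.
 */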
2395
2396 static int
2397 bond_probe(struct rte_vdev_device *dev)
2398 {
2399         const char *name;
2400         struct bond_dev_private *internals;
2401         struct rte_kvargs *kvlist;
2402         uint8_t bonding_mode, socket_id;
2403         int  arg_count, port_id;
2404
2405         if (!dev)
2406                 return -EINVAL;
2407
2408         name = rte_vdev_device_name(dev);
2409         RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
2410
2411         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
2412                 pmd_bond_init_valid_arguments);
2413         if (kvlist == NULL)
2414                 return -1;
2415
2416         /* Parse link bonding mode */
2417         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
2418                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
2419                                 &bond_ethdev_parse_slave_mode_kvarg,
2420                                 &bonding_mode) != 0) {
2421                         RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
2422                                         name);
2423                         goto parse_error;
2424                 }
2425         } else {
2426                 RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
2427                                 "device %s\n", name);
2428                 goto parse_error;
2429         }
2430
2431         /* Parse socket id to create bonding device on */
2432         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
2433         if (arg_count == 1) {
2434                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
2435                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
2436                                 != 0) {
2437                         RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
2438                                         "bonded device %s\n", name);
2439                         goto parse_error;
2440                 }
2441         } else if (arg_count > 1) {
2442                 RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
2443                                 "bonded device %s\n", name);
2444                 goto parse_error;
2445         } else {
2446                 socket_id = rte_socket_id();
2447         }
2448
2449         dev->device.numa_node = socket_id;
2450
2451         /* Create link bonding eth device */
2452         port_id = bond_alloc(dev, bonding_mode);
2453         if (port_id < 0) {
2454                 RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
2455                                 "socket %u.\n", name, bonding_mode, socket_id);
2456                 goto parse_error;
2457         }
2458         internals = rte_eth_devices[port_id].data->dev_private;
2459         internals->kvlist = kvlist;
2460
2461         RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
2462                         "socket %u.\n", name, port_id, bonding_mode, socket_id);
2463         return 0;
2464
2465 parse_error:
2466         rte_kvargs_free(kvlist);
2467
2468         return -1;
2469 }
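
/*
 * Illustrative EAL command line matching the kvargs parsed above; the
 * device name, core list and slave port ids are examples only:
 *
 *     testpmd -l 0-3 -n 4 \
 *             --vdev 'net_bonding0,mode=2,slave=0,slave=1,socket_id=0' \
 *             -- -i
 */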
2470
2471 static int
2472 bond_remove(struct rte_vdev_device *dev)
2473 {
2474         struct rte_eth_dev *eth_dev;
2475         struct bond_dev_private *internals;
2476         const char *name;
2477
2478         if (!dev)
2479                 return -EINVAL;
2480
2481         name = rte_vdev_device_name(dev);
2482         RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
2483
2484         /* now free all data allocation - for the eth_dev structure
2485          * and internal (private) data
2486          */
2487
2488         /* find an ethdev entry */
2489         eth_dev = rte_eth_dev_allocated(name);
2490         if (eth_dev == NULL)
2491                 return -ENODEV;
2492
2493         RTE_ASSERT(eth_dev->device == &dev->device);
2494
2495         internals = eth_dev->data->dev_private;
2496         if (internals->slave_count != 0)
2497                 return -EBUSY;
2498
2499         if (eth_dev->data->dev_started == 1) {
2500                 bond_ethdev_stop(eth_dev);
2501                 bond_ethdev_close(eth_dev);
2502         }
2503
2504         eth_dev->dev_ops = NULL;
2505         eth_dev->rx_pkt_burst = NULL;
2506         eth_dev->tx_pkt_burst = NULL;
2507
2509         rte_bitmap_free(internals->vlan_filter_bmp);
2510         rte_free(internals->vlan_filter_bmpmem);
2511         rte_free(eth_dev->data->dev_private);
2512         rte_free(eth_dev->data->mac_addrs);
2513
2514         rte_eth_dev_release_port(eth_dev);
2515
2516         return 0;
2517 }
2518
2519 /* this part will resolve the slave port ids after all the other pdevs and
2520  * vdevs have been allocated */
2521 static int
2522 bond_ethdev_configure(struct rte_eth_dev *dev)
2523 {
2524         const char *name = dev->device->name;
2525         struct bond_dev_private *internals = dev->data->dev_private;
2526         struct rte_kvargs *kvlist = internals->kvlist;
2527         int arg_count;
2528         uint8_t port_id = dev - rte_eth_devices;
2529
2530         static const uint8_t default_rss_key[40] = {
2531                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
2532                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2533                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
2534                 0xBE, 0xAC, 0x01, 0xFA
2535         };
2536
2537         unsigned i, j;
2538
2539         /* If RSS is enabled, fill table and key with default values */
2540         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
2541                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
2542                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
2543                 memcpy(internals->rss_key, default_rss_key, 40);
2544
2545                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
2546                         internals->reta_conf[i].mask = ~0LL;
2547                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2548                                 internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
2549                 }
2550         }
2551
2552         /* set the max_rx_pktlen */
2553         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
2554
2555         /*
2556          * if no kvlist, it means that this bonded device has been created
2557          * through the bonding api.
2558          */
2559         if (!kvlist)
2560                 return 0;
2561
2562         /* Parse MAC address for bonded device */
2563         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
2564         if (arg_count == 1) {
2565                 struct ether_addr bond_mac;
2566
2567                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
2568                                 &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
2569                         RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
2570                                         name);
2571                         return -1;
2572                 }
2573
2574                 /* Set MAC address */
2575                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
2576                         RTE_LOG(ERR, EAL,
2577                                         "Failed to set mac address on bonded device %s\n",
2578                                         name);
2579                         return -1;
2580                 }
2581         } else if (arg_count > 1) {
2582                 RTE_LOG(ERR, EAL,
2583                                 "MAC address can be specified only once for bonded device %s\n",
2584                                 name);
2585                 return -1;
2586         }
2587
2588         /* Parse/set balance mode transmit policy */
2589         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
2590         if (arg_count == 1) {
2591                 uint8_t xmit_policy;
2592
2593                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
2594                                 &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
2595                                                 0) {
2596                         RTE_LOG(INFO, EAL,
2597                                         "Invalid xmit policy specified for bonded device %s\n",
2598                                         name);
2599                         return -1;
2600                 }
2601
2602                 /* Set balance mode transmit policy */
2603                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
2604                         RTE_LOG(ERR, EAL,
2605                                         "Failed to set balance xmit policy on bonded device %s\n",
2606                                         name);
2607                         return -1;
2608                 }
2609         } else if (arg_count > 1) {
2610                 RTE_LOG(ERR, EAL,
2611                                 "Transmit policy can be specified only once for bonded device"
2612                                 " %s\n", name);
2613                 return -1;
2614         }
2615
2616         /* Parse/add slave ports to bonded device */
2617         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
2618                 struct bond_ethdev_slave_ports slave_ports;
2619                 unsigned i;
2620
2621                 memset(&slave_ports, 0, sizeof(slave_ports));
2622
2623                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
2624                                 &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
2625                         RTE_LOG(ERR, EAL,
2626                                         "Failed to parse slave ports for bonded device %s\n",
2627                                         name);
2628                         return -1;
2629                 }
2630
2631                 for (i = 0; i < slave_ports.slave_count; i++) {
2632                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
2633                                 RTE_LOG(ERR, EAL,
2634                                                 "Failed to add port %d as slave to bonded device %s\n",
2635                                                 slave_ports.slaves[i], name);
2636                         }
2637                 }
2638
2639         } else {
2640                 RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
2641                 return -1;
2642         }
2643
	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set the primary slave port */
		if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}

struct rte_vdev_driver pmd_bond_drv = {
	.probe = bond_probe,
	.remove = bond_remove,
};

RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
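/* Keep the legacy "eth_bond" vdev name usable as an alias. */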
RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);

RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
	"slave=<ifc> "
	"primary=<ifc> "
	"mode=[0-6] "
	"xmit_policy=[l2 | l23 | l34] "
	"socket_id=<int> "
	"mac=<mac addr> "
	"lsc_poll_period_ms=<int> "
	"up_delay=<int> "
	"down_delay=<int>");
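
/*
 * Example usage (illustrative only; the PCI addresses below are
 * hypothetical and must match the slave ports on your system):
 *
 *   --vdev 'net_bonding0,mode=2,slave=0000:00:08.0,slave=0000:00:09.0,
 *           xmit_policy=l34,up_delay=10,down_delay=50'
 */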