dpdk.git: drivers/net/bonding/rte_eth_bond_pmd.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"

#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];

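/*
 * Return the byte offset that VLAN headers add between the Ethernet header
 * and the L3 header, and advance *proto past them. Handles untagged packets
 * (offset 0), single-tagged packets, and QinQ (two stacked VLAN tags), so
 * for a QinQ frame it returns 2 * sizeof(struct vlan_hdr) with *proto set
 * to the inner ethertype.
 */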
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
        size_t vlan_offset = 0;

        if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
                struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

                vlan_offset = sizeof(struct vlan_hdr);
                *proto = vlan_hdr->eth_proto;

                if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
                        vlan_hdr = vlan_hdr + 1;
                        *proto = vlan_hdr->eth_proto;
                        vlan_offset += sizeof(struct vlan_hdr);
                }
        }
        return vlan_offset;
}

static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        uint16_t num_rx_slave = 0;
        uint16_t num_rx_total = 0;

        int i;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
                /* Offset into *bufs increases as packets are received from
                 * the other slaves */
                num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
                                bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
                if (num_rx_slave) {
                        num_rx_total += num_rx_slave;
                        nb_pkts -= num_rx_slave;
                }
        }

        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        return rte_eth_rx_burst(internals->current_primary_port,
                        bd_rx_q->queue_id, bufs, nb_pkts);
}

static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
{
        const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);

        return !vlan_tci && (ethertype == ether_type_slow_be &&
                (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}

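/*
 * Mode 4 (802.3AD) RX: polls the active slaves round-robin, starting from
 * the slave after the one used on the previous call. Slow protocol frames
 * (LACP/marker) are diverted to the mode 4 state machine instead of being
 * returned to the application, and data frames are dropped when the slave
 * is not in COLLECTING state, or when they are not addressed to the bonded
 * device while promiscuous mode is off.
 */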
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct ether_addr bond_mac;

        struct ether_hdr *hdr;

        const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
        uint16_t num_rx_total = 0;      /* Total number of received packets */
        uint8_t slaves[RTE_MAX_ETHPORTS];
        uint8_t slave_count, idx;

        uint8_t collecting;  /* current slave collecting status */
        const uint8_t promisc = internals->promiscuous_en;
        uint8_t i, j, k;
        uint8_t subtype;

        rte_eth_macaddr_get(internals->port_id, &bond_mac);
        /* Copy slave list to protect against slave up/down changes during rx
         * bursting */
        slave_count = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * slave_count);

        idx = internals->active_slave;
        if (idx >= slave_count) {
                internals->active_slave = 0;
                idx = 0;
        }
        for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
                j = num_rx_total;
                collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
                                         COLLECTING);

                /* Read packets from this slave */
                num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
                                &bufs[num_rx_total], nb_pkts - num_rx_total);

                for (k = j; k < 2 && k < num_rx_total; k++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

                /* Handle slow protocol packets. */
                while (j < num_rx_total) {
                        if (j + 3 < num_rx_total)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

                        hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
                        subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

                        /* Remove the packet from the array if it is a slow packet,
                         * or the slave is not in collecting state, or the bonding
                         * interface is not in promiscuous mode and the destination
                         * address does not match. */
                        if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
                                !collecting || (!promisc &&
                                        !is_multicast_ether_addr(&hdr->d_addr) &&
                                        !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

                                if (hdr->ether_type == ether_type_slow_be) {
                                        bond_mode_8023ad_handle_slow_pkt(
                                            internals, slaves[idx], bufs[j]);
                                } else
                                        rte_pktmbuf_free(bufs[j]);

                                /* Packet is managed by mode 4 or dropped, shift the array */
                                num_rx_total--;
                                if (j < num_rx_total) {
                                        memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
                                                (num_rx_total - j));
                                }
                        } else
                                j++;
                }
                if (unlikely(++idx == slave_count))
                        idx = 0;
        }

        internals->active_slave = idx;
        return num_rx_total;
}

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf)
{
        switch (arp_op) {
        case ARP_OP_REQUEST:
                snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
                return;
        case ARP_OP_REPLY:
                snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
                return;
        case ARP_OP_REVREQUEST:
                snprintf(buf, sizeof("Reverse ARP Request"), "%s",
                                "Reverse ARP Request");
                return;
        case ARP_OP_REVREPLY:
                snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
                                "Reverse ARP Reply");
                return;
        case ARP_OP_INVREQUEST:
                snprintf(buf, sizeof("Peer Identify Request"), "%s",
                                "Peer Identify Request");
                return;
        case ARP_OP_INVREPLY:
                snprintf(buf, sizeof("Peer Identify Reply"), "%s",
                                "Peer Identify Reply");
                return;
        default:
                break;
        }
        snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
        return;
}
#endif
#define MaxIPv4String   16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
        uint32_t ipv4_addr;

        ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
        snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
                (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
                ipv4_addr & 0xFF);
}

#define MAX_CLIENTS_NUMBER      128
uint8_t active_clients;
struct client_stats_t {
        uint8_t port;
        uint32_t ipv4_addr;
        uint32_t ipv4_rx_packets;
        uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

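/*
 * Track per-client RX/TX packet counts for the mode 6 debug output, keyed by
 * (IPv4 address, port). The direction is inferred from which global burst
 * counter the caller passes in: &burstnumberRX means RX, anything else TX.
 */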
static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
        int i = 0;

        for (; i < MAX_CLIENTS_NUMBER; i++) {
                if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
                        /* Just update RX packets number for this client */
                        if (TXorRXindicator == &burstnumberRX)
                                client_stats[i].ipv4_rx_packets++;
                        else
                                client_stats[i].ipv4_tx_packets++;
                        return;
                }
        }
        /* We have a new client. Insert it into the table and update its stats,
         * unless the table is already full. */
        if (active_clients >= MAX_CLIENTS_NUMBER)
                return;
        if (TXorRXindicator == &burstnumberRX)
                client_stats[active_clients].ipv4_rx_packets++;
        else
                client_stats[active_clients].ipv4_tx_packets++;
        client_stats[active_clients].ipv4_addr = addr;
        client_stats[active_clients].port = port;
        active_clients++;
}

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber)     \
                RTE_LOG(DEBUG, PMD, \
                "%s " \
                "port:%d " \
                "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
                "SrcIP:%s " \
                "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
                "DstIP:%s " \
                "%s " \
                "%d\n", \
                info, \
                port, \
                eth_h->s_addr.addr_bytes[0], \
                eth_h->s_addr.addr_bytes[1], \
                eth_h->s_addr.addr_bytes[2], \
                eth_h->s_addr.addr_bytes[3], \
                eth_h->s_addr.addr_bytes[4], \
                eth_h->s_addr.addr_bytes[5], \
                src_ip, \
                eth_h->d_addr.addr_bytes[0], \
                eth_h->d_addr.addr_bytes[1], \
                eth_h->d_addr.addr_bytes[2], \
                eth_h->d_addr.addr_bytes[3], \
                eth_h->d_addr.addr_bytes[4], \
                eth_h->d_addr.addr_bytes[5], \
                dst_ip, \
                arp_op, \
                ++burstnumber)
#endif

static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
                uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
        struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        struct arp_hdr *arp_h;
        char dst_ip[16];
        char ArpOp[24];
        char buf[16];
#endif
        char src_ip[16];

        uint16_t ether_type = eth_h->ether_type;
        uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        snprintf(buf, 16, "%s", info);
#endif

        if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
                ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
                ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
                update_client_stats(ipv4_h->src_addr, port, burstnumber);
        }
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
                arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
                ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
                arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
        }
#endif
}
#endif

static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;
        struct ether_hdr *eth_h;
        uint16_t ether_type, offset;
        uint16_t nb_recv_pkts;
        int i;

        nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

        for (i = 0; i < nb_recv_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
                        bond_mode_alb_arp_recv(eth_h, offset, internals);
                }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
                        mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
        }

        return nb_recv_pkts;
}

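/*
 * Mode 0 (round robin) TX: packets are spread across the active slaves one
 * by one, continuing from where the previous burst left off. Note that the
 * starting index is kept in a function-local static, so it is shared by all
 * bonded devices and TX queues that use this burst function.
 */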
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave;

        static int slave_idx = 0;
        int i, cslave_idx = 0, tx_fail_total = 0;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate the per-slave mbuf arrays with the packets to be sent on
         * each slave */
        for (i = 0; i < nb_pkts; i++) {
                cslave_idx = (slave_idx + i) % num_of_slaves;
                slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
        }

        /* Increment current slave index so the next call to tx burst starts on the
         * next slave */
        slave_idx = ++cslave_idx;

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* If tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += tx_fail_slave;

                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                                &slave_bufs[i][num_tx_slave],
                                                tx_fail_slave * sizeof(bufs[0]));
                        }
                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
                struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        if (internals->active_slave_count < 1)
                return 0;

        return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
                        bufs, nb_pkts);
}

static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
        unaligned_uint16_t *word_src_addr =
                (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
        unaligned_uint16_t *word_dst_addr =
                (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
        return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
        unaligned_uint32_t *word_src_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
        unaligned_uint32_t *word_dst_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]) ^
                        (word_src_addr[3] ^ word_dst_addr[3]);
}

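/*
 * Transmit hash policies used by mode 2 (balance) and mode 4 (802.3AD).
 * Each policy XORs the relevant header words together, folds the upper bits
 * into the lower ones, and reduces the result modulo the slave count, so
 * packets of one flow always map to the same slave. For example, with two
 * slaves a flow whose folded hash is 0x2b is pinned to slave 0x2b % 2 = 1.
 */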
uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

        uint32_t hash = ether_hash(eth_hdr);

        hash ^= hash >> 8;

        return hash % slave_count;
}

uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
        uint16_t proto = eth_hdr->ether_type;
        size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
        uint32_t hash, l3hash = 0;

        hash = ether_hash(eth_hdr);

        if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
                struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv4_hash(ipv4_hdr);

        } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
                struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv6_hash(ipv6_hdr);
        }

        hash = hash ^ l3hash;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        return hash % slave_count;
}

uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
        uint16_t proto = eth_hdr->ether_type;
        size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

        struct udp_hdr *udp_hdr = NULL;
        struct tcp_hdr *tcp_hdr = NULL;
        uint32_t hash, l3hash = 0, l4hash = 0;

        if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
                struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                size_t ip_hdr_offset;

                l3hash = ipv4_hash(ipv4_hdr);

                /* there is no L4 header in fragmented packet */
                if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
                        ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
                                        IPV4_IHL_MULTIPLIER;

                        if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
                                tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
                                                ip_hdr_offset);
                                l4hash = HASH_L4_PORTS(tcp_hdr);
                        } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
                                udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
                                                ip_hdr_offset);
                                l4hash = HASH_L4_PORTS(udp_hdr);
                        }
                }
        } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
                struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
                                ((char *)(eth_hdr + 1) + vlan_offset);
                l3hash = ipv6_hash(ipv6_hdr);

                if (ipv6_hdr->proto == IPPROTO_TCP) {
                        tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
                        l4hash = HASH_L4_PORTS(tcp_hdr);
                } else if (ipv6_hdr->proto == IPPROTO_UDP) {
                        udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
                        l4hash = HASH_L4_PORTS(udp_hdr);
                }
        }

        hash = l3hash ^ l4hash;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        return hash % slave_count;
}

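/*
 * Note on fragmented IPv4 packets above: only the first fragment carries the
 * TCP/UDP header, so hashing "ports" from later fragments would read payload
 * bytes and could split one flow across slaves; l4hash is therefore left at
 * 0 for any fragment. For IPv6 the code assumes the L4 header immediately
 * follows the fixed header, i.e. extension headers are not walked.
 */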
struct bwg_slave {
        uint64_t bwg_left_int;
        uint64_t bwg_left_remainder;
        uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
        int i;

        for (i = 0; i < internals->active_slave_count; i++) {
                tlb_last_obytets[internals->active_slaves[i]] = 0;
        }
}

static int
bandwidth_cmp(const void *a, const void *b)
{
        const struct bwg_slave *bwg_a = a;
        const struct bwg_slave *bwg_b = b;
        int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
        int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
                        (int64_t)bwg_a->bwg_left_remainder;
        if (diff > 0)
                return 1;
        else if (diff < 0)
                return -1;
        else if (diff2 > 0)
                return 1;
        else if (diff2 < 0)
                return -1;
        else
                return 0;
}

static void
bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
                struct bwg_slave *bwg_slave)
{
        struct rte_eth_link link_status;

        rte_eth_link_get(port_id, &link_status);
        uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
        if (link_bwg == 0)
                return;
        link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
        bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
        bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}

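/*
 * bandwidth_left() scales the link speed (Mbps) to the byte capacity the
 * link could have carried since the last stats reset, then stores the unused
 * fraction of that capacity as an integer quotient plus remainder.
 * bandwidth_cmp() sorts slaves so the ones with the most spare bandwidth
 * come first, which is the order the TLB transmit path uses.
 */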
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
        struct bond_dev_private *internals = arg;
        struct rte_eth_stats slave_stats;
        struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
        uint8_t slave_count;
        uint64_t tx_bytes;

        uint8_t update_stats = 0;
        uint8_t i, slave_id;

        internals->slave_update_idx++;

        if (internals->slave_update_idx >= REORDER_PERIOD_MS)
                update_stats = 1;

        for (i = 0; i < internals->active_slave_count; i++) {
                slave_id = internals->active_slaves[i];
                rte_eth_stats_get(slave_id, &slave_stats);
                tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
                bandwidth_left(slave_id, tx_bytes,
                                internals->slave_update_idx, &bwg_array[i]);
                bwg_array[i].slave = slave_id;

                if (update_stats) {
                        tlb_last_obytets[slave_id] = slave_stats.obytes;
                }
        }

        if (update_stats == 1)
                internals->slave_update_idx = 0;

        slave_count = i;
        qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
        for (i = 0; i < slave_count; i++)
                internals->tlb_slaves_order[i] = bwg_array[i].slave;

        rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
                        (struct bond_dev_private *)internals);
}

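/*
 * Mode 5 (TLB) TX: slaves are tried in the order computed by the callback
 * above (most spare bandwidth first), and each outgoing frame that carries
 * the primary slave's source MAC is rewritten to the transmitting slave's
 * MAC so that peers learn a per-slave address. The callback re-arms itself
 * via rte_eal_alarm_set() every REORDER_PERIOD_MS milliseconds once
 * bond_tlb_enable() has kicked it off.
 */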
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_eth_dev *primary_port =
                        &rte_eth_devices[internals->primary_port];
        uint16_t num_tx_total = 0;
        uint8_t i, j;

        uint8_t num_of_slaves = internals->active_slave_count;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        struct ether_hdr *ether_hdr;
        struct ether_addr primary_slave_addr;
        struct ether_addr active_slave_addr;

        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->tlb_slaves_order,
                                sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

        ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

        if (nb_pkts > 3) {
                for (i = 0; i < 3; i++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
        }

        for (i = 0; i < num_of_slaves; i++) {
                rte_eth_macaddr_get(slaves[i], &active_slave_addr);
                for (j = num_tx_total; j < nb_pkts; j++) {
                        if (j + 3 < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

                        ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
                        if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
                                ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
                }

                num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                bufs + num_tx_total, nb_pkts - num_tx_total);

                if (num_tx_total == nb_pkts)
                        break;
        }

        return num_tx_total;
}

void
bond_tlb_disable(struct bond_dev_private *internals)
{
        rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
        bond_ethdev_update_tlb_slave_cb(internals);
}

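/*
 * Mode 6 (ALB) TX: ARP packets are steered by the ALB client table so each
 * peer is answered from a fixed slave (and that slave's MAC), ARP update
 * packets are generated from the mempool when the table is marked "need to
 * transmit" (ntt), and all non-ARP traffic falls back to the TLB transmit
 * path.
 */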
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct ether_hdr *eth_h;
        uint16_t ether_type, offset;

        struct client_data *client_info;

        /*
         * We create transmit buffers for every slave and one additional to send
         * through tlb. In the worst case every packet will be sent on one port.
         */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
        uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

        /*
         * We create separate transmit buffers for update packets as they won't
         * be counted in num_tx_total.
         */
        struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
        uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

        struct rte_mbuf *upd_pkt;
        size_t pkt_size;

        uint16_t num_send, num_not_send = 0;
        uint16_t num_tx_total = 0;
        uint8_t slave_idx;

        int i, j;

        /* Search tx buffer for ARP packets and forward them to alb */
        for (i = 0; i < nb_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
                        slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

                        /* Change src mac in eth header */
                        rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

                        /* Add packet to slave tx buffer */
                        slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
                        slave_bufs_pkts[slave_idx]++;
                } else {
                        /* If packet is not ARP, send it with TLB policy */
                        slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
                                        bufs[i];
                        slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
                }
        }

        /* Update connected client ARP tables */
        if (internals->mode6.ntt) {
                for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
                        client_info = &internals->mode6.client_table[i];

                        if (client_info->in_use) {
                                /* Allocate new packet to send ARP update on current slave */
                                upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
                                if (upd_pkt == NULL) {
                                        RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
                                        continue;
                                }
                                pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
                                                + client_info->vlan_count * sizeof(struct vlan_hdr);
                                upd_pkt->data_len = pkt_size;
                                upd_pkt->pkt_len = pkt_size;

                                slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
                                                internals);

                                /* Add packet to update tx buffer */
                                update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
                                update_bufs_pkts[slave_idx]++;
                        }
                }
                internals->mode6.ntt = 0;
        }

        /* Send ARP packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (slave_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
                                        slave_bufs[i], slave_bufs_pkts[i]);
                        for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
                                bufs[nb_pkts - 1 - num_not_send - j] =
                                                slave_bufs[i][nb_pkts - 1 - j];
                        }

                        num_tx_total += num_send;
                        num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        /* Print TX stats including update packets */
                        for (j = 0; j < slave_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
                                mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send update packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (update_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
                                        update_bufs_pkts[i]);
                        for (j = num_send; j < update_bufs_pkts[i]; j++) {
                                rte_pktmbuf_free(update_bufs[i][j]);
                        }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        for (j = 0; j < update_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
                                mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send non-ARP packets using tlb policy */
        if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
                num_send = bond_ethdev_tx_burst_tlb(queue,
                                slave_bufs[RTE_MAX_ETHPORTS],
                                slave_bufs_pkts[RTE_MAX_ETHPORTS]);

                for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
                        bufs[nb_pkts - 1 - num_not_send - j] =
                                        slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
                }

                num_tx_total += num_send;
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

        int i, op_slave_id;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate the per-slave mbuf arrays with the packets to be sent on
         * each slave */
        for (i = 0; i < nb_pkts; i++) {
                /* Select output slave using hash based on xmit policy */
                op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

                /* Populate slave mbuf arrays with mbufs for that slave */
                slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* If tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += slave_tx_fail_count;
                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                                &slave_bufs[i][num_tx_slave],
                                                slave_tx_fail_count * sizeof(bufs[0]));
                        }

                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

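/*
 * Mode 4 (802.3AD) TX: each slave's burst starts with any slow (LACP) frames
 * queued on that slave's tx_ring by the mode 4 state machine; data packets
 * are then hash-distributed across the subset of slaves whose actor state is
 * DISTRIBUTING. Slow frames that fail to transmit are freed here, while
 * failed data packets are moved to the end of bufs for the caller to retry.
 */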
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];
        /* positions in slaves, not ID */
        uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
        uint8_t distributing_count;

        uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
        uint16_t i, j, op_slave_idx;
        const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

        /* Allocate extra room per slave for slow (LACP) packets in 802.3AD mode. */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
        void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

        /* Total amount of packets in slave_bufs */
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
        /* Slow packets placed in each slave */
        uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

        distributing_count = 0;
        for (i = 0; i < num_of_slaves; i++) {
                struct port *port = &mode_8023ad_ports[slaves[i]];

                slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
                                slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
                                NULL);
                slave_nb_pkts[i] = slave_slow_nb_pkts[i];

                for (j = 0; j < slave_slow_nb_pkts[i]; j++)
                        slave_bufs[i][j] = slow_pkts[j];

                if (ACTOR_STATE(port, DISTRIBUTING))
                        distributing_offsets[distributing_count++] = i;
        }

        if (likely(distributing_count > 0)) {
                /* Populate the per-slave mbuf arrays with the packets to be
                 * sent on each slave */
                for (i = 0; i < nb_pkts; i++) {
                        /* Select output slave using hash based on xmit policy */
                        op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

                        /* Populate slave mbuf arrays with mbufs for that slave. Use only
                         * slaves that are currently distributing. */
                        uint8_t slave_offset = distributing_offsets[op_slave_idx];
                        slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
                        slave_nb_pkts[slave_offset]++;
                }
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] == 0)
                        continue;

                num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                slave_bufs[i], slave_nb_pkts[i]);

                /* If tx burst fails drop slow packets */
                for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
                        rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

                num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
                num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

                /* If tx burst fails move packets to end of bufs */
                if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                        uint16_t j = nb_pkts - num_tx_fail_total;
                        for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
                                bufs[j] = slave_bufs[i][num_tx_slave];
                }
        }

        return num_tx_total;
}

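/*
 * Mode 3 (broadcast) TX: every packet is transmitted on every active slave,
 * so each mbuf's reference count is bumped by (slave count - 1) before the
 * bursts. On partial failure only the most successful slave's count is
 * reported, and the surplus references held for the other slaves are freed
 * here, since the caller cannot know about the extra references.
 */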
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint8_t tx_failed_flag = 0, num_of_slaves;
        uint8_t slaves[RTE_MAX_ETHPORTS];

        uint16_t max_nb_of_tx_pkts = 0;

        int slave_tx_total[RTE_MAX_ETHPORTS];
        int i, most_successful_tx_slave = -1;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return 0;

        /* Increment reference count on mbufs */
        for (i = 0; i < nb_pkts; i++)
                rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

        /* Transmit burst on each active slave */
        for (i = 0; i < num_of_slaves; i++) {
                slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        bufs, nb_pkts);

                if (unlikely(slave_tx_total[i] < nb_pkts))
                        tx_failed_flag = 1;

                /* Record the value and slave index for the slave which transmits the
                 * maximum number of packets */
                if (slave_tx_total[i] > max_nb_of_tx_pkts) {
                        max_nb_of_tx_pkts = slave_tx_total[i];
                        most_successful_tx_slave = i;
                }
        }

        /* If slaves fail to transmit packets from burst, the calling application
         * is not expected to know about multiple references to packets so we must
         * handle failures of all packets except those of the most successful slave
         */
        if (unlikely(tx_failed_flag))
                for (i = 0; i < num_of_slaves; i++)
                        if (i != most_successful_tx_slave)
                                while (slave_tx_total[i] < nb_pkts)
                                        rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

        return max_nb_of_tx_pkts;
}

void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
                struct rte_eth_link *slave_dev_link)
{
        struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

        if (slave_dev_link->link_status &&
                bonded_eth_dev->data->dev_started) {
                bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
                bonded_dev_link->link_speed = slave_dev_link->link_speed;

                internals->link_props_set = 1;
        }
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

        memset(&(bonded_eth_dev->data->dev_link), 0,
                        sizeof(bonded_eth_dev->data->dev_link));

        internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
                struct rte_eth_link *slave_dev_link)
{
        if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
                bonded_dev_link->link_speed != slave_dev_link->link_speed)
                return -1;

        return 0;
}

int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
        struct ether_addr *mac_addr;

        if (eth_dev == NULL) {
                RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
                return -1;
        }

        if (dst_mac_addr == NULL) {
                RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
                return -1;
        }

        mac_addr = eth_dev->data->mac_addrs;

        ether_addr_copy(mac_addr, dst_mac_addr);
        return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
        struct ether_addr *mac_addr;

        if (eth_dev == NULL) {
                RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
                return -1;
        }

        if (new_mac_addr == NULL) {
                RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
                return -1;
        }

        mac_addr = eth_dev->data->mac_addrs;

        /* If new MAC is different to current MAC then update */
        if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
                memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

        return 0;
}

int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
        struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
        int i;

        /* Update slave devices MAC addresses */
        if (internals->slave_count < 1)
                return -1;

        switch (internals->mode) {
        case BONDING_MODE_ROUND_ROBIN:
        case BONDING_MODE_BALANCE:
        case BONDING_MODE_BROADCAST:
                for (i = 0; i < internals->slave_count; i++) {
                        if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
                                        bonded_eth_dev->data->mac_addrs)) {
                                RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                internals->slaves[i].port_id);
                                return -1;
                        }
                }
                break;
        case BONDING_MODE_8023AD:
                bond_mode_8023ad_mac_address_update(bonded_eth_dev);
                break;
        case BONDING_MODE_ACTIVE_BACKUP:
        case BONDING_MODE_TLB:
        case BONDING_MODE_ALB:
        default:
                for (i = 0; i < internals->slave_count; i++) {
                        if (internals->slaves[i].port_id ==
                                        internals->current_primary_port) {
                                if (mac_address_set(&rte_eth_devices[internals->primary_port],
                                                bonded_eth_dev->data->mac_addrs)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->current_primary_port);
                                        return -1;
                                }
                        } else {
                                if (mac_address_set(
                                                &rte_eth_devices[internals->slaves[i].port_id],
                                                &internals->slaves[i].persisted_mac_addr)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->slaves[i].port_id);
                                        return -1;
                                }
                        }
                }
        }

        return 0;
}

int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
        struct bond_dev_private *internals;

        internals = eth_dev->data->dev_private;

        switch (mode) {
        case BONDING_MODE_ROUND_ROBIN:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_ACTIVE_BACKUP:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
                break;
        case BONDING_MODE_BALANCE:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_BROADCAST:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
                break;
        case BONDING_MODE_8023AD:
                if (bond_mode_8023ad_enable(eth_dev) != 0)
                        return -1;

                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
                RTE_LOG(WARNING, PMD,
                                "Using mode 4, it is necessary to do TX burst and RX burst "
                                "at least every 100ms.\n");
                break;
        case BONDING_MODE_TLB:
                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
                break;
        case BONDING_MODE_ALB:
                if (bond_mode_alb_enable(eth_dev) != 0)
                        return -1;

                eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
                eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
                break;
        default:
                return -1;
        }

        internals->mode = mode;

        return 0;
}

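/*
 * Illustrative sketch (not part of the driver): an application reaches
 * bond_ethdev_mode_set() indirectly through the public API declared in
 * rte_eth_bond.h, roughly as follows:
 *
 *     int port = rte_eth_bond_create("net_bonding0", BONDING_MODE_BALANCE,
 *                                    rte_socket_id());
 *     if (port >= 0) {
 *             rte_eth_bond_slave_add(port, slave_port_a);
 *             rte_eth_bond_slave_add(port, slave_port_b);
 *             rte_eth_bond_xmit_policy_set(port, BALANCE_XMIT_POLICY_LAYER23);
 *     }
 *
 * where slave_port_a/slave_port_b stand in for the port ids of already
 * probed devices; selecting the mode installs the matching rx/tx burst
 * handlers from the switch above.
 */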
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
                struct rte_eth_dev *slave_eth_dev)
{
        struct bond_rx_queue *bd_rx_q;
        struct bond_tx_queue *bd_tx_q;

        int errval;
        uint16_t q_id;

        /* Stop slave */
        rte_eth_dev_stop(slave_eth_dev->data->port_id);

        /* Enable interrupts on slave device if supported */
        if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
                slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

        /* If RSS is enabled for bonding, try to enable it for slaves */
        if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
                                != 0) {
                        slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
                                        bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
                        slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
                                        bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
                } else {
                        slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
                }

                slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
                slave_eth_dev->data->dev_conf.rxmode.mq_mode =
                                bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
        }

        slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
                        bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;

        /* Configure device */
        errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
                        bonded_eth_dev->data->nb_rx_queues,
                        bonded_eth_dev->data->nb_tx_queues,
                        &(slave_eth_dev->data->dev_conf));
        if (errval != 0) {
                RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
                                slave_eth_dev->data->port_id, errval);
                return errval;
        }

        /* Setup Rx Queues */
        for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
                bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

                errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
                                bd_rx_q->nb_rx_desc,
                                rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
                                &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
                if (errval != 0) {
                        RTE_BOND_LOG(ERR,
                                        "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
                                        slave_eth_dev->data->port_id, q_id, errval);
                        return errval;
                }
        }

        /* Setup Tx Queues */
        for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
                bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

                errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
                                bd_tx_q->nb_tx_desc,
                                rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
                                &bd_tx_q->tx_conf);
                if (errval != 0) {
                        RTE_BOND_LOG(ERR,
                                        "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
                                        slave_eth_dev->data->port_id, q_id, errval);
                        return errval;
                }
        }

        /* Start device */
        errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
        if (errval != 0) {
                RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
                                slave_eth_dev->data->port_id, errval);
                return -1;
        }

        /* If RSS is enabled for bonding, synchronize RETA */
1414         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1415                 int i;
1416                 struct bond_dev_private *internals;
1417
1418                 internals = bonded_eth_dev->data->dev_private;
1419
1420                 for (i = 0; i < internals->slave_count; i++) {
1421                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1422                                 errval = rte_eth_dev_rss_reta_update(
1423                                                 slave_eth_dev->data->port_id,
1424                                                 &internals->reta_conf[0],
1425                                                 internals->slaves[i].reta_size);
1426                                 if (errval != 0) {
1427                                         RTE_LOG(WARNING, PMD,
1428                                                         "rte_eth_dev_rss_reta_update on slave port %d failed (err %d)."
1429                                                         " RSS configuration for bonding may be inconsistent.\n",
1430                                                         slave_eth_dev->data->port_id, errval);
1431                                 }
1432                                 break;
1433                         }
1434                 }
1435         }
1436
1437         /* If lsc interrupt is set, check initial slave's link status */
1438         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1439                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1440                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
1441
1442         return 0;
1443 }
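
/*
 * Usage sketch (assumption, not part of the driver): slave_configure()
 * mirrors the bonded port's configuration onto each slave, so an
 * application only configures and starts the bonded port itself. The
 * guard macro, function name and mempool parameter are hypothetical.
 */
#ifdef BOND_PMD_USAGE_SKETCH
static int
example_setup_bond(uint8_t bond_port, uint8_t slave0, uint8_t slave1,
		struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_NONE },
	};

	if (rte_eth_bond_slave_add(bond_port, slave0) != 0 ||
			rte_eth_bond_slave_add(bond_port, slave1) != 0)
		return -1;

	/* One rx/tx queue pair; slave_configure() replays this setup on
	 * every slave when the bonded port is started */
	if (rte_eth_dev_configure(bond_port, 1, 1, &conf) != 0)
		return -1;
	if (rte_eth_rx_queue_setup(bond_port, 0, 128,
			rte_eth_dev_socket_id(bond_port), NULL, mb_pool) != 0)
		return -1;
	if (rte_eth_tx_queue_setup(bond_port, 0, 512,
			rte_eth_dev_socket_id(bond_port), NULL) != 0)
		return -1;

	return rte_eth_dev_start(bond_port);
}
#endif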
1444
1445 void
1446 slave_remove(struct bond_dev_private *internals,
1447                 struct rte_eth_dev *slave_eth_dev)
1448 {
1449         uint8_t i;
1450
1451         for (i = 0; i < internals->slave_count; i++)
1452                 if (internals->slaves[i].port_id ==
1453                                 slave_eth_dev->data->port_id)
1454                         break;
1455
1456         if (i < (internals->slave_count - 1))
1457                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1458                                 sizeof(internals->slaves[0]) *
1459                                 (internals->slave_count - i - 1));
1460
1461         internals->slave_count--;
1462
1463         /* force reconfiguration of slave interfaces */
1464         _rte_eth_dev_reset(slave_eth_dev);
1465 }
1466
1467 static void
1468 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1469
1470 void
1471 slave_add(struct bond_dev_private *internals,
1472                 struct rte_eth_dev *slave_eth_dev)
1473 {
1474         struct bond_slave_details *slave_details =
1475                         &internals->slaves[internals->slave_count];
1476
1477         slave_details->port_id = slave_eth_dev->data->port_id;
1478         slave_details->last_link_status = 0;
1479
1480         /* Mark slave devices that don't support interrupts so we can
1481          * compensate when we start the bond
1482          */
1483         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1484                 slave_details->link_status_poll_enabled = 1;
1485         }
1486
1487         slave_details->link_status_wait_to_complete = 0;
1488         /* save the slave's MAC so it can be restored when removed from bond */
1489         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1490                         sizeof(struct ether_addr));
1491 }
1492
1493 void
1494 bond_ethdev_primary_set(struct bond_dev_private *internals,
1495                 uint8_t slave_port_id)
1496 {
1497         int i;
1498
1499         if (internals->active_slave_count < 1)
1500                 internals->current_primary_port = slave_port_id;
1501         else
1502                 /* Search bonded device slave ports for new proposed primary port */
1503                 for (i = 0; i < internals->active_slave_count; i++) {
1504                         if (internals->active_slaves[i] == slave_port_id)
1505                                 internals->current_primary_port = slave_port_id;
1506                 }
1507 }
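
/*
 * Sketch (assumption): applications reach the helper above via
 * rte_eth_bond_primary_set(). In active-backup mode the primary slave
 * carries all traffic until it loses link.
 */
#ifdef BOND_PMD_USAGE_SKETCH
static void
example_set_primary(uint8_t bond_port, uint8_t preferred_slave)
{
	if (rte_eth_bond_primary_set(bond_port, preferred_slave) != 0)
		RTE_LOG(WARNING, PMD, "could not set primary slave\n");
}
#endif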
1508
1509 static void
1510 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1511
1512 static int
1513 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1514 {
1515         struct bond_dev_private *internals;
1516         int i;
1517
1518         /* slave eth dev will be started by bonded device */
1519         if (check_for_bonded_ethdev(eth_dev)) {
1520                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1521                                 eth_dev->data->port_id);
1522                 return -1;
1523         }
1524
1525         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1526         eth_dev->data->dev_started = 1;
1527
1528         internals = eth_dev->data->dev_private;
1529
1530         if (internals->slave_count == 0) {
1531                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1532                 return -1;
1533         }
1534
1535         if (internals->user_defined_mac == 0) {
1536                 struct ether_addr *new_mac_addr = NULL;
1537
1538                 for (i = 0; i < internals->slave_count; i++)
1539                         if (internals->slaves[i].port_id == internals->primary_port)
1540                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1541
1542                 if (new_mac_addr == NULL)
1543                         return -1;
1544
1545                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1546                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1547                                         eth_dev->data->port_id);
1548                         return -1;
1549                 }
1550         }
1551
1552         /* Update all slave devices' MACs */
1553         if (mac_address_slaves_update(eth_dev) != 0)
1554                 return -1;
1555
1556         /* If bonded device is configured in promiscuous mode then re-apply config */
1557         if (internals->promiscuous_en)
1558                 bond_ethdev_promiscuous_enable(eth_dev);
1559
1560         /* Reconfigure each slave device if starting bonded device */
1561         for (i = 0; i < internals->slave_count; i++) {
1562                 if (slave_configure(eth_dev,
1563                                 &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
1564                         RTE_BOND_LOG(ERR,
1565                                         "bonded port (%d) failed to reconfigure slave device (%d)",
1566                                         eth_dev->data->port_id, internals->slaves[i].port_id);
1567                         return -1;
1568                 }
1569                 /* We will need to poll for link status if any slave doesn't
1570                  * support interrupts
1571                  */
1572                 if (internals->slaves[i].link_status_poll_enabled)
1573                         internals->link_status_polling_enabled = 1;
1574         }
1575         /* start polling if needed */
1576         if (internals->link_status_polling_enabled) {
1577                 rte_eal_alarm_set(
1578                         internals->link_status_polling_interval_ms * 1000,
1579                         bond_ethdev_slave_link_status_change_monitor,
1580                         (void *)&rte_eth_devices[internals->port_id]);
1581         }
1582
1583         if (internals->user_defined_primary_port)
1584                 bond_ethdev_primary_set(internals, internals->primary_port);
1585
1586         if (internals->mode == BONDING_MODE_8023AD)
1587                 bond_mode_8023ad_start(eth_dev);
1588
1589         if (internals->mode == BONDING_MODE_TLB ||
1590                         internals->mode == BONDING_MODE_ALB)
1591                 bond_tlb_enable(internals);
1592
1593         return 0;
1594 }
1595
1596 static void
1597 bond_ethdev_free_queues(struct rte_eth_dev *dev)
1598 {
1599         uint8_t i;
1600
1601         if (dev->data->rx_queues != NULL) {
1602                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1603                         rte_free(dev->data->rx_queues[i]);
1604                         dev->data->rx_queues[i] = NULL;
1605                 }
1606                 dev->data->nb_rx_queues = 0;
1607         }
1608
1609         if (dev->data->tx_queues != NULL) {
1610                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1611                         rte_free(dev->data->tx_queues[i]);
1612                         dev->data->tx_queues[i] = NULL;
1613                 }
1614                 dev->data->nb_tx_queues = 0;
1615         }
1616 }
1617
1618 void
1619 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
1620 {
1621         struct bond_dev_private *internals = eth_dev->data->dev_private;
1622         uint8_t i;
1623
1624         if (internals->mode == BONDING_MODE_8023AD) {
1625                 struct port *port;
1626                 void *pkt = NULL;
1627
1628                 bond_mode_8023ad_stop(eth_dev);
1629
1630                 /* Discard all messages to/from mode 4 state machines */
1631                 for (i = 0; i < internals->active_slave_count; i++) {
1632                         port = &mode_8023ad_ports[internals->active_slaves[i]];
1633
1634                         RTE_ASSERT(port->rx_ring != NULL);
1635                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
1636                                 rte_pktmbuf_free(pkt);
1637
1638                         RTE_ASSERT(port->tx_ring != NULL);
1639                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
1640                                 rte_pktmbuf_free(pkt);
1641                 }
1642         }
1643
1644         if (internals->mode == BONDING_MODE_TLB ||
1645                         internals->mode == BONDING_MODE_ALB) {
1646                 bond_tlb_disable(internals);
1647                 for (i = 0; i < internals->active_slave_count; i++)
1648                         tlb_last_obytets[internals->active_slaves[i]] = 0;
1649         }
1650
1651         internals->active_slave_count = 0;
1652         internals->link_status_polling_enabled = 0;
1653         for (i = 0; i < internals->slave_count; i++)
1654                 internals->slaves[i].last_link_status = 0;
1655
1656         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1657         eth_dev->data->dev_started = 0;
1658 }
1659
1660 void
1661 bond_ethdev_close(struct rte_eth_dev *dev)
1662 {
1663         struct bond_dev_private *internals = dev->data->dev_private;
1664
1665         bond_ethdev_free_queues(dev);
1666         rte_bitmap_reset(internals->vlan_filter_bmp);
1667 }
1668
1669 /* forward declaration */
1670 static int bond_ethdev_configure(struct rte_eth_dev *dev);
1671
1672 static void
1673 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1674 {
1675         struct bond_dev_private *internals = dev->data->dev_private;
1676
1677         dev_info->max_mac_addrs = 1;
1678
1679         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
1680                                   internals->candidate_max_rx_pktlen : 2048;
1681
1682         dev_info->max_rx_queues = (uint16_t)128;
1683         dev_info->max_tx_queues = (uint16_t)512;
1684
1685         dev_info->min_rx_bufsize = 0;
1686
1687         dev_info->rx_offload_capa = internals->rx_offload_capa;
1688         dev_info->tx_offload_capa = internals->tx_offload_capa;
1689         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
1690
1691         dev_info->reta_size = internals->reta_size;
1692 }
1693
1694 static int
1695 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1696 {
1697         int res;
1698         uint8_t i;
1699         struct bond_dev_private *internals = dev->data->dev_private;
1700
1701         /* don't do this while a slave is being added */
1702         rte_spinlock_lock(&internals->lock);
1703
1704         if (on)
1705                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
1706         else
1707                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
1708
1709         for (i = 0; i < internals->slave_count; i++) {
1710                 uint8_t port_id = internals->slaves[i].port_id;
1711
1712                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1713                 if (res == ENOTSUP)
1714                         RTE_LOG(WARNING, PMD,
1715                                 "Setting VLAN filter on slave port %u not supported.\n",
1716                                 port_id);
1717         }
1718
1719         rte_spinlock_unlock(&internals->lock);
1720         return 0;
1721 }
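
/*
 * Sketch (assumption): a VLAN filter set on the bonded port is recorded
 * in the bitmap above and pushed to every current slave; the bitmap lets
 * the driver replay the filters on slaves added later.
 */
#ifdef BOND_PMD_USAGE_SKETCH
static int
example_accept_vlan(uint8_t bond_port)
{
	/* Accept VLAN 100 on all slaves of the bonded port */
	return rte_eth_dev_vlan_filter(bond_port, 100, 1);
}
#endif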
1722
1723 static int
1724 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1725                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
1726                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
1727 {
1728         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
1729                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
1730                                         0, dev->data->numa_node);
1731         if (bd_rx_q == NULL)
1732                 return -1;
1733
1734         bd_rx_q->queue_id = rx_queue_id;
1735         bd_rx_q->dev_private = dev->data->dev_private;
1736
1737         bd_rx_q->nb_rx_desc = nb_rx_desc;
1738
1739         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
1740         bd_rx_q->mb_pool = mb_pool;
1741
1742         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
1743
1744         return 0;
1745 }
1746
1747 static int
1748 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1749                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
1750                 const struct rte_eth_txconf *tx_conf)
1751 {
1752         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
1753                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
1754                                         0, dev->data->numa_node);
1755
1756         if (bd_tx_q == NULL)
1757                 return -1;
1758
1759         bd_tx_q->queue_id = tx_queue_id;
1760         bd_tx_q->dev_private = dev->data->dev_private;
1761
1762         bd_tx_q->nb_tx_desc = nb_tx_desc;
1763         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
1764
1765         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
1766
1767         return 0;
1768 }
1769
1770 static void
1771 bond_ethdev_rx_queue_release(void *queue)
1772 {
1773         if (queue == NULL)
1774                 return;
1775
1776         rte_free(queue);
1777 }
1778
1779 static void
1780 bond_ethdev_tx_queue_release(void *queue)
1781 {
1782         if (queue == NULL)
1783                 return;
1784
1785         rte_free(queue);
1786 }
1787
1788 static void
1789 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
1790 {
1791         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
1792         struct bond_dev_private *internals;
1793
1794         /* Default value for polling slave found is true as we don't want to
1795          * disable the polling thread if we cannot get the lock */
1796         int i, polling_slave_found = 1;
1797
1798         if (cb_arg == NULL)
1799                 return;
1800
1801         bonded_ethdev = (struct rte_eth_dev *)cb_arg;
1802         internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
1803
1804         if (!bonded_ethdev->data->dev_started ||
1805                 !internals->link_status_polling_enabled)
1806                 return;
1807
1808         /* If device is currently being configured then don't check slaves'
1809          * link status; wait until next period */
1810         if (rte_spinlock_trylock(&internals->lock)) {
1811                 if (internals->slave_count > 0)
1812                         polling_slave_found = 0;
1813
1814                 for (i = 0; i < internals->slave_count; i++) {
1815                         if (!internals->slaves[i].link_status_poll_enabled)
1816                                 continue;
1817
1818                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
1819                         polling_slave_found = 1;
1820
1821                         /* Update slave link status */
1822                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
1823                                         internals->slaves[i].link_status_wait_to_complete);
1824
1825                         /* if link status has changed since last checked then call lsc
1826                          * event callback */
1827                         if (slave_ethdev->data->dev_link.link_status !=
1828                                         internals->slaves[i].last_link_status) {
1829                                 internals->slaves[i].last_link_status =
1830                                                 slave_ethdev->data->dev_link.link_status;
1831
1832                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
1833                                                 RTE_ETH_EVENT_INTR_LSC,
1834                                                 &bonded_ethdev->data->port_id);
1835                         }
1836                 }
1837                 rte_spinlock_unlock(&internals->lock);
1838         }
1839
1840         if (polling_slave_found)
1841                 /* Set alarm to continue monitoring link status of slave ethdevs */
1842                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
1843                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
1844 }
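
/*
 * Minimal sketch of the self re-arming alarm pattern used above: an
 * rte_eal_alarm callback schedules itself again to build a periodic
 * poll. The callback name and argument below are hypothetical.
 */
#ifdef BOND_PMD_USAGE_SKETCH
static void
example_periodic_cb(void *arg)
{
	uint32_t *interval_ms = arg;

	/* ... periodic work goes here ... */

	/* Re-arm; rte_eal_alarm_set() takes microseconds */
	rte_eal_alarm_set(*interval_ms * 1000, example_periodic_cb, arg);
}
#endif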
1845
1846 static int
1847 bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
1848                 int wait_to_complete)
1849 {
1850         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1851
1852         if (!bonded_eth_dev->data->dev_started ||
1853                 internals->active_slave_count == 0) {
1854                 bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1855                 return 0;
1856         } else {
1857                 struct rte_eth_dev *slave_eth_dev;
1858                 int i, link_up = 0;
1859
1860                 for (i = 0; i < internals->active_slave_count; i++) {
1861                         slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
1862
1863                         (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
1864                                         wait_to_complete);
1865                         if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
1866                                 link_up = 1;
1867                                 break;
1868                         }
1869                 }
1870
1871                 bonded_eth_dev->data->dev_link.link_status = link_up;
1872         }
1873
1874         return 0;
1875 }
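
/*
 * Sketch (assumption): querying the bonded port's link ends up in
 * bond_ethdev_link_update() above, which reports the port as up if any
 * active slave is up.
 */
#ifdef BOND_PMD_USAGE_SKETCH
static int
example_bond_is_up(uint8_t bond_port)
{
	struct rte_eth_link link;

	rte_eth_link_get_nowait(bond_port, &link);
	return link.link_status == ETH_LINK_UP;
}
#endif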
1876
1877 static void
1878 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1879 {
1880         struct bond_dev_private *internals = dev->data->dev_private;
1881         struct rte_eth_stats slave_stats;
1882         int i, j;
1883
1884         for (i = 0; i < internals->slave_count; i++) {
1885                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
1886
1887                 stats->ipackets += slave_stats.ipackets;
1888                 stats->opackets += slave_stats.opackets;
1889                 stats->ibytes += slave_stats.ibytes;
1890                 stats->obytes += slave_stats.obytes;
1891                 stats->imissed += slave_stats.imissed;
1892                 stats->ierrors += slave_stats.ierrors;
1893                 stats->oerrors += slave_stats.oerrors;
1894                 stats->rx_nombuf += slave_stats.rx_nombuf;
1895
1896                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1897                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
1898                         stats->q_opackets[j] += slave_stats.q_opackets[j];
1899                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
1900                         stats->q_obytes[j] += slave_stats.q_obytes[j];
1901                         stats->q_errors[j] += slave_stats.q_errors[j];
1902                 }
1903
1904         }
1905 }
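
/*
 * Sketch (assumption, requires <stdio.h>): reading the bonded port's
 * stats returns the per-slave sums computed in bond_ethdev_stats_get()
 * above.
 */
#ifdef BOND_PMD_USAGE_SKETCH
static void
example_print_bond_stats(uint8_t bond_port)
{
	struct rte_eth_stats stats;

	rte_eth_stats_get(bond_port, &stats);
	printf("rx=%llu tx=%llu\n",
			(unsigned long long)stats.ipackets,
			(unsigned long long)stats.opackets);
}
#endif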
1906
1907 static void
1908 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
1909 {
1910         struct bond_dev_private *internals = dev->data->dev_private;
1911         int i;
1912
1913         for (i = 0; i < internals->slave_count; i++)
1914                 rte_eth_stats_reset(internals->slaves[i].port_id);
1915 }
1916
1917 static void
1918 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1919 {
1920         struct bond_dev_private *internals = eth_dev->data->dev_private;
1921         int i;
1922
1923         internals->promiscuous_en = 1;
1924
1925         switch (internals->mode) {
1926         /* Promiscuous mode is propagated to all slaves */
1927         case BONDING_MODE_ROUND_ROBIN:
1928         case BONDING_MODE_BALANCE:
1929         case BONDING_MODE_BROADCAST:
1930                 for (i = 0; i < internals->slave_count; i++)
1931                         rte_eth_promiscuous_enable(internals->slaves[i].port_id);
1932                 break;
1933         /* In mode 4, promiscuous mode is managed when a slave is added/removed */
1934         case BONDING_MODE_8023AD:
1935                 break;
1936         /* Promiscuous mode is propagated only to primary slave */
1937         case BONDING_MODE_ACTIVE_BACKUP:
1938         case BONDING_MODE_TLB:
1939         case BONDING_MODE_ALB:
1940         default:
1941                 rte_eth_promiscuous_enable(internals->current_primary_port);
1942         }
1943 }
1944
1945 static void
1946 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
1947 {
1948         struct bond_dev_private *internals = dev->data->dev_private;
1949         int i;
1950
1951         internals->promiscuous_en = 0;
1952
1953         switch (internals->mode) {
1954         /* Promiscuous mode is propagated to all slaves */
1955         case BONDING_MODE_ROUND_ROBIN:
1956         case BONDING_MODE_BALANCE:
1957         case BONDING_MODE_BROADCAST:
1958                 for (i = 0; i < internals->slave_count; i++)
1959                         rte_eth_promiscuous_disable(internals->slaves[i].port_id);
1960                 break;
1961         /* In mode 4, promiscuous mode is managed when a slave is added/removed */
1962         case BONDING_MODE_8023AD:
1963                 break;
1964         /* Promiscuous mode is propagated only to primary slave */
1965         case BONDING_MODE_ACTIVE_BACKUP:
1966         case BONDING_MODE_TLB:
1967         case BONDING_MODE_ALB:
1968         default:
1969                 rte_eth_promiscuous_disable(internals->current_primary_port);
1970         }
1971 }
1972
1973 static void
1974 bond_ethdev_delayed_lsc_propagation(void *arg)
1975 {
1976         if (arg == NULL)
1977                 return;
1978
1979         _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
1980                         RTE_ETH_EVENT_INTR_LSC, NULL);
1981 }
1982
1983 void
1984 bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
1985                 void *param)
1986 {
1987         struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
1988         struct bond_dev_private *internals;
1989         struct rte_eth_link link;
1990
1991         int i, valid_slave = 0;
1992         uint8_t active_pos;
1993         uint8_t lsc_flag = 0;
1994
1995         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
1996                 return;
1997
1998         bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
1999         slave_eth_dev = &rte_eth_devices[port_id];
2000
2001         if (check_for_bonded_ethdev(bonded_eth_dev))
2002                 return;
2003
2004         internals = bonded_eth_dev->data->dev_private;
2005
2006         /* If the device isn't started don't handle interrupts */
2007         if (!bonded_eth_dev->data->dev_started)
2008                 return;
2009
2010         /* verify that port_id is a valid slave of bonded port */
2011         for (i = 0; i < internals->slave_count; i++) {
2012                 if (internals->slaves[i].port_id == port_id) {
2013                         valid_slave = 1;
2014                         break;
2015                 }
2016         }
2017
2018         if (!valid_slave)
2019                 return;
2020
2021         /* Search for port in active port list */
2022         active_pos = find_slave_by_id(internals->active_slaves,
2023                         internals->active_slave_count, port_id);
2024
2025         rte_eth_link_get_nowait(port_id, &link);
2026         if (link.link_status) {
2027                 if (active_pos < internals->active_slave_count)
2028                         return;
2029
2030                 /* if no active slave ports then set this port to be primary port */
2031                 if (internals->active_slave_count < 1) {
2032                         /* If first active slave, then change link status */
2033                         bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
2034                         internals->current_primary_port = port_id;
2035                         lsc_flag = 1;
2036
2037                         mac_address_slaves_update(bonded_eth_dev);
2038
2039                         /* Inherit eth dev link properties from first active slave */
2040                         link_properties_set(bonded_eth_dev,
2041                                         &(slave_eth_dev->data->dev_link));
2042                 } else {
2043                         if (link_properties_valid(
2044                                 &bonded_eth_dev->data->dev_link, &link) != 0) {
2045                                 slave_eth_dev->data->dev_flags &=
2046                                         (~RTE_ETH_DEV_BONDED_SLAVE);
2047                                 RTE_LOG(ERR, PMD,
2048                                         "port %u invalid speed/duplex\n",
2049                                         port_id);
2050                                 return;
2051                         }
2052                 }
2053
2054                 activate_slave(bonded_eth_dev, port_id);
2055
2056                 /* If user has defined the primary port then default to using it */
2057                 if (internals->user_defined_primary_port &&
2058                                 internals->primary_port == port_id)
2059                         bond_ethdev_primary_set(internals, port_id);
2060         } else {
2061                 if (active_pos == internals->active_slave_count)
2062                         return;
2063
2064                 /* Remove from active slave list */
2065                 deactivate_slave(bonded_eth_dev, port_id);
2066
2067                 /* No active slaves, change link status to down and reset other
2068                  * link properties */
2069                 if (internals->active_slave_count < 1) {
2070                         lsc_flag = 1;
2071                         bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2072
2073                         link_properties_reset(bonded_eth_dev);
2074                 }
2075
2076                 /* Update primary id, take first active slave from list or, if none
2077                  * are available, fall back to the configured primary port */
2078                 if (port_id == internals->current_primary_port) {
2079                         if (internals->active_slave_count > 0)
2080                                 bond_ethdev_primary_set(internals,
2081                                                 internals->active_slaves[0]);
2082                         else
2083                                 internals->current_primary_port = internals->primary_port;
2084                 }
2085         }
2086
2087         if (lsc_flag) {
2088                 /* Cancel any possible outstanding interrupts if delays are enabled */
2089                 if (internals->link_up_delay_ms > 0 ||
2090                         internals->link_down_delay_ms > 0)
2091                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2092                                         bonded_eth_dev);
2093
2094                 if (bonded_eth_dev->data->dev_link.link_status) {
2095                         if (internals->link_up_delay_ms > 0)
2096                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2097                                                 bond_ethdev_delayed_lsc_propagation,
2098                                                 (void *)bonded_eth_dev);
2099                         else
2100                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2101                                                 RTE_ETH_EVENT_INTR_LSC, NULL);
2102
2103                 } else {
2104                         if (internals->link_down_delay_ms > 0)
2105                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2106                                                 bond_ethdev_delayed_lsc_propagation,
2107                                                 (void *)bonded_eth_dev);
2108                         else
2109                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2110                                                 RTE_ETH_EVENT_INTR_LSC, NULL);
2111                 }
2112         }
2113 }
2114
2115 static int
2116 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2117                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2118 {
2119         unsigned i, j;
2120         int result = 0;
2121         int slave_reta_size;
2122         unsigned reta_count;
2123         struct bond_dev_private *internals = dev->data->dev_private;
2124
2125         if (reta_size != internals->reta_size)
2126                 return -EINVAL;
2127
2128         /* Copy RETA table */
2129         reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2130
2131         for (i = 0; i < reta_count; i++) {
2132                 internals->reta_conf[i].mask = reta_conf[i].mask;
2133                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2134                         if ((reta_conf[i].mask >> j) & 0x01)
2135                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2136         }
2137
2138         /* Fill rest of array */
2139         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2140                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2141                                 sizeof(internals->reta_conf[0]) * reta_count);
2142
2143         /* Propagate RETA over slaves */
2144         for (i = 0; i < internals->slave_count; i++) {
2145                 slave_reta_size = internals->slaves[i].reta_size;
2146                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2147                                 &internals->reta_conf[0], slave_reta_size);
2148                 if (result < 0)
2149                         return result;
2150         }
2151
2152         return 0;
2153 }
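
/*
 * Sketch (assumption): building an rte_eth_rss_reta_entry64 array that
 * spreads the redirection table evenly over the rx queues, as consumed
 * by bond_ethdev_rss_reta_update() above. reta_size is assumed here to
 * be a multiple of RTE_RETA_GROUP_SIZE; the VLA is a sketch convenience.
 */
#ifdef BOND_PMD_USAGE_SKETCH
static int
example_spread_reta(uint8_t bond_port, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
			RTE_RETA_GROUP_SIZE];
	unsigned i;

	for (i = 0; i < reta_size; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = ~0ULL;
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
				i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(bond_port, reta_conf, reta_size);
}
#endif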
2154
2155 static int
2156 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2157                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2158 {
2159         int i, j;
2160         struct bond_dev_private *internals = dev->data->dev_private;
2161
2162         if (reta_size != internals->reta_size)
2163                 return -EINVAL;
2164
2165         /* Copy RETA table */
2166         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2167                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2168                         if ((reta_conf[i].mask >> j) & 0x01)
2169                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2170
2171         return 0;
2172 }
2173
2174 static int
2175 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2176                 struct rte_eth_rss_conf *rss_conf)
2177 {
2178         int i, result = 0;
2179         struct bond_dev_private *internals = dev->data->dev_private;
2180         struct rte_eth_rss_conf bond_rss_conf;
2181
2182         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2183
2184         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2185
2186         if (bond_rss_conf.rss_hf != 0)
2187                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2188
2189         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2190                         sizeof(internals->rss_key)) {
2191                 if (bond_rss_conf.rss_key_len == 0)
2192                         bond_rss_conf.rss_key_len = 40;
2193                 internals->rss_key_len = bond_rss_conf.rss_key_len;
2194                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
2195                                 internals->rss_key_len);
2196         }
2197
2198         for (i = 0; i < internals->slave_count; i++) {
2199                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
2200                                 &bond_rss_conf);
2201                 if (result < 0)
2202                         return result;
2203         }
2204
2205         return 0;
2206 }
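
/*
 * Sketch (assumption): updating the RSS hash functions on the bonded
 * port; bond_ethdev_rss_hash_update() masks the request against the
 * offloads the bond supports and propagates it to every slave.
 */
#ifdef BOND_PMD_USAGE_SKETCH
static int
example_set_rss_hash(uint8_t bond_port)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the current key */
		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
	};

	return rte_eth_dev_rss_hash_update(bond_port, &rss_conf);
}
#endif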
2207
2208 static int
2209 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
2210                 struct rte_eth_rss_conf *rss_conf)
2211 {
2212         struct bond_dev_private *internals = dev->data->dev_private;
2213
2214         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
2215         rss_conf->rss_key_len = internals->rss_key_len;
2216         if (rss_conf->rss_key)
2217                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
2218
2219         return 0;
2220 }
2221
2222 const struct eth_dev_ops default_dev_ops = {
2223         .dev_start            = bond_ethdev_start,
2224         .dev_stop             = bond_ethdev_stop,
2225         .dev_close            = bond_ethdev_close,
2226         .dev_configure        = bond_ethdev_configure,
2227         .dev_infos_get        = bond_ethdev_info,
2228         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
2229         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
2230         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
2231         .rx_queue_release     = bond_ethdev_rx_queue_release,
2232         .tx_queue_release     = bond_ethdev_tx_queue_release,
2233         .link_update          = bond_ethdev_link_update,
2234         .stats_get            = bond_ethdev_stats_get,
2235         .stats_reset          = bond_ethdev_stats_reset,
2236         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
2237         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
2238         .reta_update          = bond_ethdev_rss_reta_update,
2239         .reta_query           = bond_ethdev_rss_reta_query,
2240         .rss_hash_update      = bond_ethdev_rss_hash_update,
2241         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get
2242 };
2243
2244 static int
2245 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
2246 {
2247         const char *name = rte_vdev_device_name(dev);
2248         uint8_t socket_id = dev->device.numa_node;
2249         struct bond_dev_private *internals = NULL;
2250         struct rte_eth_dev *eth_dev = NULL;
2251         uint32_t vlan_filter_bmp_size;
2252
2253         /* now do all data allocation - for eth_dev structure and internal
2254          * (private) data
2255          */
2256
2257         if (socket_id >= number_of_sockets()) {
2258                 RTE_BOND_LOG(ERR,
2259                                 "Invalid socket id specified to create bonded device on.");
2260                 goto err;
2261         }
2262
2263         /* reserve an ethdev entry */
2264         eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
2265         if (eth_dev == NULL) {
2266                 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
2267                 goto err;
2268         }
2269
2270         internals = eth_dev->data->dev_private;
2271         eth_dev->data->nb_rx_queues = (uint16_t)1;
2272         eth_dev->data->nb_tx_queues = (uint16_t)1;
2273
2274         eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
2275                         socket_id);
2276         if (eth_dev->data->mac_addrs == NULL) {
2277                 RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
2278                 goto err;
2279         }
2280
2281         eth_dev->dev_ops = &default_dev_ops;
2282         eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
2283                 RTE_ETH_DEV_DETACHABLE;
2284
2285         rte_spinlock_init(&internals->lock);
2286
2287         internals->port_id = eth_dev->data->port_id;
2288         internals->mode = BONDING_MODE_INVALID;
2289         internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
2290         internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
2291         internals->xmit_hash = xmit_l2_hash;
2292         internals->user_defined_mac = 0;
2293         internals->link_props_set = 0;
2294
2295         internals->link_status_polling_enabled = 0;
2296
2297         internals->link_status_polling_interval_ms =
2298                 DEFAULT_POLLING_INTERVAL_10_MS;
2299         internals->link_down_delay_ms = 0;
2300         internals->link_up_delay_ms = 0;
2301
2302         internals->slave_count = 0;
2303         internals->active_slave_count = 0;
2304         internals->rx_offload_capa = 0;
2305         internals->tx_offload_capa = 0;
2306         internals->candidate_max_rx_pktlen = 0;
2307         internals->max_rx_pktlen = 0;
2308
2309         /* Initially allow any RSS offload type to be chosen */
2310         internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
2311
2312         memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
2313         memset(internals->slaves, 0, sizeof(internals->slaves));
2314
2315         /* Set mode 4 default configuration */
2316         bond_mode_8023ad_setup(eth_dev, NULL);
2317         if (bond_ethdev_mode_set(eth_dev, mode)) {
2318                 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
2319                                  eth_dev->data->port_id, mode);
2320                 goto err;
2321         }
2322
2323         vlan_filter_bmp_size =
2324                 rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
2325         internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
2326                                                    RTE_CACHE_LINE_SIZE);
2327         if (internals->vlan_filter_bmpmem == NULL) {
2328                 RTE_BOND_LOG(ERR,
2329                              "Failed to allocate vlan bitmap for bonded device %u\n",
2330                              eth_dev->data->port_id);
2331                 goto err;
2332         }
2333
2334         internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
2335                         internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
2336         if (internals->vlan_filter_bmp == NULL) {
2337                 RTE_BOND_LOG(ERR,
2338                              "Failed to init vlan bitmap for bonded device %u\n",
2339                              eth_dev->data->port_id);
2340                 rte_free(internals->vlan_filter_bmpmem);
2341                 goto err;
2342         }
2343
2344         return eth_dev->data->port_id;
2345
2346 err:
2347         rte_free(internals);
2348         if (eth_dev != NULL) {
2349                 rte_free(eth_dev->data->mac_addrs);
2350                 rte_eth_dev_release_port(eth_dev);
2351         }
2352         return -1;
2353 }
2354
2355 static int
2356 bond_probe(struct rte_vdev_device *dev)
2357 {
2358         const char *name;
2359         struct bond_dev_private *internals;
2360         struct rte_kvargs *kvlist;
2361         uint8_t bonding_mode, socket_id;
2362         int  arg_count, port_id;
2363
2364         if (!dev)
2365                 return -EINVAL;
2366
2367         name = rte_vdev_device_name(dev);
2368         RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
2369
2370         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
2371                 pmd_bond_init_valid_arguments);
2372         if (kvlist == NULL)
2373                 return -1;
2374
2375         /* Parse link bonding mode */
2376         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
2377                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
2378                                 &bond_ethdev_parse_slave_mode_kvarg,
2379                                 &bonding_mode) != 0) {
2380                         RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
2381                                         name);
2382                         goto parse_error;
2383                 }
2384         } else {
2385                 RTE_LOG(ERR, EAL, "Mode must be specified exactly once for bonded "
2386                                 "device %s\n", name);
2387                 goto parse_error;
2388         }
2389
2390         /* Parse socket id to create bonding device on */
2391         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
2392         if (arg_count == 1) {
2393                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
2394                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
2395                                 != 0) {
2396                         RTE_LOG(ERR, EAL, "Invalid socket id specified for "
2397                                         "bonded device %s\n", name);
2398                         goto parse_error;
2399                 }
2400         } else if (arg_count > 1) {
2401                 RTE_LOG(ERR, EAL, "Socket id can be specified only once for "
2402                                 "bonded device %s\n", name);
2403                 goto parse_error;
2404         } else {
2405                 socket_id = rte_socket_id();
2406         }
2407
2408         dev->device.numa_node = socket_id;
2409
2410         /* Create link bonding eth device */
2411         port_id = bond_alloc(dev, bonding_mode);
2412         if (port_id < 0) {
2413                 RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
2414                                 "socket %u.\n", name, bonding_mode, socket_id);
2415                 goto parse_error;
2416         }
2417         internals = rte_eth_devices[port_id].data->dev_private;
2418         internals->kvlist = kvlist;
2419
2420         RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on "
2421                         "socket %u.\n", name, port_id, bonding_mode, socket_id);
2422         return 0;
2423
2424 parse_error:
2425         rte_kvargs_free(kvlist);
2426
2427         return -1;
2428 }
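
/*
 * Configuration sketch (assumption): bond_probe() is normally reached
 * from the EAL --vdev option, e.g.
 *
 *   --vdev 'net_bonding0,mode=2,slave=0000:01:00.0,slave=0000:01:00.1'
 *
 * The keys accepted (mode, socket_id, slave, primary, xmit_policy,
 * lsc_poll_period_ms, up_delay, down_delay, mac, ...) correspond to the
 * PMD_BOND_*_KVARG definitions parsed above and in
 * bond_ethdev_configure() below; the device names shown are examples
 * only.
 */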
2429
2430 static int
2431 bond_remove(struct rte_vdev_device *dev)
2432 {
2433         struct rte_eth_dev *eth_dev;
2434         struct bond_dev_private *internals;
2435         const char *name;
2436
2437         if (!dev)
2438                 return -EINVAL;
2439
2440         name = rte_vdev_device_name(dev);
2441         RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
2442
2443         /* now free all data allocation - for eth_dev structure and
2444          * internal (private) data
2445          */
2446
2447         /* find an ethdev entry */
2448         eth_dev = rte_eth_dev_allocated(name);
2449         if (eth_dev == NULL)
2450                 return -ENODEV;
2451
2452         RTE_ASSERT(eth_dev->device == &dev->device);
2453
2454         internals = eth_dev->data->dev_private;
2455         if (internals->slave_count != 0)
2456                 return -EBUSY;
2457
2458         if (eth_dev->data->dev_started == 1) {
2459                 bond_ethdev_stop(eth_dev);
2460                 bond_ethdev_close(eth_dev);
2461         }
2462
2463         eth_dev->dev_ops = NULL;
2464         eth_dev->rx_pkt_burst = NULL;
2465         eth_dev->tx_pkt_burst = NULL;
2466
2467         internals = eth_dev->data->dev_private;
2468         rte_bitmap_free(internals->vlan_filter_bmp);
2469         rte_free(internals->vlan_filter_bmpmem);
2470         rte_free(eth_dev->data->dev_private);
2471         rte_free(eth_dev->data->mac_addrs);
2472
2473         rte_eth_dev_release_port(eth_dev);
2474
2475         return 0;
2476 }
2477
2478 /* this part resolves the slave port ids after all the other pdevs and vdevs
2479  * have been allocated */
2480 static int
2481 bond_ethdev_configure(struct rte_eth_dev *dev)
2482 {
2483         char *name = dev->data->name;
2484         struct bond_dev_private *internals = dev->data->dev_private;
2485         struct rte_kvargs *kvlist = internals->kvlist;
2486         int arg_count;
2487         uint8_t port_id = dev - rte_eth_devices;
2488
2489         static const uint8_t default_rss_key[40] = {
2490                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
2491                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2492                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
2493                 0xBE, 0xAC, 0x01, 0xFA
2494         };
2495
2496         unsigned i, j;
2497
2498         /* If RSS is enabled, fill table and key with default values */
2499         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
2500                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
2501                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
2502                 memcpy(internals->rss_key, default_rss_key, sizeof(default_rss_key));
2503
2504                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
2505                         internals->reta_conf[i].mask = ~0LL;
2506                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2507                                 internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
2508                 }
2509         }
2510
2511         /* set the max_rx_pktlen */
2512         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
2513
2514         /*
2515          * if no kvlist, it means that this bonded device has been created
2516          * through the bonding api.
2517          */
2518         if (!kvlist)
2519                 return 0;
2520
2521         /* Parse MAC address for bonded device */
2522         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
2523         if (arg_count == 1) {
2524                 struct ether_addr bond_mac;
2525
2526                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
2527                                 &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
2528                         RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
2529                                         name);
2530                         return -1;
2531                 }
2532
2533                 /* Set MAC address */
2534                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
2535                         RTE_LOG(ERR, EAL,
2536                                         "Failed to set mac address on bonded device %s\n",
2537                                         name);
2538                         return -1;
2539                 }
2540         } else if (arg_count > 1) {
2541                 RTE_LOG(ERR, EAL,
2542                                 "MAC address can be specified only once for bonded device %s\n",
2543                                 name);
2544                 return -1;
2545         }
2546
2547         /* Parse/set balance mode transmit policy */
2548         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
2549         if (arg_count == 1) {
2550                 uint8_t xmit_policy;
2551
2552                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
2553                                 &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
2554                                                 0) {
2555                         RTE_LOG(INFO, EAL,
2556                                         "Invalid xmit policy specified for bonded device %s\n",
2557                                         name);
2558                         return -1;
2559                 }
2560
2561                 /* Set balance mode transmit policy */
2562                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
2563                         RTE_LOG(ERR, EAL,
2564                                         "Failed to set balance xmit policy on bonded device %s\n",
2565                                         name);
2566                         return -1;
2567                 }
2568         } else if (arg_count > 1) {
2569                 RTE_LOG(ERR, EAL,
2570                                 "Transmit policy can be specified only once for bonded device"
2571                                 " %s\n", name);
2572                 return -1;
2573         }
2574
2575         /* Parse/add slave ports to bonded device */
2576         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
2577                 struct bond_ethdev_slave_ports slave_ports;
2578                 unsigned i;
2579
2580                 memset(&slave_ports, 0, sizeof(slave_ports));
2581
2582                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
2583                                 &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
2584                         RTE_LOG(ERR, EAL,
2585                                         "Failed to parse slave ports for bonded device %s\n",
2586                                         name);
2587                         return -1;
2588                 }
2589
2590                 for (i = 0; i < slave_ports.slave_count; i++) {
2591                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
2592                                 RTE_LOG(ERR, EAL,
2593                                                 "Failed to add port %d as slave to bonded device %s\n",
2594                                                 slave_ports.slaves[i], name);
2595                         }
2596                 }
2597
2598         } else {
2599                 RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
2600                 return -1;
2601         }
2602
2603         /* Parse/set primary slave port id*/
2604         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
2605         if (arg_count == 1) {
2606                 uint8_t primary_slave_port_id;
2607
2608                 if (rte_kvargs_process(kvlist,
2609                                 PMD_BOND_PRIMARY_SLAVE_KVARG,
2610                                 &bond_ethdev_parse_primary_slave_port_id_kvarg,
2611                                 &primary_slave_port_id) < 0) {
2612                         RTE_LOG(INFO, EAL,
2613                                         "Invalid primary slave port id specified for bonded device"
2614                                         " %s\n", name);
2615                         return -1;
2616                 }
2617
2618                 /* Set primary slave port id */
2619                 if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
2620                                 != 0) {
2621                         RTE_LOG(ERR, EAL,
2622                                         "Failed to set primary slave port %d on bonded device %s\n",
2623                                         primary_slave_port_id, name);
2624                         return -1;
2625                 }
2626         } else if (arg_count > 1) {
2627                 RTE_LOG(INFO, EAL,
2628                                 "Primary slave can be specified only once for bonded device"
2629                                 " %s\n", name);
2630                 return -1;
2631         }
2632
2633         /* Parse link status monitor polling interval */
2634         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
2635         if (arg_count == 1) {
2636                 uint32_t lsc_poll_interval_ms;
2637
2638                 if (rte_kvargs_process(kvlist,
2639                                 PMD_BOND_LSC_POLL_PERIOD_KVARG,
2640                                 &bond_ethdev_parse_time_ms_kvarg,
2641                                 &lsc_poll_interval_ms) < 0) {
2642                         RTE_LOG(INFO, EAL,
2643                                         "Invalid lsc polling interval value specified for bonded"
2644                                         " device %s\n", name);
2645                         return -1;
2646                 }
2647
2648                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
2649                                 != 0) {
2650                         RTE_LOG(ERR, EAL,
2651                                         "Failed to set lsc monitor polling interval (%u ms) on"
2652                                         " bonded device %s\n", lsc_poll_interval_ms, name);
2653                         return -1;
2654                 }
2655         } else if (arg_count > 1) {
2656                 RTE_LOG(ERR, EAL,
2657                                 "LSC polling interval can be specified only once for bonded"
2658                                 " device %s\n", name);
2659                 return -1;
2660         }
2661
2662         /* Parse link up interrupt propagation delay */
2663         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
2664         if (arg_count == 1) {
2665                 uint32_t link_up_delay_ms;
2666
2667                 if (rte_kvargs_process(kvlist,
2668                                 PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
2669                                 &bond_ethdev_parse_time_ms_kvarg,
2670                                 &link_up_delay_ms) < 0) {
2671                         RTE_LOG(ERR, EAL,
2672                                         "Invalid link up propagation delay value specified for"
2673                                         " bonded device %s\n", name);
2674                         return -1;
2675                 }
2676
2677                 /* Set link up propagation delay */
2678                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
2679                                 != 0) {
2680                         RTE_LOG(ERR, EAL,
2681                                         "Failed to set link up propagation delay (%u ms) on bonded"
2682                                         " device %s\n", link_up_delay_ms, name);
2683                         return -1;
2684                 }
2685         } else if (arg_count > 1) {
2686                 RTE_LOG(ERR, EAL,
2687                                 "Link up propagation delay can be specified only once for"
2688                                 " bonded device %s\n", name);
2689                 return -1;
2690         }
2691
2692         /* Parse link down interrupt propagation delay */
2693         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
2694         if (arg_count == 1) {
2695                 uint32_t link_down_delay_ms;
2696
2697                 if (rte_kvargs_process(kvlist,
2698                                 PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
2699                                 &bond_ethdev_parse_time_ms_kvarg,
2700                                 &link_down_delay_ms) < 0) {
2701                         RTE_LOG(ERR, EAL,
2702                                         "Invalid link down propagation delay value specified for"
2703                                         " bonded device %s\n", name);
2704                         return -1;
2705                 }
2706
2707                 /* Set link down propagation delay */
2708                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
2709                                 != 0) {
2710                         RTE_LOG(ERR, EAL,
2711                                         "Failed to set link down propagation delay (%u ms) on"
2712                                         " bonded device %s\n", link_down_delay_ms, name);
2713                         return -1;
2714                 }
2715         } else if (arg_count > 1) {
2716                 RTE_LOG(ERR, EAL,
2717                                 "Link down propagation delay can be specified only once for"
2718                                 " bonded device %s\n", name);
2719                 return -1;
2720         }
2721
2722         return 0;
2723 }
2724
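/*
 * Virtual device driver registration: the EAL calls bond_probe()/bond_remove()
 * when a matching --vdev argument is processed. The alias keeps the legacy
 * "eth_bond" device prefix working alongside "net_bonding".
 */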
2725 static struct rte_vdev_driver pmd_bond_drv = {
2726         .probe = bond_probe,
2727         .remove = bond_remove,
2728 };
2729
2730 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
2731 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
2732
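/*
 * Illustrative invocation (the PCI addresses below are placeholders only):
 * create a balance-mode (mode=2) bond over two slaves, polling link status
 * every 100 ms:
 *
 *   --vdev 'net_bonding0,mode=2,slave=0000:00:04.0,slave=0000:00:05.0,lsc_poll_period_ms=100'
 */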
2733 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
2734         "slave=<ifc> "
2735         "primary=<ifc> "
2736         "mode=[0-6] "
2737         "xmit_policy=[l2 | l23 | l34] "
2738         "socket_id=<int> "
2739         "mac=<mac addr> "
2740         "lsc_poll_period_ms=<int> "
2741         "up_delay=<int> "
2742         "down_delay=<int>");