drivers/net/bonding/rte_eth_bond_pmd.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 #include <stdlib.h>
5 #include <stdbool.h>
6 #include <netinet/in.h>
7
8 #include <rte_mbuf.h>
9 #include <rte_malloc.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_vdev.h>
12 #include <rte_tcp.h>
13 #include <rte_udp.h>
14 #include <rte_ip.h>
15 #include <rte_ip_frag.h>
16 #include <rte_devargs.h>
17 #include <rte_kvargs.h>
18 #include <rte_bus_vdev.h>
19 #include <rte_alarm.h>
20 #include <rte_cycles.h>
21 #include <rte_string_fns.h>
22
23 #include "rte_eth_bond.h"
24 #include "rte_eth_bond_private.h"
25 #include "rte_eth_bond_8023ad_private.h"
26
27 #define REORDER_PERIOD_MS 10
28 #define DEFAULT_POLLING_INTERVAL_10_MS (10)
29 #define BOND_MAX_MAC_ADDRS 16
30
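/* Fold the L4 source and destination ports into one value for hashing. */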
31 #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
32
33 /* Table for statistics in mode 5 TLB */
34 static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
35
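/*
 * Skip over a VLAN (or QinQ outer + inner) header if one follows the
 * Ethernet header: *proto is updated to the encapsulated ethertype and
 * the returned offset is the number of VLAN header bytes to skip.
 */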
36 static inline size_t
37 get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
38 {
39         size_t vlan_offset = 0;
40
41         if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
42                 rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
43                 struct rte_vlan_hdr *vlan_hdr =
44                         (struct rte_vlan_hdr *)(eth_hdr + 1);
45
46                 vlan_offset = sizeof(struct rte_vlan_hdr);
47                 *proto = vlan_hdr->eth_proto;
48
49                 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
50                         vlan_hdr = vlan_hdr + 1;
51                         *proto = vlan_hdr->eth_proto;
52                         vlan_offset += sizeof(struct rte_vlan_hdr);
53                 }
54         }
55         return vlan_offset;
56 }
57
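/*
 * Default Rx burst: poll the active slaves in round-robin order, starting
 * from the slave after the one polled first on the previous call, until
 * nb_pkts mbufs have been gathered or every slave has been polled once.
 */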
58 static uint16_t
59 bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
60 {
61         struct bond_dev_private *internals;
62
63         uint16_t num_rx_total = 0;
64         uint16_t slave_count;
65         uint16_t active_slave;
66         int i;
67
68         /* Cast to structure containing the bonded device's port id and queue id */
69         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
70         internals = bd_rx_q->dev_private;
71         slave_count = internals->active_slave_count;
72         active_slave = internals->active_slave;
           /* The stored index may be stale if slaves were removed since the
            * previous call; clamp it so we never index past the active list
            * (same guard as in rx_burst_8023ad() below). */
           if (active_slave >= slave_count)
                   active_slave = 0;
73
74         for (i = 0; i < slave_count && nb_pkts; i++) {
75                 uint16_t num_rx_slave;
76
77                 /* The write offset into *bufs advances as packets are
78                  * received from successive slaves */
79                 num_rx_slave =
80                         rte_eth_rx_burst(internals->active_slaves[active_slave],
81                                          bd_rx_q->queue_id,
82                                          bufs + num_rx_total, nb_pkts);
83                 num_rx_total += num_rx_slave;
84                 nb_pkts -= num_rx_slave;
85                 if (++active_slave == slave_count)
86                         active_slave = 0;
87         }
88
89         if (++internals->active_slave >= slave_count)
90                 internals->active_slave = 0;
91         return num_rx_total;
92 }
93
94 static uint16_t
95 bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
96                 uint16_t nb_pkts)
97 {
98         struct bond_dev_private *internals;
99
100         /* Cast to structure containing the bonded device's port id and queue id */
101         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
102
103         internals = bd_rx_q->dev_private;
104
105         return rte_eth_rx_burst(internals->current_primary_port,
106                         bd_rx_q->queue_id, bufs, nb_pkts);
107 }
108
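/*
 * Return true for untagged slow-protocol frames (LACP data units and
 * marker PDUs), which the mode 4 Rx path intercepts instead of
 * delivering to the application.
 */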
109 static inline uint8_t
110 is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
111 {
112         const uint16_t ether_type_slow_be =
113                 rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
114
115         return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
116                 (ethertype == ether_type_slow_be &&
117                 (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
118 }
119
120 /*****************************************************************************
121  * Flow director's setup for mode 4 optimization
122  */
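/*
 * A minimal usage sketch (an assumption about the application, not part of
 * this file): the dedicated-queue path below is only exercised when the
 * application requests it before starting the bonded device, e.g.:
 *
 *     rte_eth_bond_8023ad_dedicated_queues_enable(bond_port_id);
 *     rte_eth_dev_start(bond_port_id);
 */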
123
124 static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
125         .dst.addr_bytes = { 0 },
126         .src.addr_bytes = { 0 },
127         .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
128 };
129
130 static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
131         .dst.addr_bytes = { 0 },
132         .src.addr_bytes = { 0 },
133         .type = RTE_BE16(0xFFFF),
134 };
135
136 static struct rte_flow_item flow_item_8023ad[] = {
137         {
138                 .type = RTE_FLOW_ITEM_TYPE_ETH,
139                 .spec = &flow_item_eth_type_8023ad,
140                 .last = NULL,
141                 .mask = &flow_item_eth_mask_type_8023ad,
142         },
143         {
144                 .type = RTE_FLOW_ITEM_TYPE_END,
145                 .spec = NULL,
146                 .last = NULL,
147                 .mask = NULL,
148         }
149 };
150
151 const struct rte_flow_attr flow_attr_8023ad = {
152         .group = 0,
153         .priority = 0,
154         .ingress = 1,
155         .egress = 0,
156         .reserved = 0,
157 };
158
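/*
 * Check that a slave can support the dedicated LACP Rx queue: the flow
 * rule redirecting slow-protocol frames must validate on the slave, and
 * the slave must have enough Rx/Tx queues to add one more beyond those
 * configured on the bonded device.
 */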
159 int
160 bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
161                 uint16_t slave_port)
{
162         struct rte_eth_dev_info slave_info;
163         struct rte_flow_error error;
164         struct bond_dev_private *internals = bond_dev->data->dev_private;
165
166         const struct rte_flow_action_queue lacp_queue_conf = {
167                 .index = 0,
168         };
169
170         const struct rte_flow_action actions[] = {
171                 {
172                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
173                         .conf = &lacp_queue_conf
174                 },
175                 {
176                         .type = RTE_FLOW_ACTION_TYPE_END,
177                 }
178         };
179
180         int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
181                         flow_item_8023ad, actions, &error);
182         if (ret < 0) {
183                 RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
184                                 __func__, error.message, slave_port,
185                                 internals->mode4.dedicated_queues.rx_qid);
186                 return -1;
187         }
188
189         ret = rte_eth_dev_info_get(slave_port, &slave_info);
190         if (ret != 0) {
191                 RTE_BOND_LOG(ERR,
192                         "%s: Error during getting device (port %u) info: %s",
193                         __func__, slave_port, strerror(-ret));
194
195                 return ret;
196         }
197
198         if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
199                         slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
200                 RTE_BOND_LOG(ERR,
201                         "%s: Slave %d capabilities do not allow allocation of additional queues",
202                         __func__, slave_port);
203                 return -1;
204         }
205
206         return 0;
207 }
208
209 int
210 bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id)
{
211         struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
212         struct bond_dev_private *internals = bond_dev->data->dev_private;
213         struct rte_eth_dev_info bond_info;
214         uint16_t idx;
215         int ret;
216
217         /* Verify that all slaves in the bonding device support the flow rules needed for dedicated queues */
218         if (internals->slave_count > 0) {
219                 ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
220                 if (ret != 0) {
221                         RTE_BOND_LOG(ERR,
222                                 "%s: Error during getting device (port %u) info: %s",
223                                 __func__, bond_dev->data->port_id,
224                                 strerror(-ret));
225
226                         return ret;
227                 }
228
229                 internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
230                 internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
231
232                 for (idx = 0; idx < internals->slave_count; idx++) {
233                         if (bond_ethdev_8023ad_flow_verify(bond_dev,
234                                         internals->slaves[idx].port_id) != 0)
235                                 return -1;
236                 }
237         }
238
239         return 0;
240 }
241
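/*
 * Install the flow rule that diverts slow-protocol frames received on a
 * slave to the dedicated LACP Rx queue of the bonded device.
 */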
242 int
243 bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port)
{
245         struct rte_flow_error error;
246         struct bond_dev_private *internals = bond_dev->data->dev_private;
247         struct rte_flow_action_queue lacp_queue_conf = {
248                 .index = internals->mode4.dedicated_queues.rx_qid,
249         };
250
251         const struct rte_flow_action actions[] = {
252                 {
253                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
254                         .conf = &lacp_queue_conf
255                 },
256                 {
257                         .type = RTE_FLOW_ACTION_TYPE_END,
258                 }
259         };
260
261         internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
262                         &flow_attr_8023ad, flow_item_8023ad, actions, &error);
263         if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
264                 RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
265                                 __func__, error.message, slave_port,
266                                 internals->mode4.dedicated_queues.rx_qid);
268                 return -1;
269         }
270
271         return 0;
272 }
273
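/*
 * Mode 4 Rx burst. When dedicated_rxq is set, slow-protocol frames are
 * steered to their own hardware queue and need not be filtered out here;
 * otherwise LACP/marker frames are picked out of the burst in software
 * and handed to the mode 4 state machine. Packets that fail the
 * collecting-state or destination address checks are dropped.
 */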
274 static inline uint16_t
275 rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
276                 bool dedicated_rxq)
277 {
278         /* Cast to structure containing the bonded device's port id and queue id */
279         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
280         struct bond_dev_private *internals = bd_rx_q->dev_private;
281         struct rte_eth_dev *bonded_eth_dev =
282                                         &rte_eth_devices[internals->port_id];
283         struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
284         struct rte_ether_hdr *hdr;
285
286         const uint16_t ether_type_slow_be =
287                 rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
288         uint16_t num_rx_total = 0;      /* Total number of received packets */
289         uint16_t slaves[RTE_MAX_ETHPORTS];
290         uint16_t slave_count, idx;
291
292         uint8_t collecting;  /* current slave collecting status */
293         const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
294         const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
295         uint8_t subtype;
296         uint16_t i;
297         uint16_t j;
298         uint16_t k;
299
300         /* Copy slave list to protect against slave up/down changes during rx
301          * bursting */
302         slave_count = internals->active_slave_count;
303         memcpy(slaves, internals->active_slaves,
304                         sizeof(internals->active_slaves[0]) * slave_count);
305
306         idx = internals->active_slave;
307         if (idx >= slave_count) {
308                 internals->active_slave = 0;
309                 idx = 0;
310         }
311         for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
312                 j = num_rx_total;
313                 collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
314                                          COLLECTING);
315
316                 /* Read packets from this slave */
317                 num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
318                                 &bufs[num_rx_total], nb_pkts - num_rx_total);
319
320                 for (k = j; k < 2 && k < num_rx_total; k++)
321                         rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
322
323                 /* Handle slow protocol packets. */
324                 while (j < num_rx_total) {
325                         if (j + 3 < num_rx_total)
326                                 rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
327
328                         hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
329                         subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
330
331                         /* Remove the packet from the array if:
332                          * - it is a slow-protocol packet and no dedicated Rx queue is present,
333                          * - the slave is not in collecting state, or
334                          * - the bonded interface is not in promiscuous mode and either:
335                          *   - the packet is unicast and the destination MAC does not match, or
336                          *   - the packet is multicast and the bonded interface
337                          *     is not in allmulti mode.
338                          */
339                         if (unlikely(
340                                 (!dedicated_rxq &&
341                                  is_lacp_packets(hdr->ether_type, subtype,
342                                                  bufs[j])) ||
343                                 !collecting ||
344                                 (!promisc &&
345                                  ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
346                                    !rte_is_same_ether_addr(bond_mac,
347                                                        &hdr->d_addr)) ||
348                                   (!allmulti &&
349                                    rte_is_multicast_ether_addr(&hdr->d_addr)))))) {
350
351                                 if (hdr->ether_type == ether_type_slow_be) {
352                                         bond_mode_8023ad_handle_slow_pkt(
353                                             internals, slaves[idx], bufs[j]);
354                                 } else
355                                         rte_pktmbuf_free(bufs[j]);
356
357                                 /* Packet is managed by mode 4 or dropped, shift the array */
358                                 num_rx_total--;
359                                 if (j < num_rx_total) {
360                                         memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
361                                                 (num_rx_total - j));
362                                 }
363                         } else
364                                 j++;
365                 }
366                 if (unlikely(++idx == slave_count))
367                         idx = 0;
368         }
369
370         if (++internals->active_slave >= slave_count)
371                 internals->active_slave = 0;
372
373         return num_rx_total;
374 }
375
376 static uint16_t
377 bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
378                 uint16_t nb_pkts)
379 {
380         return rx_burst_8023ad(queue, bufs, nb_pkts, false);
381 }
382
383 static uint16_t
384 bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
385                 uint16_t nb_pkts)
386 {
387         return rx_burst_8023ad(queue, bufs, nb_pkts, true);
388 }
389
390 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
391 uint32_t burstnumberRX;
392 uint32_t burstnumberTX;
393
394 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
395
396 static void
397 arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
398 {
399         switch (arp_op) {
400         case RTE_ARP_OP_REQUEST:
401                 strlcpy(buf, "ARP Request", buf_len);
402                 return;
403         case RTE_ARP_OP_REPLY:
404                 strlcpy(buf, "ARP Reply", buf_len);
405                 return;
406         case RTE_ARP_OP_REVREQUEST:
407                 strlcpy(buf, "Reverse ARP Request", buf_len);
408                 return;
409         case RTE_ARP_OP_REVREPLY:
410                 strlcpy(buf, "Reverse ARP Reply", buf_len);
411                 return;
412         case RTE_ARP_OP_INVREQUEST:
413                 strlcpy(buf, "Peer Identify Request", buf_len);
414                 return;
415         case RTE_ARP_OP_INVREPLY:
416                 strlcpy(buf, "Peer Identify Reply", buf_len);
417                 return;
418         default:
419                 break;
420         }
421         strlcpy(buf, "Unknown", buf_len);
422         return;
423 }
424 #endif
425 #define MaxIPv4String   16
426 static void
427 ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
428 {
429         uint32_t ipv4_addr;
430
431         ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
432         snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
433                 (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
434                 ipv4_addr & 0xFF);
435 }
436
437 #define MAX_CLIENTS_NUMBER      128
438 uint8_t active_clients;
439 struct client_stats_t {
440         uint16_t port;
441         uint32_t ipv4_addr;
442         uint32_t ipv4_rx_packets;
443         uint32_t ipv4_tx_packets;
444 };
445 struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
446
447 static void
448 update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
449 {
450         int i = 0;
451
452         for (; i < MAX_CLIENTS_NUMBER; i++)     {
453                 if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port))      {
454                         /* Just update RX packets number for this client */
455                         if (TXorRXindicator == &burstnumberRX)
456                                 client_stats[i].ipv4_rx_packets++;
457                         else
458                                 client_stats[i].ipv4_tx_packets++;
459                         return;
460                 }
461         }
462         /* We have a new client: insert it into the table and update its
             * stats, unless the table is already full (avoid writing past the
             * end of client_stats) */
            if (active_clients == MAX_CLIENTS_NUMBER)
                    return;
463         if (TXorRXindicator == &burstnumberRX)
464                 client_stats[active_clients].ipv4_rx_packets++;
465         else
466                 client_stats[active_clients].ipv4_tx_packets++;
467         client_stats[active_clients].ipv4_addr = addr;
468         client_stats[active_clients].port = port;
469         active_clients++;
470
471 }
472
473 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
474 #define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
475         rte_log(RTE_LOG_DEBUG, bond_logtype,                            \
476                 "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
477                 "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
478                 info,                                                   \
479                 port,                                                   \
480                 eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
481                 eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
482                 eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
483                 src_ip,                                                 \
484                 eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
485                 eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
486                 eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
487                 dst_ip,                                                 \
488                 arp_op, ++burstnumber)
489 #endif
490
491 static void
492 mode6_debug(const char __attribute__((unused)) *info,
493         struct rte_ether_hdr *eth_h, uint16_t port,
494         uint32_t __attribute__((unused)) *burstnumber)
495 {
496         struct rte_ipv4_hdr *ipv4_h;
497 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
498         struct rte_arp_hdr *arp_h;
499         char dst_ip[16];
500         char ArpOp[24];
501         char buf[16];
502 #endif
503         char src_ip[16];
504
505         uint16_t ether_type = eth_h->ether_type;
506         uint16_t offset = get_vlan_offset(eth_h, &ether_type);
507
508 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
509         strlcpy(buf, info, 16);
510 #endif
511
512         if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
513                 ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
514                 ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
515 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
516                 ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
517                 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
518 #endif
519                 update_client_stats(ipv4_h->src_addr, port, burstnumber);
520         }
521 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
522         else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
523                 arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
524                 ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
525                 ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
526                 arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
527                                 ArpOp, sizeof(ArpOp));
528                 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
529         }
530 #endif
531 }
532 #endif
533
534 static uint16_t
535 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
536 {
537         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
538         struct bond_dev_private *internals = bd_rx_q->dev_private;
539         struct rte_ether_hdr *eth_h;
540         uint16_t ether_type, offset;
541         uint16_t nb_recv_pkts;
542         int i;
543
544         nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
545
546         for (i = 0; i < nb_recv_pkts; i++) {
547                 eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
548                 ether_type = eth_h->ether_type;
549                 offset = get_vlan_offset(eth_h, &ether_type);
550
551                 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
552 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
553                         mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
554 #endif
555                         bond_mode_alb_arp_recv(eth_h, offset, internals);
556                 }
557 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
558                 else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
559                         mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
560 #endif
561         }
562
563         return nb_recv_pkts;
564 }
565
566 static uint16_t
567 bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
568                 uint16_t nb_pkts)
569 {
570         struct bond_dev_private *internals;
571         struct bond_tx_queue *bd_tx_q;
572
573         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
574         uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
575
576         uint16_t num_of_slaves;
577         uint16_t slaves[RTE_MAX_ETHPORTS];
578
579         uint16_t num_tx_total = 0, num_tx_slave;
580
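        /* Shared across all bonded devices and Tx queues, so the round-robin
         * starting point is only approximate when several queues transmit
         * concurrently. */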
581         static int slave_idx = 0;
582         int i, cslave_idx = 0, tx_fail_total = 0;
583
584         bd_tx_q = (struct bond_tx_queue *)queue;
585         internals = bd_tx_q->dev_private;
586
587         /* Copy slave list to protect against slave up/down changes during tx
588          * bursting */
589         num_of_slaves = internals->active_slave_count;
590         memcpy(slaves, internals->active_slaves,
591                         sizeof(internals->active_slaves[0]) * num_of_slaves);
592
593         if (num_of_slaves < 1)
594                 return num_tx_total;
595
596         /* Populate each slave's mbuf array with the packets to be sent on it */
597         for (i = 0; i < nb_pkts; i++) {
598                 cslave_idx = (slave_idx + i) % num_of_slaves;
599                 slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
600         }
601
602         /* increment current slave index so the next call to tx burst starts on the
603          * next slave */
604         slave_idx = ++cslave_idx;
605
606         /* Send packet burst on each slave device */
607         for (i = 0; i < num_of_slaves; i++) {
608                 if (slave_nb_pkts[i] > 0) {
609                         num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
610                                         slave_bufs[i], slave_nb_pkts[i]);
611
612                         /* if tx burst fails move packets to end of bufs */
613                         if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
614                                 int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
615
616                                 tx_fail_total += tx_fail_slave;
617
618                                 memcpy(&bufs[nb_pkts - tx_fail_total],
619                                        &slave_bufs[i][num_tx_slave],
620                                        tx_fail_slave * sizeof(bufs[0]));
621                         }
622                         num_tx_total += num_tx_slave;
623                 }
624         }
625
626         return num_tx_total;
627 }
628
629 static uint16_t
630 bond_ethdev_tx_burst_active_backup(void *queue,
631                 struct rte_mbuf **bufs, uint16_t nb_pkts)
632 {
633         struct bond_dev_private *internals;
634         struct bond_tx_queue *bd_tx_q;
635
636         bd_tx_q = (struct bond_tx_queue *)queue;
637         internals = bd_tx_q->dev_private;
638
639         if (internals->active_slave_count < 1)
640                 return 0;
641
642         return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
643                         bufs, nb_pkts);
644 }
645
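/*
 * XOR-fold hash helpers used by the Tx hashing policies below: each helper
 * reduces the relevant header fields (MAC, IPv4 or IPv6 addresses) to a
 * small integer that is later mixed and taken modulo the slave count.
 */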
646 static inline uint16_t
647 ether_hash(struct rte_ether_hdr *eth_hdr)
648 {
649         unaligned_uint16_t *word_src_addr =
650                 (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
651         unaligned_uint16_t *word_dst_addr =
652                 (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
653
654         return (word_src_addr[0] ^ word_dst_addr[0]) ^
655                         (word_src_addr[1] ^ word_dst_addr[1]) ^
656                         (word_src_addr[2] ^ word_dst_addr[2]);
657 }
658
659 static inline uint32_t
660 ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
661 {
662         return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
663 }
664
665 static inline uint32_t
666 ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
667 {
668         unaligned_uint32_t *word_src_addr =
669                 (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
670         unaligned_uint32_t *word_dst_addr =
671                 (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
672
673         return (word_src_addr[0] ^ word_dst_addr[0]) ^
674                         (word_src_addr[1] ^ word_dst_addr[1]) ^
675                         (word_src_addr[2] ^ word_dst_addr[2]) ^
676                         (word_src_addr[3] ^ word_dst_addr[3]);
677 }
678
679
680 void
681 burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
682                 uint16_t slave_count, uint16_t *slaves)
683 {
684         struct rte_ether_hdr *eth_hdr;
685         uint32_t hash;
686         int i;
687
688         for (i = 0; i < nb_pkts; i++) {
689                 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
690
691                 hash = ether_hash(eth_hdr);
692
693                 slaves[i] = (hash ^ (hash >> 8)) % slave_count;
694         }
695 }
696
697 void
698 burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
699                 uint16_t slave_count, uint16_t *slaves)
700 {
701         uint16_t i;
702         struct rte_ether_hdr *eth_hdr;
703         uint16_t proto;
704         size_t vlan_offset;
705         uint32_t hash, l3hash;
706
707         for (i = 0; i < nb_pkts; i++) {
708                 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
709                 l3hash = 0;
710
711                 proto = eth_hdr->ether_type;
712                 hash = ether_hash(eth_hdr);
713
714                 vlan_offset = get_vlan_offset(eth_hdr, &proto);
715
716                 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
717                         struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
718                                         ((char *)(eth_hdr + 1) + vlan_offset);
719                         l3hash = ipv4_hash(ipv4_hdr);
720
721                 } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
722                         struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
723                                         ((char *)(eth_hdr + 1) + vlan_offset);
724                         l3hash = ipv6_hash(ipv6_hdr);
725                 }
726
727                 hash = hash ^ l3hash;
728                 hash ^= hash >> 16;
729                 hash ^= hash >> 8;
730
731                 slaves[i] = hash % slave_count;
732         }
733 }
734
735 void
736 burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
737                 uint16_t slave_count, uint16_t *slaves)
738 {
739         struct rte_ether_hdr *eth_hdr;
740         uint16_t proto;
741         size_t vlan_offset;
742         int i;
743
744         struct rte_udp_hdr *udp_hdr;
745         struct rte_tcp_hdr *tcp_hdr;
746         uint32_t hash, l3hash, l4hash;
747
748         for (i = 0; i < nb_pkts; i++) {
749                 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
750                 size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
751                 proto = eth_hdr->ether_type;
752                 vlan_offset = get_vlan_offset(eth_hdr, &proto);
753                 l3hash = 0;
754                 l4hash = 0;
755
756                 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
757                         struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
758                                         ((char *)(eth_hdr + 1) + vlan_offset);
759                         size_t ip_hdr_offset;
760
761                         l3hash = ipv4_hash(ipv4_hdr);
762
763                         /* there is no L4 header in fragmented packet */
764                         if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
765                                                                 == 0)) {
766                                 ip_hdr_offset = (ipv4_hdr->version_ihl
767                                         & RTE_IPV4_HDR_IHL_MASK) *
768                                         RTE_IPV4_IHL_MULTIPLIER;
769
770                                 if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
771                                         tcp_hdr = (struct rte_tcp_hdr *)
772                                                 ((char *)ipv4_hdr +
773                                                         ip_hdr_offset);
774                                         if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
775                                                         < pkt_end)
776                                                 l4hash = HASH_L4_PORTS(tcp_hdr);
777                                 } else if (ipv4_hdr->next_proto_id ==
778                                                                 IPPROTO_UDP) {
779                                         udp_hdr = (struct rte_udp_hdr *)
780                                                 ((char *)ipv4_hdr +
781                                                         ip_hdr_offset);
782                                         if ((size_t)udp_hdr + sizeof(*udp_hdr)
783                                                         < pkt_end)
784                                                 l4hash = HASH_L4_PORTS(udp_hdr);
785                                 }
786                         }
787                 } else if  (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
788                         struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
789                                         ((char *)(eth_hdr + 1) + vlan_offset);
790                         l3hash = ipv6_hash(ipv6_hdr);
791
792                         if (ipv6_hdr->proto == IPPROTO_TCP) {
793                                 tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
794                                 l4hash = HASH_L4_PORTS(tcp_hdr);
795                         } else if (ipv6_hdr->proto == IPPROTO_UDP) {
796                                 udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
797                                 l4hash = HASH_L4_PORTS(udp_hdr);
798                         }
799                 }
800
801                 hash = l3hash ^ l4hash;
802                 hash ^= hash >> 16;
803                 hash ^= hash >> 8;
804
805                 slaves[i] = hash % slave_count;
806         }
807 }
808
809 struct bwg_slave {
810         uint64_t bwg_left_int;
811         uint64_t bwg_left_remainder;
812         uint16_t slave;
813 };
814
815 void
816 bond_tlb_activate_slave(struct bond_dev_private *internals)
{
817         int i;
818
819         for (i = 0; i < internals->active_slave_count; i++) {
820                 tlb_last_obytets[internals->active_slaves[i]] = 0;
821         }
822 }
823
824 static int
825 bandwidth_cmp(const void *a, const void *b)
826 {
827         const struct bwg_slave *bwg_a = a;
828         const struct bwg_slave *bwg_b = b;
829         int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
830         int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
831                         (int64_t)bwg_a->bwg_left_remainder;
832         if (diff > 0)
833                 return 1;
834         else if (diff < 0)
835                 return -1;
836         else if (diff2 > 0)
837                 return 1;
838         else if (diff2 < 0)
839                 return -1;
840         else
841                 return 0;
842 }
843
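/*
 * Estimate the bandwidth still unused on a slave link within the current
 * reorder window: link_bwg is the link capacity scaled to the elapsed
 * update periods and load is the byte count sent since the last counter
 * reset. The quotient/remainder pair is only used for relative ordering
 * in bandwidth_cmp().
 */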
844 static void
845 bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
846                 struct bwg_slave *bwg_slave)
847 {
848         struct rte_eth_link link_status;
849
850         rte_eth_link_get_nowait(port_id, &link_status);
851         uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
852         if (link_bwg == 0)
853                 return;
854         link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
855         bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
856         bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
857 }
858
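/*
 * Periodic alarm callback for TLB mode: sample per-slave Tx byte counters,
 * estimate each slave's remaining bandwidth and re-sort tlb_slaves_order
 * so that transmission prefers the least loaded slaves. Re-arms itself
 * every REORDER_PERIOD_MS milliseconds.
 */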
859 static void
860 bond_ethdev_update_tlb_slave_cb(void *arg)
861 {
862         struct bond_dev_private *internals = arg;
863         struct rte_eth_stats slave_stats;
864         struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
865         uint16_t slave_count;
866         uint64_t tx_bytes;
867
868         uint8_t update_stats = 0;
869         uint16_t slave_id;
870         uint16_t i;
871
872         internals->slave_update_idx++;
873
874
875         if (internals->slave_update_idx >= REORDER_PERIOD_MS)
876                 update_stats = 1;
877
878         for (i = 0; i < internals->active_slave_count; i++) {
879                 slave_id = internals->active_slaves[i];
880                 rte_eth_stats_get(slave_id, &slave_stats);
881                 tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
882                 bandwidth_left(slave_id, tx_bytes,
883                                 internals->slave_update_idx, &bwg_array[i]);
884                 bwg_array[i].slave = slave_id;
885
886                 if (update_stats) {
887                         tlb_last_obytets[slave_id] = slave_stats.obytes;
888                 }
889         }
890
891         if (update_stats == 1)
892                 internals->slave_update_idx = 0;
893
894         slave_count = i;
895         qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
896         for (i = 0; i < slave_count; i++)
897                 internals->tlb_slaves_order[i] = bwg_array[i].slave;
898
899         rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
900                         internals);
901 }
902
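/*
 * TLB Tx burst: walk the slaves in tlb_slaves_order (least loaded first)
 * and transmit the remaining packets on each in turn. Packets carrying
 * the primary slave's source MAC are rewritten to the MAC of the slave
 * actually used, so that peers learn the correct address.
 */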
903 static uint16_t
904 bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
905 {
906         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
907         struct bond_dev_private *internals = bd_tx_q->dev_private;
908
909         struct rte_eth_dev *primary_port =
910                         &rte_eth_devices[internals->primary_port];
911         uint16_t num_tx_total = 0;
912         uint16_t i, j;
913
914         uint16_t num_of_slaves = internals->active_slave_count;
915         uint16_t slaves[RTE_MAX_ETHPORTS];
916
917         struct rte_ether_hdr *ether_hdr;
918         struct rte_ether_addr primary_slave_addr;
919         struct rte_ether_addr active_slave_addr;
920
921         if (num_of_slaves < 1)
922                 return num_tx_total;
923
924         memcpy(slaves, internals->tlb_slaves_order,
925                                 sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
926
927
928         rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
929
930         if (nb_pkts > 3) {
931                 for (i = 0; i < 3; i++)
932                         rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
933         }
934
935         for (i = 0; i < num_of_slaves; i++) {
936                 rte_eth_macaddr_get(slaves[i], &active_slave_addr);
937                 for (j = num_tx_total; j < nb_pkts; j++) {
938                         if (j + 3 < nb_pkts)
939                                 rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
940
941                         ether_hdr = rte_pktmbuf_mtod(bufs[j],
942                                                 struct rte_ether_hdr *);
943                         if (rte_is_same_ether_addr(&ether_hdr->s_addr,
944                                                         &primary_slave_addr))
945                                 rte_ether_addr_copy(&active_slave_addr,
946                                                 &ether_hdr->s_addr);
947 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
948                         mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
949 #endif
950                 }
951
952                 num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
953                                 bufs + num_tx_total, nb_pkts - num_tx_total);
954
955                 if (num_tx_total == nb_pkts)
956                         break;
957         }
958
959         return num_tx_total;
960 }
961
962 void
963 bond_tlb_disable(struct bond_dev_private *internals)
964 {
965         rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
966 }
967
968 void
969 bond_tlb_enable(struct bond_dev_private *internals)
970 {
971         bond_ethdev_update_tlb_slave_cb(internals);
972 }
973
974 static uint16_t
975 bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
976 {
977         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
978         struct bond_dev_private *internals = bd_tx_q->dev_private;
979
980         struct rte_ether_hdr *eth_h;
981         uint16_t ether_type, offset;
982
983         struct client_data *client_info;
984
985         /*
986          * We create transmit buffers for every slave and one additional one
987          * for the TLB policy. In the worst case every packet will be sent
988          * on the same port.
989          */
989         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
990         uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
991
992         /*
993          * We create separate transmit buffers for update packets as they won't
994          * be counted in num_tx_total.
995          */
996         struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
997         uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
998
999         struct rte_mbuf *upd_pkt;
1000         size_t pkt_size;
1001
1002         uint16_t num_send, num_not_send = 0;
1003         uint16_t num_tx_total = 0;
1004         uint16_t slave_idx;
1005
1006         int i, j;
1007
1008         /* Search tx buffer for ARP packets and forward them to alb */
1009         for (i = 0; i < nb_pkts; i++) {
1010                 eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
1011                 ether_type = eth_h->ether_type;
1012                 offset = get_vlan_offset(eth_h, &ether_type);
1013
1014                 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
1015                         slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
1016
1017                         /* Change src mac in eth header */
1018                         rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
1019
1020                         /* Add packet to slave tx buffer */
1021                         slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
1022                         slave_bufs_pkts[slave_idx]++;
1023                 } else {
1024                         /* If packet is not ARP, send it with TLB policy */
1025                         slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
1026                                         bufs[i];
1027                         slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
1028                 }
1029         }
1030
1031         /* Update connected client ARP tables */
1032         if (internals->mode6.ntt) {
1033                 for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
1034                         client_info = &internals->mode6.client_table[i];
1035
1036                         if (client_info->in_use) {
1037                                 /* Allocate new packet to send ARP update on current slave */
1038                                 upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
1039                                 if (upd_pkt == NULL) {
1040                                         RTE_BOND_LOG(ERR,
1041                                                      "Failed to allocate ARP packet from pool");
1042                                         continue;
1043                                 }
1044                                 pkt_size = sizeof(struct rte_ether_hdr) +
1045                                         sizeof(struct rte_arp_hdr) +
1046                                         client_info->vlan_count *
1047                                         sizeof(struct rte_vlan_hdr);
1048                                 upd_pkt->data_len = pkt_size;
1049                                 upd_pkt->pkt_len = pkt_size;
1050
1051                                 slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
1052                                                 internals);
1053
1054                                 /* Add packet to update tx buffer */
1055                                 update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
1056                                 update_bufs_pkts[slave_idx]++;
1057                         }
1058                 }
1059                 internals->mode6.ntt = 0;
1060         }
1061
1062         /* Send ARP packets on proper slaves */
1063         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
1064                 if (slave_bufs_pkts[i] > 0) {
1065                         num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
1066                                         slave_bufs[i], slave_bufs_pkts[i]);
                        /* The unsent mbufs sit at the tail of slave_bufs[i];
                         * move exactly those back to the tail of bufs */
1067                         for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
1068                                 bufs[nb_pkts - 1 - num_not_send - j] =
1069                                                 slave_bufs[i][slave_bufs_pkts[i] - 1 - j];
1070                         }
1071
1072                         num_tx_total += num_send;
1073                         num_not_send += slave_bufs_pkts[i] - num_send;
1074
1075 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
1076         /* Print TX stats including update packets */
1077                         for (j = 0; j < slave_bufs_pkts[i]; j++) {
1078                                 eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
1079                                                         struct rte_ether_hdr *);
1080                                 mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
1081                         }
1082 #endif
1083                 }
1084         }
1085
1086         /* Send update packets on proper slaves */
1087         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
1088                 if (update_bufs_pkts[i] > 0) {
1089                         num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
1090                                         update_bufs_pkts[i]);
1091                         for (j = num_send; j < update_bufs_pkts[i]; j++) {
1092                                 rte_pktmbuf_free(update_bufs[i][j]);
1093                         }
1094 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
1095                         for (j = 0; j < update_bufs_pkts[i]; j++) {
1096                                 eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
1097                                                         struct rte_ether_hdr *);
1098                                 mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
1099                         }
1100 #endif
1101                 }
1102         }
1103
1104         /* Send non-ARP packets using tlb policy */
1105         if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
1106                 num_send = bond_ethdev_tx_burst_tlb(queue,
1107                                 slave_bufs[RTE_MAX_ETHPORTS],
1108                                 slave_bufs_pkts[RTE_MAX_ETHPORTS]);
1109
                /* Only the TLB packets that were not sent are handed back to
                 * the caller; they sit at the tail of the TLB buffer */
1110                 for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send; j++) {
1111                         bufs[nb_pkts - 1 - num_not_send - j] =
1112                                         slave_bufs[RTE_MAX_ETHPORTS]
                                                        [slave_bufs_pkts[RTE_MAX_ETHPORTS] - 1 - j];
1113                 }
1114
1115                 num_tx_total += num_send;
1116         }
1117
1118         return num_tx_total;
1119 }
1120
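/*
 * Common Tx path for the balance and 802.3AD modes: hash each packet with
 * the configured xmit policy to pick an output slave, group the packets
 * per slave and transmit each group in a single burst. Unsent packets are
 * moved to the tail of bufs so the caller can retry or free them.
 */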
1121 static inline uint16_t
1122 tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
1123                  uint16_t *slave_port_ids, uint16_t slave_count)
1124 {
1125         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1126         struct bond_dev_private *internals = bd_tx_q->dev_private;
1127
1128         /* Array to sort mbufs for transmission on each slave into */
1129         struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
1130         /* Number of mbufs for transmission on each slave */
1131         uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
1132         /* Mapping array generated by hash function to map mbufs to slaves */
1133         uint16_t bufs_slave_port_idxs[nb_bufs];
1134
1135         uint16_t slave_tx_count;
1136         uint16_t total_tx_count = 0, total_tx_fail_count = 0;
1137
1138         uint16_t i;
1139
1140         /*
1141          * Populate slaves mbuf with the packets which are to be sent on it
1142          * selecting output slave using hash based on xmit policy
1143          */
1144         internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
1145                         bufs_slave_port_idxs);
1146
1147         for (i = 0; i < nb_bufs; i++) {
1148                 /* Populate slave mbuf arrays with mbufs for that slave. */
1149                 uint16_t slave_idx = bufs_slave_port_idxs[i];
1150
1151                 slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
1152         }
1153
1154         /* Send packet burst on each slave device */
1155         for (i = 0; i < slave_count; i++) {
1156                 if (slave_nb_bufs[i] == 0)
1157                         continue;
1158
1159                 slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
1160                                 bd_tx_q->queue_id, slave_bufs[i],
1161                                 slave_nb_bufs[i]);
1162
1163                 total_tx_count += slave_tx_count;
1164
1165                 /* If tx burst fails move packets to end of bufs */
1166                 if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
1167                         int slave_tx_fail_count = slave_nb_bufs[i] -
1168                                         slave_tx_count;
1169                         total_tx_fail_count += slave_tx_fail_count;
1170                         memcpy(&bufs[nb_bufs - total_tx_fail_count],
1171                                &slave_bufs[i][slave_tx_count],
1172                                slave_tx_fail_count * sizeof(bufs[0]));
1173                 }
1174         }
1175
1176         return total_tx_count;
1177 }
1178
1179 static uint16_t
1180 bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
1181                 uint16_t nb_bufs)
1182 {
1183         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1184         struct bond_dev_private *internals = bd_tx_q->dev_private;
1185
1186         uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
1187         uint16_t slave_count;
1188
1189         if (unlikely(nb_bufs == 0))
1190                 return 0;
1191
1192         /* Copy slave list to protect against slave up/down changes during tx
1193          * bursting
1194          */
1195         slave_count = internals->active_slave_count;
1196         if (unlikely(slave_count < 1))
1197                 return 0;
1198
1199         memcpy(slave_port_ids, internals->active_slaves,
1200                         sizeof(slave_port_ids[0]) * slave_count);
1201         return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
1202                                 slave_count);
1203 }
1204
1205 static inline uint16_t
1206 tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
1207                 bool dedicated_txq)
1208 {
1209         struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1210         struct bond_dev_private *internals = bd_tx_q->dev_private;
1211
1212         uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
1213         uint16_t slave_count;
1214
1215         uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
1216         uint16_t dist_slave_count;
1217
1218         uint16_t slave_tx_count;
1219
1220         uint16_t i;
1221
1222         /* Copy slave list to protect against slave up/down changes during tx
1223          * bursting */
1224         slave_count = internals->active_slave_count;
1225         if (unlikely(slave_count < 1))
1226                 return 0;
1227
1228         memcpy(slave_port_ids, internals->active_slaves,
1229                         sizeof(slave_port_ids[0]) * slave_count);
1230
1231         if (dedicated_txq)
1232                 goto skip_tx_ring;
1233
1234         /* Check for LACP control packets and send if available */
1235         for (i = 0; i < slave_count; i++) {
1236                 struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
1237                 struct rte_mbuf *ctrl_pkt = NULL;
1238
1239                 if (likely(rte_ring_empty(port->tx_ring)))
1240                         continue;
1241
1242                 if (rte_ring_dequeue(port->tx_ring,
1243                                      (void **)&ctrl_pkt) != -ENOENT) {
1244                         slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
1245                                         bd_tx_q->queue_id, &ctrl_pkt, 1);
1246                         /*
1247                          * re-enqueue LAG control plane packets to buffering
1248                          * ring if transmission fails so the packet isn't lost.
1249                          */
1250                         if (slave_tx_count != 1)
1251                                 rte_ring_enqueue(port->tx_ring, ctrl_pkt);
1252                 }
1253         }
1254
1255 skip_tx_ring:
1256         if (unlikely(nb_bufs == 0))
1257                 return 0;
1258
1259         dist_slave_count = 0;
1260         for (i = 0; i < slave_count; i++) {
1261                 struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
1262
1263                 if (ACTOR_STATE(port, DISTRIBUTING))
1264                         dist_slave_port_ids[dist_slave_count++] =
1265                                         slave_port_ids[i];
1266         }
1267
1268         if (unlikely(dist_slave_count < 1))
1269                 return 0;
1270
1271         return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
1272                                 dist_slave_count);
1273 }
1274
1275 static uint16_t
1276 bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
1277                 uint16_t nb_bufs)
1278 {
1279         return tx_burst_8023ad(queue, bufs, nb_bufs, false);
1280 }
1281
1282 static uint16_t
1283 bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
1284                 uint16_t nb_bufs)
1285 {
1286         return tx_burst_8023ad(queue, bufs, nb_bufs, true);
1287 }
1288
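/*
 * Broadcast Tx burst: bump each mbuf's reference count by the number of
 * extra slaves and transmit the whole burst on every active slave. On
 * partial failure, surplus references are freed for every slave except
 * the most successful one, whose Tx count is returned to the caller.
 */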
1289 static uint16_t
1290 bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
1291                 uint16_t nb_pkts)
1292 {
1293         struct bond_dev_private *internals;
1294         struct bond_tx_queue *bd_tx_q;
1295
1296         uint16_t slaves[RTE_MAX_ETHPORTS];
1297         uint8_t tx_failed_flag = 0;
1298         uint16_t num_of_slaves;
1299
1300         uint16_t max_nb_of_tx_pkts = 0;
1301
1302         int slave_tx_total[RTE_MAX_ETHPORTS];
1303         int i, most_successful_tx_slave = -1;
1304
1305         bd_tx_q = (struct bond_tx_queue *)queue;
1306         internals = bd_tx_q->dev_private;
1307
1308         /* Copy slave list to protect against slave up/down changes during tx
1309          * bursting */
1310         num_of_slaves = internals->active_slave_count;
1311         memcpy(slaves, internals->active_slaves,
1312                         sizeof(internals->active_slaves[0]) * num_of_slaves);
1313
1314         if (num_of_slaves < 1)
1315                 return 0;
1316
1317         /* Increment reference count on mbufs */
1318         for (i = 0; i < nb_pkts; i++)
1319                 rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
1320
1321         /* Transmit burst on each active slave */
1322         for (i = 0; i < num_of_slaves; i++) {
1323                 slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1324                                         bufs, nb_pkts);
1325
1326                 if (unlikely(slave_tx_total[i] < nb_pkts))
1327                         tx_failed_flag = 1;
1328
1329                 /* record the value and slave index for the slave which transmits the
1330                  * maximum number of packets */
1331                 if (slave_tx_total[i] > max_nb_of_tx_pkts) {
1332                         max_nb_of_tx_pkts = slave_tx_total[i];
1333                         most_successful_tx_slave = i;
1334                 }
1335         }
1336
1337         /* If slaves fail to transmit packets from the burst, the calling
1338          * application is not expected to know about multiple references to
1339          * packets, so we must handle failures of all packets except those of
1340          * the most successful slave */
1341         if (unlikely(tx_failed_flag))
1342                 for (i = 0; i < num_of_slaves; i++)
1343                         if (i != most_successful_tx_slave)
1344                                 while (slave_tx_total[i] < nb_pkts)
1345                                         rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
1346
1347         return max_nb_of_tx_pkts;
1348 }
1349
1350 static void
1351 link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
1352 {
1353         struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1354
1355         if (bond_ctx->mode == BONDING_MODE_8023AD) {
                /**
                 * If in mode 4 then save the link properties of the first
                 * slave; all subsequent slaves must match these properties.
                 */
1360                 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1361
1362                 bond_link->link_autoneg = slave_link->link_autoneg;
1363                 bond_link->link_duplex = slave_link->link_duplex;
1364                 bond_link->link_speed = slave_link->link_speed;
1365         } else {
1366                 /**
1367                  * In any other mode the link properties are set to default
1368                  * values of AUTONEG/DUPLEX
1369                  */
1370                 ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
1371                 ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1372         }
1373 }
1374
1375 static int
1376 link_properties_valid(struct rte_eth_dev *ethdev,
1377                 struct rte_eth_link *slave_link)
1378 {
1379         struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1380
1381         if (bond_ctx->mode == BONDING_MODE_8023AD) {
1382                 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1383
1384                 if (bond_link->link_duplex != slave_link->link_duplex ||
1385                         bond_link->link_autoneg != slave_link->link_autoneg ||
1386                         bond_link->link_speed != slave_link->link_speed)
1387                         return -1;
1388         }
1389
1390         return 0;
1391 }
1392
1393 int
1394 mac_address_get(struct rte_eth_dev *eth_dev,
1395                 struct rte_ether_addr *dst_mac_addr)
1396 {
1397         struct rte_ether_addr *mac_addr;
1398
1399         if (eth_dev == NULL) {
1400                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1401                 return -1;
1402         }
1403
1404         if (dst_mac_addr == NULL) {
1405                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1406                 return -1;
1407         }
1408
1409         mac_addr = eth_dev->data->mac_addrs;
1410
1411         rte_ether_addr_copy(mac_addr, dst_mac_addr);
1412         return 0;
1413 }
1414
1415 int
1416 mac_address_set(struct rte_eth_dev *eth_dev,
1417                 struct rte_ether_addr *new_mac_addr)
1418 {
1419         struct rte_ether_addr *mac_addr;
1420
1421         if (eth_dev == NULL) {
1422                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1423                 return -1;
1424         }
1425
1426         if (new_mac_addr == NULL) {
1427                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1428                 return -1;
1429         }
1430
1431         mac_addr = eth_dev->data->mac_addrs;
1432
        /* If the new MAC is different from the current MAC then update */
1434         if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1435                 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1436
1437         return 0;
1438 }
1439
1440 static const struct rte_ether_addr null_mac_addr;
1441
1442 /*
1443  * Add additional MAC addresses to the slave
1444  */
1445 int
1446 slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1447                 uint16_t slave_port_id)
1448 {
1449         int i, ret;
1450         struct rte_ether_addr *mac_addr;
1451
1452         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1453                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1454                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1455                         break;
1456
1457                 ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
1458                 if (ret < 0) {
1459                         /* rollback */
1460                         for (i--; i > 0; i--)
1461                                 rte_eth_dev_mac_addr_remove(slave_port_id,
1462                                         &bonded_eth_dev->data->mac_addrs[i]);
1463                         return ret;
1464                 }
1465         }
1466
1467         return 0;
1468 }
1469
1470 /*
1471  * Remove additional MAC addresses from the slave
1472  */
1473 int
1474 slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1475                 uint16_t slave_port_id)
1476 {
1477         int i, rc, ret;
1478         struct rte_ether_addr *mac_addr;
1479
1480         rc = 0;
1481         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1482                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1483                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1484                         break;
1485
1486                 ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
1487                 /* save only the first error */
1488                 if (ret < 0 && rc == 0)
1489                         rc = ret;
1490         }
1491
1492         return rc;
1493 }
1494
1495 int
1496 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1497 {
1498         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1499         int i;
1500
        /* Update slave devices' MAC addresses */
1502         if (internals->slave_count < 1)
1503                 return -1;
1504
1505         switch (internals->mode) {
1506         case BONDING_MODE_ROUND_ROBIN:
1507         case BONDING_MODE_BALANCE:
1508         case BONDING_MODE_BROADCAST:
1509                 for (i = 0; i < internals->slave_count; i++) {
1510                         if (rte_eth_dev_default_mac_addr_set(
1511                                         internals->slaves[i].port_id,
1512                                         bonded_eth_dev->data->mac_addrs)) {
1513                                 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1514                                                 internals->slaves[i].port_id);
1515                                 return -1;
1516                         }
1517                 }
1518                 break;
1519         case BONDING_MODE_8023AD:
1520                 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1521                 break;
1522         case BONDING_MODE_ACTIVE_BACKUP:
1523         case BONDING_MODE_TLB:
1524         case BONDING_MODE_ALB:
1525         default:
1526                 for (i = 0; i < internals->slave_count; i++) {
1527                         if (internals->slaves[i].port_id ==
1528                                         internals->current_primary_port) {
                                if (rte_eth_dev_default_mac_addr_set(
                                                internals->current_primary_port,
                                                bonded_eth_dev->data->mac_addrs)) {
1532                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1533                                                         internals->current_primary_port);
1534                                         return -1;
1535                                 }
1536                         } else {
1537                                 if (rte_eth_dev_default_mac_addr_set(
1538                                                 internals->slaves[i].port_id,
1539                                                 &internals->slaves[i].persisted_mac_addr)) {
1540                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1541                                                         internals->slaves[i].port_id);
1542                                         return -1;
1543                                 }
1544                         }
1545                 }
1546         }
1547
1548         return 0;
1549 }
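
/*
 * Illustrative application-side sketch using the public rte_eth_bond.h API:
 * overriding the bonded MAC triggers the propagation logic above according
 * to the bonding mode (bonded_port_id is a placeholder):
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *	rte_eth_bond_mac_address_set(bonded_port_id, &addr);
 *
 * In round-robin, balance and broadcast modes every slave inherits the
 * bonded MAC; in the primary-based modes only the primary does, while the
 * other slaves keep their persisted addresses.
 */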
1550
1551 int
1552 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1553 {
1554         struct bond_dev_private *internals;
1555
1556         internals = eth_dev->data->dev_private;
1557
1558         switch (mode) {
1559         case BONDING_MODE_ROUND_ROBIN:
1560                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1561                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1562                 break;
1563         case BONDING_MODE_ACTIVE_BACKUP:
1564                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1565                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1566                 break;
1567         case BONDING_MODE_BALANCE:
1568                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1569                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1570                 break;
1571         case BONDING_MODE_BROADCAST:
1572                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1573                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1574                 break;
1575         case BONDING_MODE_8023AD:
1576                 if (bond_mode_8023ad_enable(eth_dev) != 0)
1577                         return -1;
1578
1579                 if (internals->mode4.dedicated_queues.enabled == 0) {
1580                         eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1581                         eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
                        RTE_BOND_LOG(WARNING,
                                "Mode 4 requires invoking the TX and RX burst "
                                "functions at least once every 100ms.");
1585                 } else {
1586                         /* Use flow director's optimization */
1587                         eth_dev->rx_pkt_burst =
1588                                         bond_ethdev_rx_burst_8023ad_fast_queue;
1589                         eth_dev->tx_pkt_burst =
1590                                         bond_ethdev_tx_burst_8023ad_fast_queue;
1591                 }
1592                 break;
1593         case BONDING_MODE_TLB:
1594                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1595                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1596                 break;
1597         case BONDING_MODE_ALB:
1598                 if (bond_mode_alb_enable(eth_dev) != 0)
1599                         return -1;
1600
1601                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1602                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1603                 break;
1604         default:
1605                 return -1;
1606         }
1607
1608         internals->mode = mode;
1609
1610         return 0;
1611 }
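
/*
 * Illustrative application-side sketch: the burst handlers above are
 * installed when the mode is chosen, either at creation time or later via
 * the public API (names from rte_eth_bond.h):
 *
 *	int port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, 0);
 *
 *	if (port >= 0)
 *		rte_eth_bond_mode_set(port, BONDING_MODE_8023AD);
 *
 * As the warning above notes, mode 4 without dedicated queues additionally
 * requires the application to call the RX/TX burst functions at least once
 * every 100ms so the LACP state machines keep making progress.
 */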
1612
1614 static int
1615 slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
1616                 struct rte_eth_dev *slave_eth_dev)
1617 {
1618         int errval = 0;
1619         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1620         struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
1621
1622         if (port->slow_pool == NULL) {
1623                 char mem_name[256];
1624                 int slave_id = slave_eth_dev->data->port_id;
1625
1626                 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
1627                                 slave_id);
1628                 port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
1629                         250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
1630                         slave_eth_dev->data->numa_node);
1631
                /* Any memory allocation failure in initialization is critical
                 * because resources can't be freed, so reinitialization is
                 * impossible. */
1634                 if (port->slow_pool == NULL) {
1635                         rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
1636                                 slave_id, mem_name, rte_strerror(rte_errno));
1637                 }
1638         }
1639
1640         if (internals->mode4.dedicated_queues.enabled == 1) {
1641                 /* Configure slow Rx queue */
1642
1643                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
1644                                 internals->mode4.dedicated_queues.rx_qid, 128,
1645                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1646                                 NULL, port->slow_pool);
1647                 if (errval != 0) {
1648                         RTE_BOND_LOG(ERR,
1649                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1650                                         slave_eth_dev->data->port_id,
1651                                         internals->mode4.dedicated_queues.rx_qid,
1652                                         errval);
1653                         return errval;
1654                 }
1655
1656                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
1657                                 internals->mode4.dedicated_queues.tx_qid, 512,
1658                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1659                                 NULL);
1660                 if (errval != 0) {
1661                         RTE_BOND_LOG(ERR,
1662                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1663                                 slave_eth_dev->data->port_id,
1664                                 internals->mode4.dedicated_queues.tx_qid,
1665                                 errval);
1666                         return errval;
1667                 }
1668         }
1669         return 0;
1670 }
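
/*
 * Sketch of how the slow queues above come into play: they are only
 * configured when the application enables dedicated queues before starting
 * the bonded device (public rte_eth_bond_8023ad.h API; bonded_port_id is a
 * placeholder):
 *
 *	rte_eth_bond_8023ad_dedicated_queues_enable(bonded_port_id);
 *
 * With the feature enabled, the flow rules installed below steer LACPDUs to
 * the extra slave queue, so control traffic bypasses the data-path rings.
 */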
1671
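/*
 * Bring a slave port in line with the bonded device: stop it, mirror the
 * bonded device's LSC, RSS, VLAN-filter and MTU settings, recreate its
 * RX/TX queues (plus the mode 4 slow queues when dedicated queues are
 * enabled), restart it and resynchronize its RSS RETA table.
 */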
1672 int
1673 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1674                 struct rte_eth_dev *slave_eth_dev)
1675 {
1676         struct bond_rx_queue *bd_rx_q;
1677         struct bond_tx_queue *bd_tx_q;
1678         uint16_t nb_rx_queues;
1679         uint16_t nb_tx_queues;
1680
1681         int errval;
1682         uint16_t q_id;
1683         struct rte_flow_error flow_error;
1684
1685         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1686
1687         /* Stop slave */
1688         rte_eth_dev_stop(slave_eth_dev->data->port_id);
1689
1690         /* Enable interrupts on slave device if supported */
1691         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1692                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1693
        /* If RSS is enabled for bonding, try to enable it for slaves */
1695         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1696                 if (internals->rss_key_len != 0) {
1697                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1698                                         internals->rss_key_len;
1699                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1700                                         internals->rss_key;
1701                 } else {
1702                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1703                 }
1704
1705                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1706                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1707                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1708                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1709         }
1710
1711         if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
1712                         DEV_RX_OFFLOAD_VLAN_FILTER)
1713                 slave_eth_dev->data->dev_conf.rxmode.offloads |=
1714                                 DEV_RX_OFFLOAD_VLAN_FILTER;
1715         else
1716                 slave_eth_dev->data->dev_conf.rxmode.offloads &=
1717                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
1718
1719         nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
1720         nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
1721
1722         if (internals->mode == BONDING_MODE_8023AD) {
1723                 if (internals->mode4.dedicated_queues.enabled == 1) {
1724                         nb_rx_queues++;
1725                         nb_tx_queues++;
1726                 }
1727         }
1728
1729         errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
1730                                      bonded_eth_dev->data->mtu);
1731         if (errval != 0 && errval != -ENOTSUP) {
1732                 RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
1733                                 slave_eth_dev->data->port_id, errval);
1734                 return errval;
1735         }
1736
1737         /* Configure device */
1738         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1739                         nb_rx_queues, nb_tx_queues,
1740                         &(slave_eth_dev->data->dev_conf));
1741         if (errval != 0) {
1742                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1743                                 slave_eth_dev->data->port_id, errval);
1744                 return errval;
1745         }
1746
1747         /* Setup Rx Queues */
1748         for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1749                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1750
1751                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1752                                 bd_rx_q->nb_rx_desc,
1753                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1754                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1755                 if (errval != 0) {
1756                         RTE_BOND_LOG(ERR,
1757                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1758                                         slave_eth_dev->data->port_id, q_id, errval);
1759                         return errval;
1760                 }
1761         }
1762
1763         /* Setup Tx Queues */
1764         for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1765                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1766
1767                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1768                                 bd_tx_q->nb_tx_desc,
1769                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1770                                 &bd_tx_q->tx_conf);
1771                 if (errval != 0) {
1772                         RTE_BOND_LOG(ERR,
1773                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1774                                 slave_eth_dev->data->port_id, q_id, errval);
1775                         return errval;
1776                 }
1777         }
1778
1779         if (internals->mode == BONDING_MODE_8023AD &&
1780                         internals->mode4.dedicated_queues.enabled == 1) {
                errval = slave_configure_slow_queue(bonded_eth_dev,
                                slave_eth_dev);
                if (errval != 0)
                        return errval;
1784
                if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
                                slave_eth_dev->data->port_id) != 0) {
                        RTE_BOND_LOG(ERR,
                                "bond_ethdev_8023ad_flow_verify: port=%d",
                                slave_eth_dev->data->port_id);
                        return -1;
                }
1792
1793                 if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
1794                         rte_flow_destroy(slave_eth_dev->data->port_id,
1795                                         internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
1796                                         &flow_error);
1797
1798                 bond_ethdev_8023ad_flow_set(bonded_eth_dev,
1799                                 slave_eth_dev->data->port_id);
1800         }
1801
1802         /* Start device */
1803         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1804         if (errval != 0) {
1805                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1806                                 slave_eth_dev->data->port_id, errval);
1807                 return -1;
1808         }
1809
1810         /* If RSS is enabled for bonding, synchronize RETA */
1811         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1812                 int i;
1813                 struct bond_dev_private *internals;
1814
1815                 internals = bonded_eth_dev->data->dev_private;
1816
1817                 for (i = 0; i < internals->slave_count; i++) {
1818                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1819                                 errval = rte_eth_dev_rss_reta_update(
1820                                                 slave_eth_dev->data->port_id,
1821                                                 &internals->reta_conf[0],
1822                                                 internals->slaves[i].reta_size);
1823                                 if (errval != 0) {
1824                                         RTE_BOND_LOG(WARNING,
1825                                                      "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1826                                                      " RSS Configuration for bonding may be inconsistent.",
1827                                                      slave_eth_dev->data->port_id, errval);
1828                                 }
1829                                 break;
1830                         }
1831                 }
1832         }
1833
1834         /* If lsc interrupt is set, check initial slave's link status */
1835         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1836                 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1837                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1838                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
1839                         NULL);
1840         }
1841
1842         return 0;
1843 }
1844
1845 void
1846 slave_remove(struct bond_dev_private *internals,
1847                 struct rte_eth_dev *slave_eth_dev)
1848 {
1849         uint16_t i;
1850
1851         for (i = 0; i < internals->slave_count; i++)
1852                 if (internals->slaves[i].port_id ==
1853                                 slave_eth_dev->data->port_id)
1854                         break;
1855
1856         if (i < (internals->slave_count - 1)) {
1857                 struct rte_flow *flow;
1858
1859                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1860                                 sizeof(internals->slaves[0]) *
1861                                 (internals->slave_count - i - 1));
1862                 TAILQ_FOREACH(flow, &internals->flow_list, next) {
1863                         memmove(&flow->flows[i], &flow->flows[i + 1],
1864                                 sizeof(flow->flows[0]) *
1865                                 (internals->slave_count - i - 1));
1866                         flow->flows[internals->slave_count - 1] = NULL;
1867                 }
1868         }
1869
1870         internals->slave_count--;
1871
1872         /* force reconfiguration of slave interfaces */
1873         _rte_eth_dev_reset(slave_eth_dev);
1874 }
1875
1876 static void
1877 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1878
1879 void
1880 slave_add(struct bond_dev_private *internals,
1881                 struct rte_eth_dev *slave_eth_dev)
1882 {
1883         struct bond_slave_details *slave_details =
1884                         &internals->slaves[internals->slave_count];
1885
1886         slave_details->port_id = slave_eth_dev->data->port_id;
1887         slave_details->last_link_status = 0;
1888
1889         /* Mark slave devices that don't support interrupts so we can
1890          * compensate when we start the bond
1891          */
1892         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1893                 slave_details->link_status_poll_enabled = 1;
1894         }
1895
1896         slave_details->link_status_wait_to_complete = 0;
        /* persist the slave's original MAC address so it can be restored later */
1898         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1899                         sizeof(struct rte_ether_addr));
1900 }
1901
1902 void
1903 bond_ethdev_primary_set(struct bond_dev_private *internals,
1904                 uint16_t slave_port_id)
1905 {
1906         int i;
1907
1908         if (internals->active_slave_count < 1)
1909                 internals->current_primary_port = slave_port_id;
1910         else
1911                 /* Search bonded device slave ports for new proposed primary port */
1912                 for (i = 0; i < internals->active_slave_count; i++) {
1913                         if (internals->active_slaves[i] == slave_port_id)
1914                                 internals->current_primary_port = slave_port_id;
1915                 }
1916 }
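
/*
 * Illustrative usage: applications normally pick the primary slave (used by
 * the active-backup, TLB and ALB modes) through the public API, which ends
 * up here (bonded_port_id/slave_port_id are placeholders):
 *
 *	rte_eth_bond_primary_set(bonded_port_id, slave_port_id);
 */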
1917
1918 static void
1919 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1920
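/*
 * dev_ops start handler: derive the bonded MAC from the primary slave when
 * the user did not set one, reconfigure and restart every slave, arm
 * link-status polling for slaves without LSC interrupt support and start
 * the mode-specific machinery (802.3ad state machines, TLB callback).
 */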
1921 static int
1922 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1923 {
1924         struct bond_dev_private *internals;
1925         int i;
1926
1927         /* slave eth dev will be started by bonded device */
1928         if (check_for_bonded_ethdev(eth_dev)) {
1929                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1930                                 eth_dev->data->port_id);
1931                 return -1;
1932         }
1933
1934         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1935         eth_dev->data->dev_started = 1;
1936
1937         internals = eth_dev->data->dev_private;
1938
1939         if (internals->slave_count == 0) {
1940                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1941                 goto out_err;
1942         }
1943
1944         if (internals->user_defined_mac == 0) {
1945                 struct rte_ether_addr *new_mac_addr = NULL;
1946
1947                 for (i = 0; i < internals->slave_count; i++)
1948                         if (internals->slaves[i].port_id == internals->primary_port)
1949                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1950
1951                 if (new_mac_addr == NULL)
1952                         goto out_err;
1953
1954                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1955                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1956                                         eth_dev->data->port_id);
1957                         goto out_err;
1958                 }
1959         }
1960
1961         if (internals->mode == BONDING_MODE_8023AD) {
1962                 if (internals->mode4.dedicated_queues.enabled == 1) {
1963                         internals->mode4.dedicated_queues.rx_qid =
1964                                         eth_dev->data->nb_rx_queues;
1965                         internals->mode4.dedicated_queues.tx_qid =
1966                                         eth_dev->data->nb_tx_queues;
1967                 }
1968         }
1969
1971         /* Reconfigure each slave device if starting bonded device */
1972         for (i = 0; i < internals->slave_count; i++) {
1973                 struct rte_eth_dev *slave_ethdev =
1974                                 &(rte_eth_devices[internals->slaves[i].port_id]);
1975                 if (slave_configure(eth_dev, slave_ethdev) != 0) {
1976                         RTE_BOND_LOG(ERR,
1977                                 "bonded port (%d) failed to reconfigure slave device (%d)",
1978                                 eth_dev->data->port_id,
1979                                 internals->slaves[i].port_id);
1980                         goto out_err;
1981                 }
1982                 /* We will need to poll for link status if any slave doesn't
1983                  * support interrupts
1984                  */
1985                 if (internals->slaves[i].link_status_poll_enabled)
1986                         internals->link_status_polling_enabled = 1;
1987         }
1988
1989         /* start polling if needed */
1990         if (internals->link_status_polling_enabled) {
1991                 rte_eal_alarm_set(
1992                         internals->link_status_polling_interval_ms * 1000,
1993                         bond_ethdev_slave_link_status_change_monitor,
1994                         (void *)&rte_eth_devices[internals->port_id]);
1995         }
1996
        /* Update all slave devices' MACs */
1998         if (mac_address_slaves_update(eth_dev) != 0)
1999                 goto out_err;
2000
2001         if (internals->user_defined_primary_port)
2002                 bond_ethdev_primary_set(internals, internals->primary_port);
2003
2004         if (internals->mode == BONDING_MODE_8023AD)
2005                 bond_mode_8023ad_start(eth_dev);
2006
2007         if (internals->mode == BONDING_MODE_TLB ||
2008                         internals->mode == BONDING_MODE_ALB)
2009                 bond_tlb_enable(internals);
2010
2011         return 0;
2012
2013 out_err:
2014         eth_dev->data->dev_started = 0;
2015         return -1;
2016 }
2017
2018 static void
2019 bond_ethdev_free_queues(struct rte_eth_dev *dev)
2020 {
2021         uint16_t i;
2022
2023         if (dev->data->rx_queues != NULL) {
2024                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2025                         rte_free(dev->data->rx_queues[i]);
2026                         dev->data->rx_queues[i] = NULL;
2027                 }
2028                 dev->data->nb_rx_queues = 0;
2029         }
2030
2031         if (dev->data->tx_queues != NULL) {
2032                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2033                         rte_free(dev->data->tx_queues[i]);
2034                         dev->data->tx_queues[i] = NULL;
2035                 }
2036                 dev->data->nb_tx_queues = 0;
2037         }
2038 }
2039
2040 void
2041 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
2042 {
2043         struct bond_dev_private *internals = eth_dev->data->dev_private;
2044         uint16_t i;
2045
2046         if (internals->mode == BONDING_MODE_8023AD) {
2047                 struct port *port;
2048                 void *pkt = NULL;
2049
2050                 bond_mode_8023ad_stop(eth_dev);
2051
2052                 /* Discard all messages to/from mode 4 state machines */
2053                 for (i = 0; i < internals->active_slave_count; i++) {
2054                         port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
2055
2056                         RTE_ASSERT(port->rx_ring != NULL);
2057                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
2058                                 rte_pktmbuf_free(pkt);
2059
2060                         RTE_ASSERT(port->tx_ring != NULL);
2061                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
2062                                 rte_pktmbuf_free(pkt);
2063                 }
2064         }
2065
2066         if (internals->mode == BONDING_MODE_TLB ||
2067                         internals->mode == BONDING_MODE_ALB) {
2068                 bond_tlb_disable(internals);
2069                 for (i = 0; i < internals->active_slave_count; i++)
2070                         tlb_last_obytets[internals->active_slaves[i]] = 0;
2071         }
2072
2073         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2074         eth_dev->data->dev_started = 0;
2075
2076         internals->link_status_polling_enabled = 0;
2077         for (i = 0; i < internals->slave_count; i++) {
2078                 uint16_t slave_id = internals->slaves[i].port_id;
2079                 if (find_slave_by_id(internals->active_slaves,
2080                                 internals->active_slave_count, slave_id) !=
2081                                                 internals->active_slave_count) {
2082                         internals->slaves[i].last_link_status = 0;
2083                         rte_eth_dev_stop(slave_id);
2084                         deactivate_slave(eth_dev, slave_id);
2085                 }
2086         }
2087 }
2088
2089 void
2090 bond_ethdev_close(struct rte_eth_dev *dev)
2091 {
2092         struct bond_dev_private *internals = dev->data->dev_private;
2093         uint16_t bond_port_id = internals->port_id;
2094         int skipped = 0;
2095         struct rte_flow_error ferror;
2096
2097         RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
2098         while (internals->slave_count != skipped) {
2099                 uint16_t port_id = internals->slaves[skipped].port_id;
2100
2101                 rte_eth_dev_stop(port_id);
2102
2103                 if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
2104                         RTE_BOND_LOG(ERR,
2105                                      "Failed to remove port %d from bonded device %s",
2106                                      port_id, dev->device->name);
2107                         skipped++;
2108                 }
2109         }
2110         bond_flow_ops.flush(dev, &ferror);
2111         bond_ethdev_free_queues(dev);
2112         rte_bitmap_reset(internals->vlan_filter_bmp);
2113 }
2114
2115 /* forward declaration */
2116 static int bond_ethdev_configure(struct rte_eth_dev *dev);
2117
2118 static void
2119 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2120 {
2121         struct bond_dev_private *internals = dev->data->dev_private;
2122         struct bond_slave_details slave;
2123         int ret;
2124
2125         uint16_t max_nb_rx_queues = UINT16_MAX;
2126         uint16_t max_nb_tx_queues = UINT16_MAX;
2127         uint16_t max_rx_desc_lim = UINT16_MAX;
2128         uint16_t max_tx_desc_lim = UINT16_MAX;
2129
2130         dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
2131
2132         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2133                         internals->candidate_max_rx_pktlen :
2134                         RTE_ETHER_MAX_JUMBO_FRAME_LEN;
2135
        /* The max number of tx/rx queues that the bonded device can support
         * is the minimum across its slaves, as all slaves must be capable of
         * supporting the same number of tx/rx queues.
         */
2140         if (internals->slave_count > 0) {
2141                 struct rte_eth_dev_info slave_info;
2142                 uint16_t idx;
2143
2144                 for (idx = 0; idx < internals->slave_count; idx++) {
2145                         slave = internals->slaves[idx];
2146                         ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
2147                         if (ret != 0) {
                                RTE_BOND_LOG(ERR,
                                        "%s: failed to get device (port %u) info: %s",
                                        __func__,
                                        slave.port_id,
                                        strerror(-ret));
2153
2154                                 return;
2155                         }
2156
2157                         if (slave_info.max_rx_queues < max_nb_rx_queues)
2158                                 max_nb_rx_queues = slave_info.max_rx_queues;
2159
2160                         if (slave_info.max_tx_queues < max_nb_tx_queues)
2161                                 max_nb_tx_queues = slave_info.max_tx_queues;
2162
2163                         if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
2164                                 max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
2165
2166                         if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
2167                                 max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
2168                 }
2169         }
2170
2171         dev_info->max_rx_queues = max_nb_rx_queues;
2172         dev_info->max_tx_queues = max_nb_tx_queues;
2173
2174         memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2175                sizeof(dev_info->default_rxconf));
2176         memcpy(&dev_info->default_txconf, &internals->default_txconf,
2177                sizeof(dev_info->default_txconf));
2178
2179         dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
2180         dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
2181
2182         /**
2183          * If dedicated hw queues enabled for link bonding device in LACP mode
2184          * then we need to reduce the maximum number of data path queues by 1.
2185          */
2186         if (internals->mode == BONDING_MODE_8023AD &&
2187                 internals->mode4.dedicated_queues.enabled == 1) {
2188                 dev_info->max_rx_queues--;
2189                 dev_info->max_tx_queues--;
2190         }
2191
2192         dev_info->min_rx_bufsize = 0;
2193
2194         dev_info->rx_offload_capa = internals->rx_offload_capa;
2195         dev_info->tx_offload_capa = internals->tx_offload_capa;
2196         dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2197         dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2198         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2199
2200         dev_info->reta_size = internals->reta_size;
2201 }
2202
2203 static int
2204 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2205 {
2206         int res;
2207         uint16_t i;
2208         struct bond_dev_private *internals = dev->data->dev_private;
2209
2210         /* don't do this while a slave is being added */
2211         rte_spinlock_lock(&internals->lock);
2212
2213         if (on)
2214                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2215         else
2216                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2217
2218         for (i = 0; i < internals->slave_count; i++) {
2219                 uint16_t port_id = internals->slaves[i].port_id;
2220
2221                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
                if (res == -ENOTSUP)
2223                         RTE_BOND_LOG(WARNING,
2224                                      "Setting VLAN filter on slave port %u not supported.",
2225                                      port_id);
2226         }
2227
2228         rte_spinlock_unlock(&internals->lock);
2229         return 0;
2230 }
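
/*
 * Illustrative usage: the application only programs the bonded port; the
 * filter is recorded in the bitmap above so it can also be replayed to
 * slaves added later (bonded_port_id is a placeholder):
 *
 *	rte_eth_dev_vlan_filter(bonded_port_id, 100, 1);
 */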
2231
2232 static int
2233 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
2234                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
2235                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
2236 {
2237         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
2238                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
2239                                         0, dev->data->numa_node);
2240         if (bd_rx_q == NULL)
2241                 return -1;
2242
2243         bd_rx_q->queue_id = rx_queue_id;
2244         bd_rx_q->dev_private = dev->data->dev_private;
2245
2246         bd_rx_q->nb_rx_desc = nb_rx_desc;
2247
2248         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
2249         bd_rx_q->mb_pool = mb_pool;
2250
2251         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
2252
2253         return 0;
2254 }
2255
2256 static int
2257 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
2258                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
2259                 const struct rte_eth_txconf *tx_conf)
2260 {
2261         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
2262                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
2263                                         0, dev->data->numa_node);
2264
2265         if (bd_tx_q == NULL)
2266                 return -1;
2267
2268         bd_tx_q->queue_id = tx_queue_id;
2269         bd_tx_q->dev_private = dev->data->dev_private;
2270
2271         bd_tx_q->nb_tx_desc = nb_tx_desc;
2272         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
2273
2274         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
2275
2276         return 0;
2277 }
2278
2279 static void
2280 bond_ethdev_rx_queue_release(void *queue)
2281 {
2282         if (queue == NULL)
2283                 return;
2284
2285         rte_free(queue);
2286 }
2287
2288 static void
2289 bond_ethdev_tx_queue_release(void *queue)
2290 {
2291         if (queue == NULL)
2292                 return;
2293
2294         rte_free(queue);
2295 }
2296
2297 static void
2298 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
2299 {
2300         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
2301         struct bond_dev_private *internals;
2302
        /* polling_slave_found defaults to true because we don't want to
         * disable the polling alarm if we cannot get the lock */
2305         int i, polling_slave_found = 1;
2306
2307         if (cb_arg == NULL)
2308                 return;
2309
2310         bonded_ethdev = cb_arg;
2311         internals = bonded_ethdev->data->dev_private;
2312
2313         if (!bonded_ethdev->data->dev_started ||
2314                 !internals->link_status_polling_enabled)
2315                 return;
2316
        /* If the device is currently being configured then don't check the
         * slaves' link status; wait until the next period */
2319         if (rte_spinlock_trylock(&internals->lock)) {
2320                 if (internals->slave_count > 0)
2321                         polling_slave_found = 0;
2322
2323                 for (i = 0; i < internals->slave_count; i++) {
2324                         if (!internals->slaves[i].link_status_poll_enabled)
2325                                 continue;
2326
2327                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
2328                         polling_slave_found = 1;
2329
2330                         /* Update slave link status */
2331                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
2332                                         internals->slaves[i].link_status_wait_to_complete);
2333
2334                         /* if link status has changed since last checked then call lsc
2335                          * event callback */
2336                         if (slave_ethdev->data->dev_link.link_status !=
2337                                         internals->slaves[i].last_link_status) {
2338                                 internals->slaves[i].last_link_status =
2339                                                 slave_ethdev->data->dev_link.link_status;
2340
2341                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
2342                                                 RTE_ETH_EVENT_INTR_LSC,
2343                                                 &bonded_ethdev->data->port_id,
2344                                                 NULL);
2345                         }
2346                 }
2347                 rte_spinlock_unlock(&internals->lock);
2348         }
2349
2350         if (polling_slave_found)
2351                 /* Set alarm to continue monitoring link status of slave ethdev's */
2352                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
2353                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
2354 }
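
/*
 * Illustrative usage: the polling interval used above defaults to 10ms
 * (DEFAULT_POLLING_INTERVAL_10_MS) and can be tuned per bonded device
 * through the public API (bonded_port_id is a placeholder):
 *
 *	rte_eth_bond_link_monitoring_set(bonded_port_id, 100);
 */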
2355
2356 static int
2357 bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2358 {
2359         void (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
2360
2361         struct bond_dev_private *bond_ctx;
2362         struct rte_eth_link slave_link;
2363
2364         uint32_t idx;
2365
2366         bond_ctx = ethdev->data->dev_private;
2367
2368         ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2369
2370         if (ethdev->data->dev_started == 0 ||
2371                         bond_ctx->active_slave_count == 0) {
2372                 ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
2373                 return 0;
2374         }
2375
2376         ethdev->data->dev_link.link_status = ETH_LINK_UP;
2377
2378         if (wait_to_complete)
2379                 link_update = rte_eth_link_get;
2380         else
2381                 link_update = rte_eth_link_get_nowait;
2382
2383         switch (bond_ctx->mode) {
2384         case BONDING_MODE_BROADCAST:
2385                 /**
2386                  * Setting link speed to UINT32_MAX to ensure we pick up the
2387                  * value of the first active slave
2388                  */
2389                 ethdev->data->dev_link.link_speed = UINT32_MAX;
2390
                /**
                 * Link speed is the minimum of all the slaves' link speeds,
                 * as packet loss would occur on the slowest slave if
                 * transmission at a higher rate were attempted
                 */
                for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
                        link_update(bond_ctx->active_slaves[idx], &slave_link);
2398
2399                         if (slave_link.link_speed <
2400                                         ethdev->data->dev_link.link_speed)
2401                                 ethdev->data->dev_link.link_speed =
2402                                                 slave_link.link_speed;
2403                 }
2404                 break;
2405         case BONDING_MODE_ACTIVE_BACKUP:
2406                 /* Current primary slave */
2407                 link_update(bond_ctx->current_primary_port, &slave_link);
2408
2409                 ethdev->data->dev_link.link_speed = slave_link.link_speed;
2410                 break;
2411         case BONDING_MODE_8023AD:
2412                 ethdev->data->dev_link.link_autoneg =
2413                                 bond_ctx->mode4.slave_link.link_autoneg;
2414                 ethdev->data->dev_link.link_duplex =
2415                                 bond_ctx->mode4.slave_link.link_duplex;
2416                 /* fall through to update link speed */
2417         case BONDING_MODE_ROUND_ROBIN:
2418         case BONDING_MODE_BALANCE:
2419         case BONDING_MODE_TLB:
2420         case BONDING_MODE_ALB:
2421         default:
                /**
                 * In these modes the maximum theoretical link speed is the
                 * sum of all the slaves
                 */
2426                 ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2427
2428                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2429                         link_update(bond_ctx->active_slaves[idx], &slave_link);
2430
2431                         ethdev->data->dev_link.link_speed +=
2432                                         slave_link.link_speed;
2433                 }
2434         }
2435
2437         return 0;
2438 }
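
/*
 * Worked example: with two active 10G slaves, broadcast mode reports
 * min(10000, 10000) = 10000 Mbps because every packet is sent on all
 * slaves; round-robin, balance, 802.3ad, TLB and ALB report the 20000 Mbps
 * aggregate; active-backup simply mirrors the current primary slave.
 */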
2439
2441 static int
2442 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2443 {
2444         struct bond_dev_private *internals = dev->data->dev_private;
2445         struct rte_eth_stats slave_stats;
2446         int i, j;
2447
2448         for (i = 0; i < internals->slave_count; i++) {
2449                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
2450
2451                 stats->ipackets += slave_stats.ipackets;
2452                 stats->opackets += slave_stats.opackets;
2453                 stats->ibytes += slave_stats.ibytes;
2454                 stats->obytes += slave_stats.obytes;
2455                 stats->imissed += slave_stats.imissed;
2456                 stats->ierrors += slave_stats.ierrors;
2457                 stats->oerrors += slave_stats.oerrors;
2458                 stats->rx_nombuf += slave_stats.rx_nombuf;
2459
2460                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
2461                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
2462                         stats->q_opackets[j] += slave_stats.q_opackets[j];
2463                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
2464                         stats->q_obytes[j] += slave_stats.q_obytes[j];
2465                         stats->q_errors[j] += slave_stats.q_errors[j];
2466                 }
2467
2468         }
2469
2470         return 0;
2471 }
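
/*
 * Note: the bonded device keeps no counters of its own; a stats query on
 * the bonded port yields the sum over all slaves, with per-queue counters
 * aggregated up to RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */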
2472
2473 static void
2474 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
2475 {
2476         struct bond_dev_private *internals = dev->data->dev_private;
2477         int i;
2478
2479         for (i = 0; i < internals->slave_count; i++)
2480                 rte_eth_stats_reset(internals->slaves[i].port_id);
2481 }
2482
2483 static void
2484 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2485 {
2486         struct bond_dev_private *internals = eth_dev->data->dev_private;
2487         int i;
2488
2489         switch (internals->mode) {
2490         /* Promiscuous mode is propagated to all slaves */
2491         case BONDING_MODE_ROUND_ROBIN:
2492         case BONDING_MODE_BALANCE:
2493         case BONDING_MODE_BROADCAST:
2494         case BONDING_MODE_8023AD:
2495                 for (i = 0; i < internals->slave_count; i++) {
2496                         uint16_t port_id = internals->slaves[i].port_id;
2497
2498                         rte_eth_promiscuous_enable(port_id);
2499                 }
2500                 break;
2501         /* Promiscuous mode is propagated only to primary slave */
2502         case BONDING_MODE_ACTIVE_BACKUP:
2503         case BONDING_MODE_TLB:
2504         case BONDING_MODE_ALB:
2505         default:
2506                 /* Do not touch promisc when there cannot be primary ports */
2507                 if (internals->slave_count == 0)
2508                         break;
2509                 rte_eth_promiscuous_enable(internals->current_primary_port);
2510         }
2511 }
2512
2513 static void
2514 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
2515 {
2516         struct bond_dev_private *internals = dev->data->dev_private;
2517         int i;
2518
2519         switch (internals->mode) {
2520         /* Promiscuous mode is propagated to all slaves */
2521         case BONDING_MODE_ROUND_ROBIN:
2522         case BONDING_MODE_BALANCE:
2523         case BONDING_MODE_BROADCAST:
2524         case BONDING_MODE_8023AD:
2525                 for (i = 0; i < internals->slave_count; i++) {
2526                         uint16_t port_id = internals->slaves[i].port_id;
2527
2528                         if (internals->mode == BONDING_MODE_8023AD &&
2529                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2530                                         BOND_8023AD_FORCED_PROMISC)
2531                                 continue;
2532                         rte_eth_promiscuous_disable(port_id);
2533                 }
2534                 break;
2535         /* Promiscuous mode is propagated only to primary slave */
2536         case BONDING_MODE_ACTIVE_BACKUP:
2537         case BONDING_MODE_TLB:
2538         case BONDING_MODE_ALB:
2539         default:
2540                 /* Do not touch promisc when there cannot be primary ports */
2541                 if (internals->slave_count == 0)
2542                         break;
2543                 rte_eth_promiscuous_disable(internals->current_primary_port);
2544         }
2545 }
2546
2547 static void
2548 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
2549 {
2550         struct bond_dev_private *internals = eth_dev->data->dev_private;
2551         int i;
2552
2553         switch (internals->mode) {
2554         /* allmulti mode is propagated to all slaves */
2555         case BONDING_MODE_ROUND_ROBIN:
2556         case BONDING_MODE_BALANCE:
2557         case BONDING_MODE_BROADCAST:
2558         case BONDING_MODE_8023AD:
2559                 for (i = 0; i < internals->slave_count; i++) {
2560                         uint16_t port_id = internals->slaves[i].port_id;
2561
2562                         rte_eth_allmulticast_enable(port_id);
2563                 }
2564                 break;
2565         /* allmulti mode is propagated only to primary slave */
2566         case BONDING_MODE_ACTIVE_BACKUP:
2567         case BONDING_MODE_TLB:
2568         case BONDING_MODE_ALB:
2569         default:
2570                 /* Do not touch allmulti when there cannot be primary ports */
2571                 if (internals->slave_count == 0)
2572                         break;
2573                 rte_eth_allmulticast_enable(internals->current_primary_port);
2574         }
2575 }
2576
2577 static void
2578 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
2579 {
2580         struct bond_dev_private *internals = eth_dev->data->dev_private;
2581         int i;
2582
2583         switch (internals->mode) {
2584         /* allmulti mode is propagated to all slaves */
2585         case BONDING_MODE_ROUND_ROBIN:
2586         case BONDING_MODE_BALANCE:
2587         case BONDING_MODE_BROADCAST:
2588         case BONDING_MODE_8023AD:
2589                 for (i = 0; i < internals->slave_count; i++) {
2590                         uint16_t port_id = internals->slaves[i].port_id;
2591
2592                         if (internals->mode == BONDING_MODE_8023AD &&
2593                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2594                                         BOND_8023AD_FORCED_ALLMULTI)
2595                                 continue;
2596                         rte_eth_allmulticast_disable(port_id);
2597                 }
2598                 break;
2599         /* allmulti mode is propagated only to primary slave */
2600         case BONDING_MODE_ACTIVE_BACKUP:
2601         case BONDING_MODE_TLB:
2602         case BONDING_MODE_ALB:
2603         default:
2604                 /* Do not touch allmulti when there cannot be primary ports */
2605                 if (internals->slave_count == 0)
2606                         break;
2607                 rte_eth_allmulticast_disable(internals->current_primary_port);
2608         }
2609 }
2610
2611 static void
2612 bond_ethdev_delayed_lsc_propagation(void *arg)
2613 {
2614         if (arg == NULL)
2615                 return;
2616
2617         _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2618                         RTE_ETH_EVENT_INTR_LSC, NULL);
2619 }
2620
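/*
 * LSC handler shared by real slave interrupts and the polling alarm above:
 * it (de)activates the slave, elects a new primary when needed, refreshes
 * the bonded link properties and, when link up/down delays are configured,
 * defers propagation of the event to the application via an EAL alarm.
 */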
2621 int
2622 bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2623                 void *param, void *ret_param __rte_unused)
2624 {
2625         struct rte_eth_dev *bonded_eth_dev;
2626         struct bond_dev_private *internals;
2627         struct rte_eth_link link;
2628         int rc = -1;
2629
2630         uint8_t lsc_flag = 0;
2631         int valid_slave = 0;
2632         uint16_t active_pos;
2633         uint16_t i;
2634
2635         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2636                 return rc;
2637
2638         bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
2639
2640         if (check_for_bonded_ethdev(bonded_eth_dev))
2641                 return rc;
2642
2643         internals = bonded_eth_dev->data->dev_private;
2644
2645         /* If the device isn't started don't handle interrupts */
2646         if (!bonded_eth_dev->data->dev_started)
2647                 return rc;
2648
2649         /* verify that port_id is a valid slave of bonded port */
2650         for (i = 0; i < internals->slave_count; i++) {
2651                 if (internals->slaves[i].port_id == port_id) {
2652                         valid_slave = 1;
2653                         break;
2654                 }
2655         }
2656
2657         if (!valid_slave)
2658                 return rc;
2659
        /* Synchronize lsc callback parallel calls either by real link event
         * from the slave PMDs or by the bonding PMD itself.
         */
2663         rte_spinlock_lock(&internals->lsc_lock);
2664
2665         /* Search for port in active port list */
2666         active_pos = find_slave_by_id(internals->active_slaves,
2667                         internals->active_slave_count, port_id);
2668
2669         rte_eth_link_get_nowait(port_id, &link);
2670         if (link.link_status) {
2671                 if (active_pos < internals->active_slave_count)
2672                         goto link_update;
2673
2674                 /* check link state properties if bonded link is up */
2675                 if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
2676                         if (link_properties_valid(bonded_eth_dev, &link) != 0)
2677                                 RTE_BOND_LOG(ERR, "Invalid link properties "
2678                                              "for slave %d in bonding mode %d",
2679                                              port_id, internals->mode);
2680                 } else {
2681                         /* inherit slave link properties */
2682                         link_properties_set(bonded_eth_dev, &link);
2683                 }
2684
2685                 /* If no active slave ports then set this port to be
2686                  * the primary port.
2687                  */
2688                 if (internals->active_slave_count < 1) {
2689                         /* If first active slave, then change link status */
2690                         bonded_eth_dev->data->dev_link.link_status =
2691                                                                 ETH_LINK_UP;
2692                         internals->current_primary_port = port_id;
2693                         lsc_flag = 1;
2694
2695                         mac_address_slaves_update(bonded_eth_dev);
2696                 }
2697
2698                 activate_slave(bonded_eth_dev, port_id);
2699
2700                 /* If the user has defined the primary port then default to
2701                  * using it.
2702                  */
2703                 if (internals->user_defined_primary_port &&
2704                                 internals->primary_port == port_id)
2705                         bond_ethdev_primary_set(internals, port_id);
2706         } else {
2707                 if (active_pos == internals->active_slave_count)
2708                         goto link_update;
2709
2710                 /* Remove from active slave list */
2711                 deactivate_slave(bonded_eth_dev, port_id);
2712
2713                 if (internals->active_slave_count < 1)
2714                         lsc_flag = 1;
2715
2716                 /* Update primary port id: use the first active slave, or fall
2717                  * back to the configured primary port if none is active */
2718                 if (port_id == internals->current_primary_port) {
2719                         if (internals->active_slave_count > 0)
2720                                 bond_ethdev_primary_set(internals,
2721                                                 internals->active_slaves[0]);
2722                         else
2723                                 internals->current_primary_port = internals->primary_port;
2724                 }
2725         }
2726
2727 link_update:
2728         /**
2729          * Update bonded device link properties after any change to active
2730          * slaves
2731          */
2732         bond_ethdev_link_update(bonded_eth_dev, 0);
2733
2734         if (lsc_flag) {
2735                 /* Cancel any possible outstanding interrupts if delays are enabled */
2736                 if (internals->link_up_delay_ms > 0 ||
2737                         internals->link_down_delay_ms > 0)
2738                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2739                                         bonded_eth_dev);
2740
2741                 if (bonded_eth_dev->data->dev_link.link_status) {
2742                         if (internals->link_up_delay_ms > 0)
2743                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2744                                                 bond_ethdev_delayed_lsc_propagation,
2745                                                 (void *)bonded_eth_dev);
2746                         else
2747                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2748                                                 RTE_ETH_EVENT_INTR_LSC,
2749                                                 NULL);
2750
2751                 } else {
2752                         if (internals->link_down_delay_ms > 0)
2753                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2754                                                 bond_ethdev_delayed_lsc_propagation,
2755                                                 (void *)bonded_eth_dev);
2756                         else
2757                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2758                                                 RTE_ETH_EVENT_INTR_LSC,
2759                                                 NULL);
2760                 }
2761         }
2762
2763         rte_spinlock_unlock(&internals->lsc_lock);
2764
2765         return rc;
2766 }
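/*
 * Illustrative sketch, not part of the upstream driver: the LSC events
 * propagated above can be observed from application code by registering a
 * callback on the bonded port. The callback name and log text below are
 * hypothetical.
 */
static int __rte_unused
example_app_lsc_cb(uint16_t port_id, enum rte_eth_event_type type,
                void *cb_arg __rte_unused, void *ret_param __rte_unused)
{
        struct rte_eth_link link;

        if (type != RTE_ETH_EVENT_INTR_LSC)
                return 0;

        rte_eth_link_get_nowait(port_id, &link);
        RTE_BOND_LOG(INFO, "bonded port %u link is %s", port_id,
                     link.link_status ? "up" : "down");
        return 0;
}
/*
 * Registered with, e.g.:
 *   rte_eth_dev_callback_register(bond_port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 example_app_lsc_cb, NULL);
 */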
2767
2768 static int
2769 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2770                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2771 {
2772         unsigned i, j;
2773         int result = 0;
2774         int slave_reta_size;
2775         unsigned reta_count;
2776         struct bond_dev_private *internals = dev->data->dev_private;
2777
2778         if (reta_size != internals->reta_size)
2779                 return -EINVAL;
2780
2781         /* Copy RETA table */
2782         reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2783
2784         for (i = 0; i < reta_count; i++) {
2785                 internals->reta_conf[i].mask = reta_conf[i].mask;
2786                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2787                         if ((reta_conf[i].mask >> j) & 0x01)
2788                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2789         }
2790
2791         /* Fill rest of array */
2792         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2793                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2794                                 sizeof(internals->reta_conf[0]) * reta_count);
2795
2796         /* Propagate RETA over slaves */
2797         for (i = 0; i < internals->slave_count; i++) {
2798                 slave_reta_size = internals->slaves[i].reta_size;
2799                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2800                                 &internals->reta_conf[0], slave_reta_size);
2801                 if (result < 0)
2802                         return result;
2803         }
2804
2805         return 0;
2806 }
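/*
 * Illustrative sketch, not part of the upstream driver: an application can
 * spread the bonded port's RETA evenly across its Rx queues and let the
 * handler above propagate it to every slave. reta_size must equal the value
 * the bonded port reports in its device info, as checked above; the function
 * name is hypothetical.
 */
static int __rte_unused
example_spread_reta(uint16_t bond_port_id, uint16_t reta_size,
                uint16_t nb_rx_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
                                                  RTE_RETA_GROUP_SIZE];
        unsigned int i, j;

        if (reta_size > RTE_DIM(reta_conf) * RTE_RETA_GROUP_SIZE)
                return -EINVAL;

        for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
                reta_conf[i].mask = ~0ULL; /* update all 64 entries in group */
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        reta_conf[i].reta[j] =
                                (i * RTE_RETA_GROUP_SIZE + j) % nb_rx_queues;
        }

        return rte_eth_dev_rss_reta_update(bond_port_id, reta_conf, reta_size);
}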
2807
2808 static int
2809 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2810                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2811 {
2812         int i, j;
2813         struct bond_dev_private *internals = dev->data->dev_private;
2814
2815         if (reta_size != internals->reta_size)
2816                 return -EINVAL;
2817
2818         /* Copy RETA table */
2819         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2820                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2821                         if ((reta_conf[i].mask >> j) & 0x01)
2822                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2823
2824         return 0;
2825 }
2826
2827 static int
2828 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2829                 struct rte_eth_rss_conf *rss_conf)
2830 {
2831         int i, result = 0;
2832         struct bond_dev_private *internals = dev->data->dev_private;
2833         struct rte_eth_rss_conf bond_rss_conf;
2834
2835         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2836
2837         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2838
2839         if (bond_rss_conf.rss_hf != 0)
2840                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2841
2842         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2843                         sizeof(internals->rss_key)) {
2844                 if (bond_rss_conf.rss_key_len == 0)
2845                         bond_rss_conf.rss_key_len = 40; /* default key length */
2846                 internals->rss_key_len = bond_rss_conf.rss_key_len;
2847                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
2848                                 internals->rss_key_len);
2849         }
2850
2851         for (i = 0; i < internals->slave_count; i++) {
2852                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
2853                                 &bond_rss_conf);
2854                 if (result < 0)
2855                         return result;
2856         }
2857
2858         return 0;
2859 }
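/*
 * Illustrative sketch, not part of the upstream driver: updating only the
 * hash-function selection on the bonded port. A NULL rss_key keeps the
 * currently programmed key, matching the handler above, which only copies a
 * key when one is supplied. The function name is hypothetical.
 */
static int __rte_unused
example_set_rss_functions(uint16_t bond_port_id)
{
        struct rte_eth_rss_conf conf = {
                .rss_key = NULL,        /* keep the current key */
                .rss_key_len = 0,
                .rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
        };

        return rte_eth_dev_rss_hash_update(bond_port_id, &conf);
}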
2860
2861 static int
2862 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
2863                 struct rte_eth_rss_conf *rss_conf)
2864 {
2865         struct bond_dev_private *internals = dev->data->dev_private;
2866
2867         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
2868         rss_conf->rss_key_len = internals->rss_key_len;
2869         if (rss_conf->rss_key)
2870                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
2871
2872         return 0;
2873 }
2874
2875 static int
2876 bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2877 {
2878         struct rte_eth_dev *slave_eth_dev;
2879         struct bond_dev_private *internals = dev->data->dev_private;
2880         int ret, i;
2881
2882         rte_spinlock_lock(&internals->lock);
2883
2884         for (i = 0; i < internals->slave_count; i++) {
2885                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
2886                 if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
2887                         rte_spinlock_unlock(&internals->lock);
2888                         return -ENOTSUP;
2889                 }
2890         }
2891         for (i = 0; i < internals->slave_count; i++) {
2892                 ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
2893                 if (ret < 0) {
2894                         rte_spinlock_unlock(&internals->lock);
2895                         return ret;
2896                 }
2897         }
2898
2899         rte_spinlock_unlock(&internals->lock);
2900         return 0;
2901 }
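/*
 * Illustrative sketch, not part of the upstream driver: since the handler
 * above refuses the new MTU unless every slave implements mtu_set, a single
 * call on the bonded port resizes the whole bond. 9000 is an assumed
 * jumbo-frame value; the function name is hypothetical.
 */
static int __rte_unused
example_set_bond_mtu(uint16_t bond_port_id)
{
        return rte_eth_dev_set_mtu(bond_port_id, 9000);
}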
2902
2903 static int
2904 bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
2905                         struct rte_ether_addr *addr)
2906 {
2907         if (mac_address_set(dev, addr)) {
2908                 RTE_BOND_LOG(ERR, "Failed to update MAC address");
2909                 return -EINVAL;
2910         }
2911
2912         return 0;
2913 }
2914
2915 static int
2916 bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
2917                  enum rte_filter_type type, enum rte_filter_op op, void *arg)
2918 {
2919         if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
2920                 *(const void **)arg = &bond_flow_ops;
2921                 return 0;
2922         }
2923         return -ENOTSUP;
2924 }
2925
2926 static int
2927 bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
2928                         struct rte_ether_addr *mac_addr,
2929                         __rte_unused uint32_t index, uint32_t vmdq)
2930 {
2931         struct rte_eth_dev *slave_eth_dev;
2932         struct bond_dev_private *internals = dev->data->dev_private;
2933         int ret, i;
2934
2935         rte_spinlock_lock(&internals->lock);
2936
2937         for (i = 0; i < internals->slave_count; i++) {
2938                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
2939                 if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
2940                          *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
2941                         ret = -ENOTSUP;
2942                         goto end;
2943                 }
2944         }
2945
2946         for (i = 0; i < internals->slave_count; i++) {
2947                 ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
2948                                 mac_addr, vmdq);
2949                 if (ret < 0) {
2950                         /* rollback */
2951                         for (i--; i >= 0; i--)
2952                                 rte_eth_dev_mac_addr_remove(
2953                                         internals->slaves[i].port_id, mac_addr);
2954                         goto end;
2955                 }
2956         }
2957
2958         ret = 0;
2959 end:
2960         rte_spinlock_unlock(&internals->lock);
2961         return ret;
2962 }
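/*
 * Illustrative sketch, not part of the upstream driver: adding a secondary
 * unicast MAC to the bonded port, which the handler above mirrors onto every
 * slave and rolls back on partial failure. The (locally administered)
 * address bytes are arbitrary examples.
 */
static int __rte_unused
example_add_secondary_mac(uint16_t bond_port_id)
{
        struct rte_ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
        };

        /* vmdq pool index 0: no VMDq pool in use */
        return rte_eth_dev_mac_addr_add(bond_port_id, &addr, 0);
}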
2963
2964 static void
2965 bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2966 {
2967         struct rte_eth_dev *slave_eth_dev;
2968         struct bond_dev_private *internals = dev->data->dev_private;
2969         int i;
2970
2971         rte_spinlock_lock(&internals->lock);
2972
2973         for (i = 0; i < internals->slave_count; i++) {
2974                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
2975                 if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
2976                         goto end;
2977         }
2978
2979         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
2980
2981         for (i = 0; i < internals->slave_count; i++)
2982                 rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
2983                                 mac_addr);
2984
2985 end:
2986         rte_spinlock_unlock(&internals->lock);
2987 }
2988
2989 const struct eth_dev_ops default_dev_ops = {
2990         .dev_start            = bond_ethdev_start,
2991         .dev_stop             = bond_ethdev_stop,
2992         .dev_close            = bond_ethdev_close,
2993         .dev_configure        = bond_ethdev_configure,
2994         .dev_infos_get        = bond_ethdev_info,
2995         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
2996         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
2997         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
2998         .rx_queue_release     = bond_ethdev_rx_queue_release,
2999         .tx_queue_release     = bond_ethdev_tx_queue_release,
3000         .link_update          = bond_ethdev_link_update,
3001         .stats_get            = bond_ethdev_stats_get,
3002         .stats_reset          = bond_ethdev_stats_reset,
3003         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
3004         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
3005         .allmulticast_enable  = bond_ethdev_allmulticast_enable,
3006         .allmulticast_disable = bond_ethdev_allmulticast_disable,
3007         .reta_update          = bond_ethdev_rss_reta_update,
3008         .reta_query           = bond_ethdev_rss_reta_query,
3009         .rss_hash_update      = bond_ethdev_rss_hash_update,
3010         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get,
3011         .mtu_set              = bond_ethdev_mtu_set,
3012         .mac_addr_set         = bond_ethdev_mac_address_set,
3013         .mac_addr_add         = bond_ethdev_mac_addr_add,
3014         .mac_addr_remove      = bond_ethdev_mac_addr_remove,
3015         .filter_ctrl          = bond_filter_ctrl
3016 };
3017
3018 static int
3019 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3020 {
3021         const char *name = rte_vdev_device_name(dev);
3022         uint8_t socket_id = dev->device.numa_node;
3023         struct bond_dev_private *internals = NULL;
3024         struct rte_eth_dev *eth_dev = NULL;
3025         uint32_t vlan_filter_bmp_size;
3026
3027         /* now do all data allocation - for eth_dev structure
3028          * and internal (private) data
3029          */
3030
3031         /* reserve an ethdev entry */
3032         eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
3033         if (eth_dev == NULL) {
3034                 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
3035                 goto err;
3036         }
3037
3038         internals = eth_dev->data->dev_private;
3039         eth_dev->data->nb_rx_queues = (uint16_t)1;
3040         eth_dev->data->nb_tx_queues = (uint16_t)1;
3041
3042         /* Allocate memory for storing MAC addresses */
3043         eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
3044                         BOND_MAX_MAC_ADDRS, 0, socket_id);
3045         if (eth_dev->data->mac_addrs == NULL) {
3046                 RTE_BOND_LOG(ERR,
3047                              "Failed to allocate %u bytes needed to store MAC addresses",
3048                              RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
3049                 goto err;
3050         }
3051
3052         eth_dev->dev_ops = &default_dev_ops;
3053         eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
3054
3055         rte_spinlock_init(&internals->lock);
3056         rte_spinlock_init(&internals->lsc_lock);
3057
3058         internals->port_id = eth_dev->data->port_id;
3059         internals->mode = BONDING_MODE_INVALID;
3060         internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
3061         internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
3062         internals->burst_xmit_hash = burst_xmit_l2_hash;
3063         internals->user_defined_mac = 0;
3064
3065         internals->link_status_polling_enabled = 0;
3066
3067         internals->link_status_polling_interval_ms =
3068                 DEFAULT_POLLING_INTERVAL_10_MS;
3069         internals->link_down_delay_ms = 0;
3070         internals->link_up_delay_ms = 0;
3071
3072         internals->slave_count = 0;
3073         internals->active_slave_count = 0;
3074         internals->rx_offload_capa = 0;
3075         internals->tx_offload_capa = 0;
3076         internals->rx_queue_offload_capa = 0;
3077         internals->tx_queue_offload_capa = 0;
3078         internals->candidate_max_rx_pktlen = 0;
3079         internals->max_rx_pktlen = 0;
3080
3081         /* Initially allow to choose any offload type */
3082         internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
3083
3084         memset(&internals->default_rxconf, 0,
3085                sizeof(internals->default_rxconf));
3086         memset(&internals->default_txconf, 0,
3087                sizeof(internals->default_txconf));
3088
3089         memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
3090         memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
3091
3092         memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
3093         memset(internals->slaves, 0, sizeof(internals->slaves));
3094
3095         TAILQ_INIT(&internals->flow_list);
3096         internals->flow_isolated_valid = 0;
3097
3098         /* Set mode 4 default configuration */
3099         bond_mode_8023ad_setup(eth_dev, NULL);
3100         if (bond_ethdev_mode_set(eth_dev, mode)) {
3101                 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
3102                                  eth_dev->data->port_id, mode);
3103                 goto err;
3104         }
3105
3106         vlan_filter_bmp_size =
3107                 rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
3108         internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
3109                                                    RTE_CACHE_LINE_SIZE);
3110         if (internals->vlan_filter_bmpmem == NULL) {
3111                 RTE_BOND_LOG(ERR,
3112                              "Failed to allocate vlan bitmap for bonded device %u",
3113                              eth_dev->data->port_id);
3114                 goto err;
3115         }
3116
3117         internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
3118                         internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
3119         if (internals->vlan_filter_bmp == NULL) {
3120                 RTE_BOND_LOG(ERR,
3121                              "Failed to init vlan bitmap for bonded device %u",
3122                              eth_dev->data->port_id);
3123                 rte_free(internals->vlan_filter_bmpmem);
3124                 goto err;
3125         }
3126
3127         return eth_dev->data->port_id;
3128
3129 err:
3130         rte_free(internals);
3131         if (eth_dev != NULL)
3132                 eth_dev->data->dev_private = NULL;
3133         rte_eth_dev_release_port(eth_dev);
3134         return -1;
3135 }
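/*
 * Illustrative sketch, not part of the upstream driver: bond_alloc() is also
 * reached through the public API, which creates and probes the vdev. A
 * hypothetical application could build a round-robin bond of ports 0 and 1
 * as follows; the function and device names are assumptions.
 */
static int __rte_unused
example_create_bond(void)
{
        int bond_port_id;

        bond_port_id = rte_eth_bond_create("net_bonding_example",
                        BONDING_MODE_ROUND_ROBIN, rte_socket_id());
        if (bond_port_id < 0)
                return bond_port_id;

        if (rte_eth_bond_slave_add(bond_port_id, 0) != 0 ||
            rte_eth_bond_slave_add(bond_port_id, 1) != 0)
                return -1;

        return bond_port_id;
}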
3136
3137 static int
3138 bond_probe(struct rte_vdev_device *dev)
3139 {
3140         const char *name;
3141         struct bond_dev_private *internals;
3142         struct rte_kvargs *kvlist;
3143         uint8_t bonding_mode, socket_id;
3144         int  arg_count, port_id;
3145         uint8_t agg_mode;
3146         struct rte_eth_dev *eth_dev;
3147
3148         if (!dev)
3149                 return -EINVAL;
3150
3151         name = rte_vdev_device_name(dev);
3152         RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
3153
3154         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3155                 eth_dev = rte_eth_dev_attach_secondary(name);
3156                 if (!eth_dev) {
3157                         RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3158                         return -1;
3159                 }
3160                 /* TODO: request info from primary to set up Rx and Tx */
3161                 eth_dev->dev_ops = &default_dev_ops;
3162                 eth_dev->device = &dev->device;
3163                 rte_eth_dev_probing_finish(eth_dev);
3164                 return 0;
3165         }
3166
3167         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
3168                 pmd_bond_init_valid_arguments);
3169         if (kvlist == NULL)
3170                 return -1;
3171
3172         /* Parse link bonding mode */
3173         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
3174                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
3175                                 &bond_ethdev_parse_slave_mode_kvarg,
3176                                 &bonding_mode) != 0) {
3177                         RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
3178                                         name);
3179                         goto parse_error;
3180                 }
3181         } else {
3182                 RTE_BOND_LOG(ERR, "Mode must be specified exactly once for bonded "
3183                                 "device %s", name);
3184                 goto parse_error;
3185         }
3186
3187         /* Parse socket id to create bonding device on */
3188         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
3189         if (arg_count == 1) {
3190                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
3191                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
3192                                 != 0) {
3193                         RTE_BOND_LOG(ERR, "Invalid socket id specified for "
3194                                         "bonded device %s", name);
3195                         goto parse_error;
3196                 }
3197         } else if (arg_count > 1) {
3198                 RTE_BOND_LOG(ERR, "Socket id can be specified only once for "
3199                                 "bonded device %s", name);
3200                 goto parse_error;
3201         } else {
3202                 socket_id = rte_socket_id();
3203         }
3204
3205         dev->device.numa_node = socket_id;
3206
3207         /* Create link bonding eth device */
3208         port_id = bond_alloc(dev, bonding_mode);
3209         if (port_id < 0) {
3210                 RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u "
3211                                 "on socket %u.", name, bonding_mode, socket_id);
3212                 goto parse_error;
3213         }
3214         internals = rte_eth_devices[port_id].data->dev_private;
3215         internals->kvlist = kvlist;
3216
3217         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3218                 if (rte_kvargs_process(kvlist,
3219                                 PMD_BOND_AGG_MODE_KVARG,
3220                                 &bond_ethdev_parse_slave_agg_mode_kvarg,
3221                                 &agg_mode) != 0) {
3222                         RTE_BOND_LOG(ERR,
3223                                         "Failed to parse agg selection mode for bonded device %s",
3224                                         name);
3225                         goto parse_error;
3226                 }
3227
3228                 if (internals->mode == BONDING_MODE_8023AD)
3229                         internals->mode4.agg_selection = agg_mode;
3230         } else {
3231                 internals->mode4.agg_selection = AGG_STABLE;
3232         }
3233
3234         rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
3235         RTE_BOND_LOG(INFO, "Created bonded device %s on port %d in mode %u on "
3236                         "socket %u.", name, port_id, bonding_mode, socket_id);
3237         return 0;
3238
3239 parse_error:
3240         rte_kvargs_free(kvlist);
3241
3242         return -1;
3243 }
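/*
 * For reference, bond_probe() is normally reached via an EAL --vdev argument
 * combining the kvargs parsed above; an assumed example:
 *
 *   --vdev 'net_bonding0,mode=2,slave=0000:04:00.0,slave=0000:04:00.1,xmit_policy=l34'
 */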
3244
3245 static int
3246 bond_remove(struct rte_vdev_device *dev)
3247 {
3248         struct rte_eth_dev *eth_dev;
3249         struct bond_dev_private *internals;
3250         const char *name;
3251
3252         if (!dev)
3253                 return -EINVAL;
3254
3255         name = rte_vdev_device_name(dev);
3256         RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
3257
3258         /* now free all data allocation - for eth_dev structure
3259          * and internal (private) data
3260          */
3261
3262         /* find an ethdev entry */
3263         eth_dev = rte_eth_dev_allocated(name);
3264         if (eth_dev == NULL)
3265                 return -ENODEV;
3266
3267         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3268                 return rte_eth_dev_release_port(eth_dev);
3269
3270         RTE_ASSERT(eth_dev->device == &dev->device);
3271
3272         internals = eth_dev->data->dev_private;
3273         if (internals->slave_count != 0)
3274                 return -EBUSY;
3275
3276         if (eth_dev->data->dev_started == 1) {
3277                 bond_ethdev_stop(eth_dev);
3278                 bond_ethdev_close(eth_dev);
3279         }
3280
3281         eth_dev->dev_ops = NULL;
3282         eth_dev->rx_pkt_burst = NULL;
3283         eth_dev->tx_pkt_burst = NULL;
3284
3285         internals = eth_dev->data->dev_private;
3286         /* Try to release the mempool used in mode 6. If the bond device is
3287          * not in mode 6 the pointer is NULL, so freeing it is a harmless no-op.
3288          */
3289         rte_mempool_free(internals->mode6.mempool);
3290         rte_bitmap_free(internals->vlan_filter_bmp);
3291         rte_free(internals->vlan_filter_bmpmem);
3292
3293         rte_eth_dev_release_port(eth_dev);
3294
3295         return 0;
3296 }
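/*
 * Illustrative sketch, not part of the upstream driver: bond_remove() is
 * reached when the vdev is unplugged. Because it returns -EBUSY while slaves
 * are still attached, a hypothetical teardown removes the slaves first.
 */
static int __rte_unused
example_destroy_bond(uint16_t bond_port_id, const char *bond_name)
{
        uint16_t slaves[RTE_MAX_ETHPORTS];
        int n, i;

        n = rte_eth_bond_slaves_get(bond_port_id, slaves, RTE_DIM(slaves));
        if (n < 0)
                return n;

        for (i = 0; i < n; i++)
                if (rte_eth_bond_slave_remove(bond_port_id, slaves[i]) != 0)
                        return -1;

        return rte_eth_bond_free(bond_name);
}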
3297
3298 /* This part resolves the slave port ids after all the other pdevs and vdevs
3299  * have been allocated */
3300 static int
3301 bond_ethdev_configure(struct rte_eth_dev *dev)
3302 {
3303         const char *name = dev->device->name;
3304         struct bond_dev_private *internals = dev->data->dev_private;
3305         struct rte_kvargs *kvlist = internals->kvlist;
3306         int arg_count;
3307         uint16_t port_id = dev - rte_eth_devices;
3308         uint8_t agg_mode;
3309
3310         static const uint8_t default_rss_key[40] = {
3311                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3312                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3313                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3314                 0xBE, 0xAC, 0x01, 0xFA
3315         };
3316
3317         unsigned i, j;
3318
3319         /*
3320          * If RSS is enabled, fill the RETA table with default values and
3321          * set the key to the value specified in the port RSS configuration.
3322          * Fall back to the default RSS key if no key is specified.
3323          */
3324         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
3325                 if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
3326                         internals->rss_key_len =
3327                                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
3328                         memcpy(internals->rss_key,
3329                                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
3330                                internals->rss_key_len);
3331                 } else {
3332                         internals->rss_key_len = sizeof(default_rss_key);
3333                         memcpy(internals->rss_key, default_rss_key,
3334                                internals->rss_key_len);
3335                 }
3336
3337                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3338                         internals->reta_conf[i].mask = ~0LL;
3339                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
3340                                 internals->reta_conf[i].reta[j] =
3341                                                 (i * RTE_RETA_GROUP_SIZE + j) %
3342                                                 dev->data->nb_rx_queues;
3343                 }
3344         }
3345
3346         /* set the max_rx_pktlen */
3347         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
3348
3349         /*
3350          * if no kvlist, it means that this bonded device has been created
3351          * through the bonding api.
3352          */
3353         if (!kvlist)
3354                 return 0;
3355
3356         /* Parse MAC address for bonded device */
3357         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
3358         if (arg_count == 1) {
3359                 struct rte_ether_addr bond_mac;
3360
3361                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
3362                                        &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
3363                         RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
3364                                      name);
3365                         return -1;
3366                 }
3367
3368                 /* Set MAC address */
3369                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
3370                         RTE_BOND_LOG(ERR,
3371                                      "Failed to set mac address on bonded device %s",
3372                                      name);
3373                         return -1;
3374                 }
3375         } else if (arg_count > 1) {
3376                 RTE_BOND_LOG(ERR,
3377                              "MAC address can be specified only once for bonded device %s",
3378                              name);
3379                 return -1;
3380         }
3381
3382         /* Parse/set balance mode transmit policy */
3383         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
3384         if (arg_count == 1) {
3385                 uint8_t xmit_policy;
3386
3387                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
3388                                        &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
3389                     0) {
3390                         RTE_BOND_LOG(INFO,
3391                                      "Invalid xmit policy specified for bonded device %s",
3392                                      name);
3393                         return -1;
3394                 }
3395
3396                 /* Set balance mode transmit policy */
3397                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
3398                         RTE_BOND_LOG(ERR,
3399                                      "Failed to set balance xmit policy on bonded device %s",
3400                                      name);
3401                         return -1;
3402                 }
3403         } else if (arg_count > 1) {
3404                 RTE_BOND_LOG(ERR,
3405                              "Transmit policy can be specified only once for bonded device %s",
3406                              name);
3407                 return -1;
3408         }
3409
3410         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3411                 if (rte_kvargs_process(kvlist,
3412                                        PMD_BOND_AGG_MODE_KVARG,
3413                                        &bond_ethdev_parse_slave_agg_mode_kvarg,
3414                                        &agg_mode) != 0) {
3415                         RTE_BOND_LOG(ERR,
3416                                      "Failed to parse agg selection mode for bonded device %s",
3417                                      name);
3418                 }
3419                 if (internals->mode == BONDING_MODE_8023AD) {
3420                         int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
3421                                         agg_mode);
3422                         if (ret < 0) {
3423                                 RTE_BOND_LOG(ERR,
3424                                         "Invalid args for agg selection set for bonded device %s",
3425                                         name);
3426                                 return -1;
3427                         }
3428                 }
3429         }
3430
3431         /* Parse/add slave ports to bonded device */
3432         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
3433                 struct bond_ethdev_slave_ports slave_ports;
3434                 unsigned i;
3435
3436                 memset(&slave_ports, 0, sizeof(slave_ports));
3437
3438                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
3439                                        &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
3440                         RTE_BOND_LOG(ERR,
3441                                      "Failed to parse slave ports for bonded device %s",
3442                                      name);
3443                         return -1;
3444                 }
3445
3446                 for (i = 0; i < slave_ports.slave_count; i++) {
3447                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
3448                                 RTE_BOND_LOG(ERR,
3449                                              "Failed to add port %d as slave to bonded device %s",
3450                                              slave_ports.slaves[i], name);
3451                         }
3452                 }
3453
3454         } else {
3455                 RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
3456                 return -1;
3457         }
3458
3459         /* Parse/set primary slave port id*/
3460         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
3461         if (arg_count == 1) {
3462                 uint16_t primary_slave_port_id;
3463
3464                 if (rte_kvargs_process(kvlist,
3465                                        PMD_BOND_PRIMARY_SLAVE_KVARG,
3466                                        &bond_ethdev_parse_primary_slave_port_id_kvarg,
3467                                        &primary_slave_port_id) < 0) {
3468                         RTE_BOND_LOG(INFO,
3469                                      "Invalid primary slave port id specified for bonded device %s",
3470                                      name);
3471                         return -1;
3472                 }
3473
3474                 /* Set primary slave port id */
3475                 if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
3476                     != 0) {
3477                         RTE_BOND_LOG(ERR,
3478                                      "Failed to set primary slave port %d on bonded device %s",
3479                                      primary_slave_port_id, name);
3480                         return -1;
3481                 }
3482         } else if (arg_count > 1) {
3483                 RTE_BOND_LOG(INFO,
3484                              "Primary slave can be specified only once for bonded device %s",
3485                              name);
3486                 return -1;
3487         }
3488
3489         /* Parse link status monitor polling interval */
3490         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
3491         if (arg_count == 1) {
3492                 uint32_t lsc_poll_interval_ms;
3493
3494                 if (rte_kvargs_process(kvlist,
3495                                        PMD_BOND_LSC_POLL_PERIOD_KVARG,
3496                                        &bond_ethdev_parse_time_ms_kvarg,
3497                                        &lsc_poll_interval_ms) < 0) {
3498                         RTE_BOND_LOG(INFO,
3499                                      "Invalid lsc polling interval value specified for bonded"
3500                                      " device %s", name);
3501                         return -1;
3502                 }
3503
3504                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
3505                     != 0) {
3506                         RTE_BOND_LOG(ERR,
3507                                      "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
3508                                      lsc_poll_interval_ms, name);
3509                         return -1;
3510                 }
3511         } else if (arg_count > 1) {
3512                 RTE_BOND_LOG(INFO,
3513                              "LSC polling interval can be specified only once for bonded"
3514                              " device %s", name);
3515                 return -1;
3516         }
3517
3518         /* Parse link up interrupt propagation delay */
3519         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
3520         if (arg_count == 1) {
3521                 uint32_t link_up_delay_ms;
3522
3523                 if (rte_kvargs_process(kvlist,
3524                                        PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
3525                                        &bond_ethdev_parse_time_ms_kvarg,
3526                                        &link_up_delay_ms) < 0) {
3527                         RTE_BOND_LOG(INFO,
3528                                      "Invalid link up propagation delay value specified for"
3529                                      " bonded device %s", name);
3530                         return -1;
3531                 }
3532
3533                 /* Set link up propagation delay */
3534                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
3535                     != 0) {
3536                         RTE_BOND_LOG(ERR,
3537                                      "Failed to set link up propagation delay (%u ms) on bonded"
3538                                      " device %s", link_up_delay_ms, name);
3539                         return -1;
3540                 }
3541         } else if (arg_count > 1) {
3542                 RTE_BOND_LOG(INFO,
3543                              "Link up propagation delay can be specified only once for"
3544                              " bonded device %s", name);
3545                 return -1;
3546         }
3547
3548         /* Parse link down interrupt propagation delay */
3549         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
3550         if (arg_count == 1) {
3551                 uint32_t link_down_delay_ms;
3552
3553                 if (rte_kvargs_process(kvlist,
3554                                        PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
3555                                        &bond_ethdev_parse_time_ms_kvarg,
3556                                        &link_down_delay_ms) < 0) {
3557                         RTE_BOND_LOG(INFO,
3558                                      "Invalid link down propagation delay value specified for"
3559                                      " bonded device %s", name);
3560                         return -1;
3561                 }
3562
3563                 /* Set link down propagation delay */
3564                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
3565                     != 0) {
3566                         RTE_BOND_LOG(ERR,
3567                                      "Failed to set link down propagation delay (%u ms) on bonded device %s",
3568                                      link_down_delay_ms, name);
3569                         return -1;
3570                 }
3571         } else if (arg_count > 1) {
3572                 RTE_BOND_LOG(INFO,
3573                              "Link down propagation delay can be specified only once for bonded device %s",
3574                              name);
3575                 return -1;
3576         }
3577
3578         return 0;
3579 }
3580
3581 struct rte_vdev_driver pmd_bond_drv = {
3582         .probe = bond_probe,
3583         .remove = bond_remove,
3584 };
3585
3586 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
3587 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
3588
3589 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
3590         "slave=<ifc> "
3591         "primary=<ifc> "
3592         "mode=[0-6] "
3593         "xmit_policy=[l2 | l23 | l34] "
3594         "agg_mode=[count | stable | bandwidth] "
3595         "socket_id=<int> "
3596         "mac=<mac addr> "
3597         "lsc_poll_period_ms=<int> "
3598         "up_delay=<int> "
3599         "down_delay=<int>");
3600
3601 int bond_logtype;
3602
3603 RTE_INIT(bond_init_log)
3604 {
3605         bond_logtype = rte_log_register("pmd.net.bond");
3606         if (bond_logtype >= 0)
3607                 rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
3608 }