net/bonding: fix MAC address when switching active port
drivers/net/bonding/rte_eth_bond_pmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdlib.h>
#include <stdbool.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"

#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define BOND_MAX_MAC_ADDRS 16

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];

static inline size_t
get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
        size_t vlan_offset = 0;

        if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
                rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
                struct rte_vlan_hdr *vlan_hdr =
                        (struct rte_vlan_hdr *)(eth_hdr + 1);

                vlan_offset = sizeof(struct rte_vlan_hdr);
                *proto = vlan_hdr->eth_proto;

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
                        vlan_hdr = vlan_hdr + 1;
                        *proto = vlan_hdr->eth_proto;
                        vlan_offset += sizeof(struct rte_vlan_hdr);
                }
        }
        return vlan_offset;
}
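
/*
 * Usage sketch (illustrative only; "example_" helpers are not part of the
 * driver): given an mbuf "m" holding a frame that may carry one or two VLAN
 * tags, recover the inner EtherType and a pointer to the L3 header.
 */
static inline void *
example_l3_header(struct rte_mbuf *m, uint16_t *inner_proto)
{
        struct rte_ether_hdr *eth_hdr =
                rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

        *inner_proto = eth_hdr->ether_type;
        /* get_vlan_offset() rewrites *inner_proto to the encapsulated
         * EtherType and returns the number of VLAN header bytes to skip. */
        return (char *)(eth_hdr + 1) + get_vlan_offset(eth_hdr, inner_proto);
}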

static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        uint16_t num_rx_total = 0;
        uint16_t slave_count;
        uint16_t active_slave;
        int i;

        /* Cast to structure containing the bonded device's port id and
         * queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        internals = bd_rx_q->dev_private;
        slave_count = internals->active_slave_count;
        active_slave = internals->active_slave;

        for (i = 0; i < slave_count && nb_pkts; i++) {
                uint16_t num_rx_slave;

                /* The offset into *bufs increases as packets are received
                 * from the other slaves */
                num_rx_slave =
                        rte_eth_rx_burst(internals->active_slaves[active_slave],
                                         bd_rx_q->queue_id,
                                         bufs + num_rx_total, nb_pkts);
                num_rx_total += num_rx_slave;
                nb_pkts -= num_rx_slave;
                if (++active_slave == slave_count)
                        active_slave = 0;
        }

        if (++internals->active_slave >= slave_count)
                internals->active_slave = 0;
        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        /* Cast to structure containing the bonded device's port id and
         * queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        return rte_eth_rx_burst(internals->current_primary_port,
                        bd_rx_q->queue_id, bufs, nb_pkts);
}

static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
        const uint16_t ether_type_slow_be =
                rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);

        return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
                (ethertype == ether_type_slow_be &&
                (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
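
/*
 * Classification sketch (illustrative only, not called by the driver):
 * identify an untagged LACP or marker PDU in an mbuf "m" the same way the
 * mode 4 RX path below does.
 */
static inline int
example_is_slow_pdu(struct rte_mbuf *m)
{
        struct rte_ether_hdr *hdr =
                rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        uint8_t subtype =
                ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

        return is_lacp_packets(hdr->ether_type, subtype, m);
}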

/*****************************************************************************
 * Flow director's setup for mode 4 optimization
 */

static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
        .dst.addr_bytes = { 0 },
        .src.addr_bytes = { 0 },
        .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};

static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
        .dst.addr_bytes = { 0 },
        .src.addr_bytes = { 0 },
        .type = 0xFFFF,
};

static struct rte_flow_item flow_item_8023ad[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &flow_item_eth_type_8023ad,
                .last = NULL,
                .mask = &flow_item_eth_mask_type_8023ad,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_END,
                .spec = NULL,
                .last = NULL,
                .mask = NULL,
        }
};

const struct rte_flow_attr flow_attr_8023ad = {
        .group = 0,
        .priority = 0,
        .ingress = 1,
        .egress = 0,
        .reserved = 0,
};

int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
                uint16_t slave_port) {
        struct rte_eth_dev_info slave_info;
        struct rte_flow_error error;
        struct bond_dev_private *internals = bond_dev->data->dev_private;

        const struct rte_flow_action_queue lacp_queue_conf = {
                .index = 0,
        };

        const struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &lacp_queue_conf
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                }
        };

        int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
                        flow_item_8023ad, actions, &error);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
                                __func__, error.message, slave_port,
                                internals->mode4.dedicated_queues.rx_qid);
                return -1;
        }

        ret = rte_eth_dev_info_get(slave_port, &slave_info);
        if (ret != 0) {
                RTE_BOND_LOG(ERR,
                        "%s: Error getting device (port %u) info: %s",
                        __func__, slave_port, strerror(-ret));

                return ret;
        }

        if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
                        slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
                RTE_BOND_LOG(ERR,
                        "%s: Slave %d capabilities don't allow allocating additional queues",
                        __func__, slave_port);
                return -1;
        }

        return 0;
}

int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
        struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
        struct bond_dev_private *internals = bond_dev->data->dev_private;
        struct rte_eth_dev_info bond_info;
        uint16_t idx;
        int ret;

        /* Verify that every slave in the bonding supports the slow-packet
         * flow rule */
        if (internals->slave_count > 0) {
                ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
                if (ret != 0) {
                        RTE_BOND_LOG(ERR,
                                "%s: Error getting device (port %u) info: %s",
                                __func__, bond_dev->data->port_id,
                                strerror(-ret));

                        return ret;
                }

                internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
                internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;

                for (idx = 0; idx < internals->slave_count; idx++) {
                        if (bond_ethdev_8023ad_flow_verify(bond_dev,
                                        internals->slaves[idx].port_id) != 0)
                                return -1;
                }
        }

        return 0;
}

int
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {

        struct rte_flow_error error;
        struct bond_dev_private *internals = bond_dev->data->dev_private;
        struct rte_flow_action_queue lacp_queue_conf = {
                .index = internals->mode4.dedicated_queues.rx_qid,
        };

        const struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &lacp_queue_conf
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                }
        };

        internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
                        &flow_attr_8023ad, flow_item_8023ad, actions, &error);
        if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
                RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
                                "(slave_port=%d queue_id=%d)",
                                error.message, slave_port,
                                internals->mode4.dedicated_queues.rx_qid);
                return -1;
        }

        return 0;
}
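
/*
 * Symmetric teardown sketch (illustrative only, not called by the driver
 * here): destroy the dedicated-queue flow created above for a slave port
 * and clear the stored handle.
 */
static inline int
example_8023ad_flow_unset(struct bond_dev_private *internals,
                uint16_t slave_port)
{
        struct rte_flow_error error;
        struct rte_flow *flow =
                internals->mode4.dedicated_queues.flow[slave_port];

        if (flow == NULL)
                return 0;

        if (rte_flow_destroy(slave_port, flow, &error) != 0) {
                RTE_BOND_LOG(ERR, "failed to destroy flow: %s", error.message);
                return -1;
        }

        internals->mode4.dedicated_queues.flow[slave_port] = NULL;
        return 0;
}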

static inline uint16_t
rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
                bool dedicated_rxq)
{
        /* Cast to structure containing the bonded device's port id and
         * queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct rte_eth_dev *bonded_eth_dev =
                                        &rte_eth_devices[internals->port_id];
        struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
        struct rte_ether_hdr *hdr;

        const uint16_t ether_type_slow_be =
                rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
        uint16_t num_rx_total = 0;      /* Total number of received packets */
        uint16_t slaves[RTE_MAX_ETHPORTS];
        uint16_t slave_count, idx;

        uint8_t collecting;  /* current slave collecting status */
        const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
        const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
        uint8_t subtype;
        uint16_t i;
        uint16_t j;
        uint16_t k;

        /* Copy slave list to protect against slave up/down changes during rx
         * bursting */
        slave_count = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * slave_count);

        idx = internals->active_slave;
        if (idx >= slave_count) {
                internals->active_slave = 0;
                idx = 0;
        }
        for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
                j = num_rx_total;
                collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
                                         COLLECTING);

                /* Read packets from this slave */
                num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
                                &bufs[num_rx_total], nb_pkts - num_rx_total);

                for (k = j; k < 2 && k < num_rx_total; k++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

                /* Handle slow protocol packets. */
                while (j < num_rx_total) {
                        if (j + 3 < num_rx_total)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

                        hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
                        subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

                        /* Remove the packet from the array if:
                         * - it is a slow packet but no dedicated rxq is present,
                         * - the slave is not in collecting state,
                         * - the bonding interface is not in promiscuous mode and:
                         *   - the packet is unicast and the address does not match,
                         *   - the packet is multicast and the bonding interface
                         *     is not in allmulti.
                         */
                        if (unlikely(
                                (!dedicated_rxq &&
                                 is_lacp_packets(hdr->ether_type, subtype,
                                                 bufs[j])) ||
                                !collecting ||
                                (!promisc &&
                                 ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
                                   !rte_is_same_ether_addr(bond_mac,
                                                       &hdr->d_addr)) ||
                                  (!allmulti &&
                                   rte_is_multicast_ether_addr(&hdr->d_addr)))))) {

                                if (hdr->ether_type == ether_type_slow_be) {
                                        bond_mode_8023ad_handle_slow_pkt(
                                            internals, slaves[idx], bufs[j]);
                                } else
                                        rte_pktmbuf_free(bufs[j]);

                                /* Packet is managed by mode 4 or dropped, shift the array */
                                num_rx_total--;
                                if (j < num_rx_total) {
                                        memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
                                                (num_rx_total - j));
                                }
                        } else
                                j++;
                }
                if (unlikely(++idx == slave_count))
                        idx = 0;
        }

        if (++internals->active_slave >= slave_count)
                internals->active_slave = 0;

        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        return rx_burst_8023ad(queue, bufs, nb_pkts, false);
}

static uint16_t
bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        return rx_burst_8023ad(queue, bufs, nb_pkts, true);
}

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
        switch (arp_op) {
        case RTE_ARP_OP_REQUEST:
                strlcpy(buf, "ARP Request", buf_len);
                return;
        case RTE_ARP_OP_REPLY:
                strlcpy(buf, "ARP Reply", buf_len);
                return;
        case RTE_ARP_OP_REVREQUEST:
                strlcpy(buf, "Reverse ARP Request", buf_len);
                return;
        case RTE_ARP_OP_REVREPLY:
                strlcpy(buf, "Reverse ARP Reply", buf_len);
                return;
        case RTE_ARP_OP_INVREQUEST:
                strlcpy(buf, "Peer Identify Request", buf_len);
                return;
        case RTE_ARP_OP_INVREPLY:
                strlcpy(buf, "Peer Identify Reply", buf_len);
                return;
        default:
                break;
        }
        strlcpy(buf, "Unknown", buf_len);
        return;
}
#endif
#define MaxIPv4String   16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
        uint32_t ipv4_addr;

        ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
        snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
                (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
                ipv4_addr & 0xFF);
}

#define MAX_CLIENTS_NUMBER      128
uint8_t active_clients;
struct client_stats_t {
        uint16_t port;
        uint32_t ipv4_addr;
        uint32_t ipv4_rx_packets;
        uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
{
        int i = 0;

        for (; i < MAX_CLIENTS_NUMBER; i++) {
                if ((client_stats[i].ipv4_addr == addr) &&
                                (client_stats[i].port == port)) {
                        /* Just update the packet count for this client */
                        if (TXorRXindicator == &burstnumberRX)
                                client_stats[i].ipv4_rx_packets++;
                        else
                                client_stats[i].ipv4_tx_packets++;
                        return;
                }
        }
        /* We have a new client. Insert it into the table and update the stats */
        if (TXorRXindicator == &burstnumberRX)
                client_stats[active_clients].ipv4_rx_packets++;
        else
                client_stats[active_clients].ipv4_tx_packets++;
        client_stats[active_clients].ipv4_addr = addr;
        client_stats[active_clients].port = port;
        active_clients++;
}

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
        rte_log(RTE_LOG_DEBUG, bond_logtype,                            \
                "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
                "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
                info,                                                   \
                port,                                                   \
                eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
                eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
                eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
                src_ip,                                                 \
                eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
                eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
                eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
                dst_ip,                                                 \
                arp_op, ++burstnumber)
#endif

static void
mode6_debug(const char __rte_unused *info,
        struct rte_ether_hdr *eth_h, uint16_t port,
        uint32_t __rte_unused *burstnumber)
{
        struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        struct rte_arp_hdr *arp_h;
        char dst_ip[16];
        char ArpOp[24];
        char buf[16];
#endif
        char src_ip[16];

        uint16_t ether_type = eth_h->ether_type;
        uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        strlcpy(buf, info, 16);
#endif

        if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
                ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
                ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
                update_client_stats(ipv4_h->src_addr, port, burstnumber);
        }
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
                arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
                ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
                arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
                                ArpOp, sizeof(ArpOp));
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
        }
#endif
}
#endif

static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;
        struct rte_ether_hdr *eth_h;
        uint16_t ether_type, offset;
        uint16_t nb_recv_pkts;
        int i;

        nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

        for (i = 0; i < nb_recv_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
                        bond_mode_alb_arp_recv(eth_h, offset, internals);
                }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
                        mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
        }

        return nb_recv_pkts;
}

static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        uint16_t num_of_slaves;
        uint16_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave;

        static int slave_idx = 0;
        int i, cslave_idx = 0, tx_fail_total = 0;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate each slave's mbuf array with the packets to be sent on it */
        for (i = 0; i < nb_pkts; i++) {
                cslave_idx = (slave_idx + i) % num_of_slaves;
                slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
        }

        /* Increment the current slave index so the next call to tx burst
         * starts on the next slave */
        slave_idx = ++cslave_idx;

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* if tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += tx_fail_slave;

                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                       &slave_bufs[i][num_tx_slave],
                                       tx_fail_slave * sizeof(bufs[0]));
                        }
                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
                struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        if (internals->active_slave_count < 1)
                return 0;

        return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
                        bufs, nb_pkts);
}

static inline uint16_t
ether_hash(struct rte_ether_hdr *eth_hdr)
{
        unaligned_uint16_t *word_src_addr =
                (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
        unaligned_uint16_t *word_dst_addr =
                (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
        return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
        unaligned_uint32_t *word_src_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
        unaligned_uint32_t *word_dst_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]) ^
                        (word_src_addr[3] ^ word_dst_addr[3]);
}


void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        struct rte_ether_hdr *eth_hdr;
        uint32_t hash;
        int i;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);

                hash = ether_hash(eth_hdr);

                slaves[i] = (hash ^= hash >> 8) % slave_count;
        }
}

void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        uint16_t i;
        struct rte_ether_hdr *eth_hdr;
        uint16_t proto;
        size_t vlan_offset;
        uint32_t hash, l3hash;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
                l3hash = 0;

                proto = eth_hdr->ether_type;
                hash = ether_hash(eth_hdr);

                vlan_offset = get_vlan_offset(eth_hdr, &proto);

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
                        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv4_hash(ipv4_hdr);

                } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
                        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv6_hash(ipv6_hdr);
                }

                hash = hash ^ l3hash;
                hash ^= hash >> 16;
                hash ^= hash >> 8;

                slaves[i] = hash % slave_count;
        }
}

void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        struct rte_ether_hdr *eth_hdr;
        uint16_t proto;
        size_t vlan_offset;
        int i;

        struct rte_udp_hdr *udp_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        uint32_t hash, l3hash, l4hash;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
                size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
                proto = eth_hdr->ether_type;
                vlan_offset = get_vlan_offset(eth_hdr, &proto);
                l3hash = 0;
                l4hash = 0;

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
                        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        size_t ip_hdr_offset;

                        l3hash = ipv4_hash(ipv4_hdr);

                        /* there is no L4 header in a fragmented packet */
                        if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
                                                                == 0)) {
                                ip_hdr_offset = (ipv4_hdr->version_ihl
                                        & RTE_IPV4_HDR_IHL_MASK) *
                                        RTE_IPV4_IHL_MULTIPLIER;

                                if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
                                        tcp_hdr = (struct rte_tcp_hdr *)
                                                ((char *)ipv4_hdr +
                                                        ip_hdr_offset);
                                        if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
                                                        < pkt_end)
                                                l4hash = HASH_L4_PORTS(tcp_hdr);
                                } else if (ipv4_hdr->next_proto_id ==
                                                                IPPROTO_UDP) {
                                        udp_hdr = (struct rte_udp_hdr *)
                                                ((char *)ipv4_hdr +
                                                        ip_hdr_offset);
                                        if ((size_t)udp_hdr + sizeof(*udp_hdr)
                                                        < pkt_end)
                                                l4hash = HASH_L4_PORTS(udp_hdr);
                                }
                        }
                } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
                        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv6_hash(ipv6_hdr);

                        if (ipv6_hdr->proto == IPPROTO_TCP) {
                                tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
                                l4hash = HASH_L4_PORTS(tcp_hdr);
                        } else if (ipv6_hdr->proto == IPPROTO_UDP) {
                                udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
                                l4hash = HASH_L4_PORTS(udp_hdr);
                        }
                }

                hash = l3hash ^ l4hash;
                hash ^= hash >> 16;
                hash ^= hash >> 8;

                slaves[i] = hash % slave_count;
        }
}
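
/*
 * Consumption sketch (illustrative only; the real wiring happens where the
 * xmit policy is configured, not here): internals->burst_xmit_hash points at
 * one of the three hash callbacks above, and the balance/802.3AD TX paths
 * call it to map every packet of a burst to a slave index.
 */
static inline void
example_apply_xmit_policy(struct bond_dev_private *internals,
                struct rte_mbuf **bufs, uint16_t nb_bufs,
                uint16_t slave_count, uint16_t *slave_idxs)
{
        internals->burst_xmit_hash(bufs, nb_bufs, slave_count, slave_idxs);
}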

struct bwg_slave {
        uint64_t bwg_left_int;
        uint64_t bwg_left_remainder;
        uint16_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
        int i;

        for (i = 0; i < internals->active_slave_count; i++) {
                tlb_last_obytets[internals->active_slaves[i]] = 0;
        }
}

static int
bandwidth_cmp(const void *a, const void *b)
{
        const struct bwg_slave *bwg_a = a;
        const struct bwg_slave *bwg_b = b;
        int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
        int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
                        (int64_t)bwg_a->bwg_left_remainder;
        if (diff > 0)
                return 1;
        else if (diff < 0)
                return -1;
        else if (diff2 > 0)
                return 1;
        else if (diff2 < 0)
                return -1;
        else
                return 0;
}

static void
bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
                struct bwg_slave *bwg_slave)
{
        struct rte_eth_link link_status;
        int ret;

        ret = rte_eth_link_get_nowait(port_id, &link_status);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
                             port_id, rte_strerror(-ret));
                return;
        }
        uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
        if (link_bwg == 0)
                return;
        link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
        bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
        bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}

static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
        struct bond_dev_private *internals = arg;
        struct rte_eth_stats slave_stats;
        struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
        uint16_t slave_count;
        uint64_t tx_bytes;

        uint8_t update_stats = 0;
        uint16_t slave_id;
        uint16_t i;

        internals->slave_update_idx++;

        if (internals->slave_update_idx >= REORDER_PERIOD_MS)
                update_stats = 1;

        for (i = 0; i < internals->active_slave_count; i++) {
                slave_id = internals->active_slaves[i];
                rte_eth_stats_get(slave_id, &slave_stats);
                tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
                bandwidth_left(slave_id, tx_bytes,
                                internals->slave_update_idx, &bwg_array[i]);
                bwg_array[i].slave = slave_id;

                if (update_stats) {
                        tlb_last_obytets[slave_id] = slave_stats.obytes;
                }
        }

        if (update_stats == 1)
                internals->slave_update_idx = 0;

        slave_count = i;
        qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
        for (i = 0; i < slave_count; i++)
                internals->tlb_slaves_order[i] = bwg_array[i].slave;

        rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
                        (struct bond_dev_private *)internals);
}

static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_eth_dev *primary_port =
                        &rte_eth_devices[internals->primary_port];
        uint16_t num_tx_total = 0;
        uint16_t i, j;

        uint16_t num_of_slaves = internals->active_slave_count;
        uint16_t slaves[RTE_MAX_ETHPORTS];

        struct rte_ether_hdr *ether_hdr;
        struct rte_ether_addr primary_slave_addr;
        struct rte_ether_addr active_slave_addr;

        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->tlb_slaves_order,
                        sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

        rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

        if (nb_pkts > 3) {
                for (i = 0; i < 3; i++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
        }

        for (i = 0; i < num_of_slaves; i++) {
                rte_eth_macaddr_get(slaves[i], &active_slave_addr);
                for (j = num_tx_total; j < nb_pkts; j++) {
                        if (j + 3 < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

                        ether_hdr = rte_pktmbuf_mtod(bufs[j],
                                                struct rte_ether_hdr *);
                        if (rte_is_same_ether_addr(&ether_hdr->s_addr,
                                                        &primary_slave_addr))
                                rte_ether_addr_copy(&active_slave_addr,
                                                &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
                }

                num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                bufs + num_tx_total, nb_pkts - num_tx_total);

                if (num_tx_total == nb_pkts)
                        break;
        }

        return num_tx_total;
}

void
bond_tlb_disable(struct bond_dev_private *internals)
{
        rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
        bond_ethdev_update_tlb_slave_cb(internals);
}

static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_ether_hdr *eth_h;
        uint16_t ether_type, offset;

        struct client_data *client_info;

        /*
         * We create transmit buffers for every slave and one additional to
         * send through TLB. In the worst case every packet will be sent on
         * one port.
         */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
        uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

        /*
         * We create separate transmit buffers for update packets as they
         * won't be counted in num_tx_total.
         */
        struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
        uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

        struct rte_mbuf *upd_pkt;
        size_t pkt_size;

        uint16_t num_send, num_not_send = 0;
        uint16_t num_tx_total = 0;
        uint16_t slave_idx;

        int i, j;

        /* Search the tx buffer for ARP packets and forward them to ALB */
        for (i = 0; i < nb_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
                        slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

                        /* Change src mac in eth header */
                        rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

                        /* Add packet to slave tx buffer */
                        slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
                        slave_bufs_pkts[slave_idx]++;
                } else {
                        /* If packet is not ARP, send it with TLB policy */
                        slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
                                        bufs[i];
                        slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
                }
        }

        /* Update connected client ARP tables */
        if (internals->mode6.ntt) {
                for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
                        client_info = &internals->mode6.client_table[i];

                        if (client_info->in_use) {
                                /* Allocate new packet to send ARP update on current slave */
                                upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
                                if (upd_pkt == NULL) {
                                        RTE_BOND_LOG(ERR,
                                                     "Failed to allocate ARP packet from pool");
                                        continue;
                                }
                                pkt_size = sizeof(struct rte_ether_hdr) +
                                        sizeof(struct rte_arp_hdr) +
                                        client_info->vlan_count *
                                        sizeof(struct rte_vlan_hdr);
                                upd_pkt->data_len = pkt_size;
                                upd_pkt->pkt_len = pkt_size;

                                slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
                                                internals);

                                /* Add packet to update tx buffer */
                                update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
                                update_bufs_pkts[slave_idx]++;
                        }
                }
                internals->mode6.ntt = 0;
        }

        /* Send ARP packets on the proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (slave_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
                                        slave_bufs[i], slave_bufs_pkts[i]);
                        for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
                                bufs[nb_pkts - 1 - num_not_send - j] =
                                                slave_bufs[i][nb_pkts - 1 - j];
                        }

                        num_tx_total += num_send;
                        num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        /* Print TX stats including update packets */
                        for (j = 0; j < slave_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
                                                        struct rte_ether_hdr *);
                                mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send update packets on the proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (update_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
                                        update_bufs_pkts[i]);
                        for (j = num_send; j < update_bufs_pkts[i]; j++) {
                                rte_pktmbuf_free(update_bufs[i][j]);
                        }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        for (j = 0; j < update_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
                                                        struct rte_ether_hdr *);
                                mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send non-ARP packets using the TLB policy */
        if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
                num_send = bond_ethdev_tx_burst_tlb(queue,
                                slave_bufs[RTE_MAX_ETHPORTS],
                                slave_bufs_pkts[RTE_MAX_ETHPORTS]);

                for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
                        bufs[nb_pkts - 1 - num_not_send - j] =
                                        slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
                }

                num_tx_total += num_send;
        }

        return num_tx_total;
}

static inline uint16_t
tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
                 uint16_t *slave_port_ids, uint16_t slave_count)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        /* Array to sort mbufs for transmission on each slave into */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
        /* Number of mbufs for transmission on each slave */
        uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
        /* Mapping array generated by hash function to map mbufs to slaves */
        uint16_t bufs_slave_port_idxs[nb_bufs];

        uint16_t slave_tx_count;
        uint16_t total_tx_count = 0, total_tx_fail_count = 0;

        uint16_t i;

        /*
         * Populate each slave's mbuf array with the packets to be sent on it,
         * selecting the output slave with the hash chosen by the xmit policy.
         */
        internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
                        bufs_slave_port_idxs);

        for (i = 0; i < nb_bufs; i++) {
                /* Populate slave mbuf arrays with mbufs for that slave. */
                uint16_t slave_idx = bufs_slave_port_idxs[i];

                slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < slave_count; i++) {
                if (slave_nb_bufs[i] == 0)
                        continue;

                slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
                                bd_tx_q->queue_id, slave_bufs[i],
                                slave_nb_bufs[i]);

                total_tx_count += slave_tx_count;

                /* If tx burst fails move packets to end of bufs */
                if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
                        int slave_tx_fail_count = slave_nb_bufs[i] -
                                        slave_tx_count;
                        total_tx_fail_count += slave_tx_fail_count;
                        memcpy(&bufs[nb_bufs - total_tx_fail_count],
                               &slave_bufs[i][slave_tx_count],
                               slave_tx_fail_count * sizeof(bufs[0]));
                }
        }

        return total_tx_count;
}

static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t slave_count;

        if (unlikely(nb_bufs == 0))
                return 0;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting
         */
        slave_count = internals->active_slave_count;
        if (unlikely(slave_count < 1))
                return 0;

        memcpy(slave_port_ids, internals->active_slaves,
                        sizeof(slave_port_ids[0]) * slave_count);
        return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
                                slave_count);
}

static inline uint16_t
tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
                bool dedicated_txq)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t slave_count;

        uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t dist_slave_count;

        uint16_t slave_tx_count;

        uint16_t i;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        slave_count = internals->active_slave_count;
        if (unlikely(slave_count < 1))
                return 0;

        memcpy(slave_port_ids, internals->active_slaves,
                        sizeof(slave_port_ids[0]) * slave_count);

        if (dedicated_txq)
                goto skip_tx_ring;

        /* Check for LACP control packets and send if available */
        for (i = 0; i < slave_count; i++) {
                struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
                struct rte_mbuf *ctrl_pkt = NULL;

                if (likely(rte_ring_empty(port->tx_ring)))
                        continue;

                if (rte_ring_dequeue(port->tx_ring,
                                     (void **)&ctrl_pkt) != -ENOENT) {
                        slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
                                        bd_tx_q->queue_id, &ctrl_pkt, 1);
                        /*
                         * Re-enqueue LAG control plane packets to the buffering
                         * ring if transmission fails so the packet isn't lost.
                         */
                        if (slave_tx_count != 1)
                                rte_ring_enqueue(port->tx_ring, ctrl_pkt);
                }
        }

skip_tx_ring:
        if (unlikely(nb_bufs == 0))
                return 0;

        dist_slave_count = 0;
        for (i = 0; i < slave_count; i++) {
                struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];

                if (ACTOR_STATE(port, DISTRIBUTING))
                        dist_slave_port_ids[dist_slave_count++] =
                                        slave_port_ids[i];
        }

        if (unlikely(dist_slave_count < 1))
                return 0;

        return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
                                dist_slave_count);
}

static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        return tx_burst_8023ad(queue, bufs, nb_bufs, false);
}

static uint16_t
bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}

static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint16_t slaves[RTE_MAX_ETHPORTS];
        uint8_t tx_failed_flag = 0;
        uint16_t num_of_slaves;

        uint16_t max_nb_of_tx_pkts = 0;

        int slave_tx_total[RTE_MAX_ETHPORTS];
        int i, most_successful_tx_slave = -1;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return 0;

        /* Increment reference count on mbufs */
        for (i = 0; i < nb_pkts; i++)
                rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

        /* Transmit burst on each active slave */
        for (i = 0; i < num_of_slaves; i++) {
                slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        bufs, nb_pkts);

                if (unlikely(slave_tx_total[i] < nb_pkts))
                        tx_failed_flag = 1;

                /* Record the value and slave index for the slave which
                 * transmits the maximum number of packets */
                if (slave_tx_total[i] > max_nb_of_tx_pkts) {
                        max_nb_of_tx_pkts = slave_tx_total[i];
                        most_successful_tx_slave = i;
                }
        }

        /* If slaves fail to transmit packets from the burst, the calling
         * application is not expected to know about multiple references to
         * packets, so we must handle failures of all packets except those
         * of the most successful slave.
         */
        if (unlikely(tx_failed_flag))
                for (i = 0; i < num_of_slaves; i++)
                        if (i != most_successful_tx_slave)
                                while (slave_tx_total[i] < nb_pkts)
                                        rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

        return max_nb_of_tx_pkts;
}
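
/*
 * Reference-count sketch (illustrative only, assuming n_slaves >= 1):
 * broadcasting an mbuf to n slaves first bumps its refcnt by n - 1, so each
 * of the n transmissions (or the explicit frees on failure above) releases
 * exactly one reference and the mbuf returns to its pool only after the
 * last one.
 */
static inline void
example_broadcast_refcnt(struct rte_mbuf *m, uint16_t n_slaves)
{
        if (n_slaves < 1)
                return;
        /* One reference already belongs to the caller's burst. */
        rte_mbuf_refcnt_update(m, n_slaves - 1);
}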
1355
1356 static void
1357 link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
1358 {
1359         struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1360
1361         if (bond_ctx->mode == BONDING_MODE_8023AD) {
1362                 /**
1363                  * In mode 4, save the link properties of the first slave;
1364                  * all subsequent slaves must match these properties
1365                  */
1366                 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1367
1368                 bond_link->link_autoneg = slave_link->link_autoneg;
1369                 bond_link->link_duplex = slave_link->link_duplex;
1370                 bond_link->link_speed = slave_link->link_speed;
1371         } else {
1372                 /**
1373                  * In any other mode the link properties are reset to the
1374                  * default values of AUTONEG/FULL_DUPLEX
1375                  */
1376                 ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
1377                 ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1378         }
1379 }
1380
1381 static int
1382 link_properties_valid(struct rte_eth_dev *ethdev,
1383                 struct rte_eth_link *slave_link)
1384 {
1385         struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1386
1387         if (bond_ctx->mode == BONDING_MODE_8023AD) {
1388                 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1389
1390                 if (bond_link->link_duplex != slave_link->link_duplex ||
1391                         bond_link->link_autoneg != slave_link->link_autoneg ||
1392                         bond_link->link_speed != slave_link->link_speed)
1393                         return -1;
1394         }
1395
1396         return 0;
1397 }
1398
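/* Copy the bonded device's primary MAC address (slot 0) into dst_mac_addr */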
1399 int
1400 mac_address_get(struct rte_eth_dev *eth_dev,
1401                 struct rte_ether_addr *dst_mac_addr)
1402 {
1403         struct rte_ether_addr *mac_addr;
1404
1405         if (eth_dev == NULL) {
1406                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1407                 return -1;
1408         }
1409
1410         if (dst_mac_addr == NULL) {
1411                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1412                 return -1;
1413         }
1414
1415         mac_addr = eth_dev->data->mac_addrs;
1416
1417         rte_ether_addr_copy(mac_addr, dst_mac_addr);
1418         return 0;
1419 }
1420
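/* Overwrite the bonded device's primary MAC address (slot 0) if it differs */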
1421 int
1422 mac_address_set(struct rte_eth_dev *eth_dev,
1423                 struct rte_ether_addr *new_mac_addr)
1424 {
1425         struct rte_ether_addr *mac_addr;
1426
1427         if (eth_dev == NULL) {
1428                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1429                 return -1;
1430         }
1431
1432         if (new_mac_addr == NULL) {
1433                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1434                 return -1;
1435         }
1436
1437         mac_addr = eth_dev->data->mac_addrs;
1438
1439         /* If the new MAC differs from the current MAC then update it */
1440         if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1441                 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1442
1443         return 0;
1444 }
1445
1446 static const struct rte_ether_addr null_mac_addr;
1447
1448 /*
1449  * Add additional MAC addresses to the slave
1450  */
1451 int
1452 slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1453                 uint16_t slave_port_id)
1454 {
1455         int i, ret;
1456         struct rte_ether_addr *mac_addr;
1457
1458         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1459                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1460                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1461                         break;
1462
1463                 ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
1464                 if (ret < 0) {
1465                         /* rollback */
1466                         for (i--; i > 0; i--)
1467                                 rte_eth_dev_mac_addr_remove(slave_port_id,
1468                                         &bonded_eth_dev->data->mac_addrs[i]);
1469                         return ret;
1470                 }
1471         }
1472
1473         return 0;
1474 }
1475
1476 /*
1477  * Remove additional MAC addresses from the slave
1478  */
1479 int
1480 slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1481                 uint16_t slave_port_id)
1482 {
1483         int i, rc, ret;
1484         struct rte_ether_addr *mac_addr;
1485
1486         rc = 0;
1487         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1488                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1489                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1490                         break;
1491
1492                 ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
1493                 /* save only the first error */
1494                 if (ret < 0 && rc == 0)
1495                         rc = ret;
1496         }
1497
1498         return rc;
1499 }
1500
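/*
 * Push the bonded device's MAC address down to its slaves. The policy is
 * mode dependent: round-robin, balance and broadcast put the bond MAC on
 * every slave; 802.3ad delegates to the mode 4 state machine; active
 * backup, TLB and ALB put the bond MAC only on the current primary and
 * restore every other slave to its own persisted MAC address.
 */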
1501 int
1502 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1503 {
1504         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1505         int i;
1506
1507         /* Update slave devices MAC addresses */
1508         if (internals->slave_count < 1)
1509                 return -1;
1510
1511         switch (internals->mode) {
1512         case BONDING_MODE_ROUND_ROBIN:
1513         case BONDING_MODE_BALANCE:
1514         case BONDING_MODE_BROADCAST:
1515                 for (i = 0; i < internals->slave_count; i++) {
1516                         if (rte_eth_dev_default_mac_addr_set(
1517                                         internals->slaves[i].port_id,
1518                                         bonded_eth_dev->data->mac_addrs)) {
1519                                 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1520                                                 internals->slaves[i].port_id);
1521                                 return -1;
1522                         }
1523                 }
1524                 break;
1525         case BONDING_MODE_8023AD:
1526                 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1527                 break;
1528         case BONDING_MODE_ACTIVE_BACKUP:
1529         case BONDING_MODE_TLB:
1530         case BONDING_MODE_ALB:
1531         default:
1532                 for (i = 0; i < internals->slave_count; i++) {
1533                         if (internals->slaves[i].port_id ==
1534                                         internals->current_primary_port) {
1535                                 if (rte_eth_dev_default_mac_addr_set(
1536                                                 internals->current_primary_port,
1537                                                 bonded_eth_dev->data->mac_addrs)) {
1538                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1539                                                         internals->current_primary_port);
1540                                         return -1;
1541                                 }
1542                         } else {
1543                                 if (rte_eth_dev_default_mac_addr_set(
1544                                                 internals->slaves[i].port_id,
1545                                                 &internals->slaves[i].persisted_mac_addr)) {
1546                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1547                                                         internals->slaves[i].port_id);
1548                                         return -1;
1549                                 }
1550                         }
1551                 }
1552         }
1553
1554         return 0;
1555 }
1556
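/*
 * Install the RX/TX burst handlers implementing the requested bonding
 * mode. Applications normally reach this through the public
 * rte_eth_bond_mode_set() API rather than calling it directly.
 */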
1557 int
1558 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1559 {
1560         struct bond_dev_private *internals;
1561
1562         internals = eth_dev->data->dev_private;
1563
1564         switch (mode) {
1565         case BONDING_MODE_ROUND_ROBIN:
1566                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1567                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1568                 break;
1569         case BONDING_MODE_ACTIVE_BACKUP:
1570                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1571                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1572                 break;
1573         case BONDING_MODE_BALANCE:
1574                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1575                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1576                 break;
1577         case BONDING_MODE_BROADCAST:
1578                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1579                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1580                 break;
1581         case BONDING_MODE_8023AD:
1582                 if (bond_mode_8023ad_enable(eth_dev) != 0)
1583                         return -1;
1584
1585                 if (internals->mode4.dedicated_queues.enabled == 0) {
1586                         eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1587                         eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1588                         RTE_BOND_LOG(WARNING,
1589                                 "Using mode 4, it is necessary to do TX burst "
1590                                 "and RX burst at least every 100ms.");
1591                 } else {
1592                         /* Use flow director's optimization */
1593                         eth_dev->rx_pkt_burst =
1594                                         bond_ethdev_rx_burst_8023ad_fast_queue;
1595                         eth_dev->tx_pkt_burst =
1596                                         bond_ethdev_tx_burst_8023ad_fast_queue;
1597                 }
1598                 break;
1599         case BONDING_MODE_TLB:
1600                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1601                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1602                 break;
1603         case BONDING_MODE_ALB:
1604                 if (bond_mode_alb_enable(eth_dev) != 0)
1605                         return -1;
1606
1607                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1608                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1609                 break;
1610         default:
1611                 return -1;
1612         }
1613
1614         internals->mode = mode;
1615
1616         return 0;
1617 }
1618
1619
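/*
 * Create the per-slave mempool used for LACP control frames and, when
 * dedicated queues are enabled, set up the extra RX/TX queue pair that
 * carries them.
 */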
1620 static int
1621 slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
1622                 struct rte_eth_dev *slave_eth_dev)
1623 {
1624         int errval = 0;
1625         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1626         struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
1627
1628         if (port->slow_pool == NULL) {
1629                 char mem_name[256];
1630                 int slave_id = slave_eth_dev->data->port_id;
1631
1632                 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
1633                                 slave_id);
1634                 port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
1635                         250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
1636                         slave_eth_dev->data->numa_node);
1637
1638                 /* Any memory allocation failure in initialization is critical because
1639                  * the resources cannot be freed, so reinitialization is impossible. */
1640                 if (port->slow_pool == NULL) {
1641                         rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
1642                                 slave_id, mem_name, rte_strerror(rte_errno));
1643                 }
1644         }
1645
1646         if (internals->mode4.dedicated_queues.enabled == 1) {
1647                 /* Configure slow Rx queue */
1648
1649                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
1650                                 internals->mode4.dedicated_queues.rx_qid, 128,
1651                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1652                                 NULL, port->slow_pool);
1653                 if (errval != 0) {
1654                         RTE_BOND_LOG(ERR,
1655                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1656                                         slave_eth_dev->data->port_id,
1657                                         internals->mode4.dedicated_queues.rx_qid,
1658                                         errval);
1659                         return errval;
1660                 }
1661
1662                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
1663                                 internals->mode4.dedicated_queues.tx_qid, 512,
1664                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1665                                 NULL);
1666                 if (errval != 0) {
1667                         RTE_BOND_LOG(ERR,
1668                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1669                                 slave_eth_dev->data->port_id,
1670                                 internals->mode4.dedicated_queues.tx_qid,
1671                                 errval);
1672                         return errval;
1673                 }
1674         }
1675         return 0;
1676 }
1677
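/*
 * (Re)configure a slave to mirror the bonded device: stop it, propagate
 * the RSS, VLAN filter and MTU settings, configure queues matching the
 * bond's, set up the 802.3ad slow queue and flow rules if required, then
 * restart the slave and resynchronize its RSS RETA and link status.
 */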
1678 int
1679 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1680                 struct rte_eth_dev *slave_eth_dev)
1681 {
1682         struct bond_rx_queue *bd_rx_q;
1683         struct bond_tx_queue *bd_tx_q;
1684         uint16_t nb_rx_queues;
1685         uint16_t nb_tx_queues;
1686
1687         int errval;
1688         uint16_t q_id;
1689         struct rte_flow_error flow_error;
1690
1691         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1692
1693         /* Stop slave */
1694         rte_eth_dev_stop(slave_eth_dev->data->port_id);
1695
1696         /* Enable interrupts on slave device if supported */
1697         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1698                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1699
1700         /* If RSS is enabled for bonding, try to enable it for slaves  */
1701         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1702                 if (internals->rss_key_len != 0) {
1703                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1704                                         internals->rss_key_len;
1705                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1706                                         internals->rss_key;
1707                 } else {
1708                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1709                 }
1710
1711                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1712                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1713                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1714                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1715         }
1716
1717         if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
1718                         DEV_RX_OFFLOAD_VLAN_FILTER)
1719                 slave_eth_dev->data->dev_conf.rxmode.offloads |=
1720                                 DEV_RX_OFFLOAD_VLAN_FILTER;
1721         else
1722                 slave_eth_dev->data->dev_conf.rxmode.offloads &=
1723                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
1724
1725         nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
1726         nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
1727
1728         if (internals->mode == BONDING_MODE_8023AD) {
1729                 if (internals->mode4.dedicated_queues.enabled == 1) {
1730                         nb_rx_queues++;
1731                         nb_tx_queues++;
1732                 }
1733         }
1734
1735         errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
1736                                      bonded_eth_dev->data->mtu);
1737         if (errval != 0 && errval != -ENOTSUP) {
1738                 RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
1739                                 slave_eth_dev->data->port_id, errval);
1740                 return errval;
1741         }
1742
1743         /* Configure device */
1744         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1745                         nb_rx_queues, nb_tx_queues,
1746                         &(slave_eth_dev->data->dev_conf));
1747         if (errval != 0) {
1748                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1749                                 slave_eth_dev->data->port_id, errval);
1750                 return errval;
1751         }
1752
1753         /* Setup Rx Queues */
1754         for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1755                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1756
1757                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1758                                 bd_rx_q->nb_rx_desc,
1759                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1760                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1761                 if (errval != 0) {
1762                         RTE_BOND_LOG(ERR,
1763                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1764                                         slave_eth_dev->data->port_id, q_id, errval);
1765                         return errval;
1766                 }
1767         }
1768
1769         /* Setup Tx Queues */
1770         for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1771                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1772
1773                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1774                                 bd_tx_q->nb_tx_desc,
1775                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1776                                 &bd_tx_q->tx_conf);
1777                 if (errval != 0) {
1778                         RTE_BOND_LOG(ERR,
1779                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1780                                 slave_eth_dev->data->port_id, q_id, errval);
1781                         return errval;
1782                 }
1783         }
1784
1785         if (internals->mode == BONDING_MODE_8023AD &&
1786                         internals->mode4.dedicated_queues.enabled == 1) {
1787                 errval = slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev);
1788                 if (errval != 0)
1789                         return errval;
1790
1791                 if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
1792                                 slave_eth_dev->data->port_id) != 0) {
1793                         RTE_BOND_LOG(ERR,
1794                                 "bond_ethdev_8023ad_flow_verify failed: port=%d",
1795                                 slave_eth_dev->data->port_id);
1796                         return -1;
1797                 }
1798
1799                 if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
1800                         rte_flow_destroy(slave_eth_dev->data->port_id,
1801                                         internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
1802                                         &flow_error);
1803
1804                 bond_ethdev_8023ad_flow_set(bonded_eth_dev,
1805                                 slave_eth_dev->data->port_id);
1806         }
1807
1808         /* Start device */
1809         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1810         if (errval != 0) {
1811                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1812                                 slave_eth_dev->data->port_id, errval);
1813                 return -1;
1814         }
1815
1816         /* If RSS is enabled for bonding, synchronize RETA */
1817         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1818                 int i;
1819                 struct bond_dev_private *internals;
1820
1821                 internals = bonded_eth_dev->data->dev_private;
1822
1823                 for (i = 0; i < internals->slave_count; i++) {
1824                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1825                                 errval = rte_eth_dev_rss_reta_update(
1826                                                 slave_eth_dev->data->port_id,
1827                                                 &internals->reta_conf[0],
1828                                                 internals->slaves[i].reta_size);
1829                                 if (errval != 0) {
1830                                         RTE_BOND_LOG(WARNING,
1831                                                      "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1832                                                      " RSS Configuration for bonding may be inconsistent.",
1833                                                      slave_eth_dev->data->port_id, errval);
1834                                 }
1835                                 break;
1836                         }
1837                 }
1838         }
1839
1840         /* If lsc interrupt is set, check initial slave's link status */
1841         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1842                 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1843                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1844                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
1845                         NULL);
1846         }
1847
1848         return 0;
1849 }
1850
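/*
 * Remove a slave from the bonded device's slave table, compacting both
 * the table and every per-flow handle array, then reset the slave device
 * so it must be reconfigured before reuse.
 */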
1851 void
1852 slave_remove(struct bond_dev_private *internals,
1853                 struct rte_eth_dev *slave_eth_dev)
1854 {
1855         uint16_t i;
1856
1857         for (i = 0; i < internals->slave_count; i++)
1858                 if (internals->slaves[i].port_id ==
1859                                 slave_eth_dev->data->port_id)
1860                         break;
1861
1862         if (i < (internals->slave_count - 1)) {
1863                 struct rte_flow *flow;
1864
1865                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1866                                 sizeof(internals->slaves[0]) *
1867                                 (internals->slave_count - i - 1));
1868                 TAILQ_FOREACH(flow, &internals->flow_list, next) {
1869                         memmove(&flow->flows[i], &flow->flows[i + 1],
1870                                 sizeof(flow->flows[0]) *
1871                                 (internals->slave_count - i - 1));
1872                         flow->flows[internals->slave_count - 1] = NULL;
1873                 }
1874         }
1875
1876         internals->slave_count--;
1877
1878         /* force reconfiguration of slave interfaces */
1879         _rte_eth_dev_reset(slave_eth_dev);
1880 }
1881
1882 static void
1883 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1884
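/*
 * Record a newly added slave's details, flag it for link status polling
 * if its driver lacks LSC interrupt support, and persist its original
 * MAC address so it can be restored later.
 */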
1885 void
1886 slave_add(struct bond_dev_private *internals,
1887                 struct rte_eth_dev *slave_eth_dev)
1888 {
1889         struct bond_slave_details *slave_details =
1890                         &internals->slaves[internals->slave_count];
1891
1892         slave_details->port_id = slave_eth_dev->data->port_id;
1893         slave_details->last_link_status = 0;
1894
1895         /* Mark slave devices that don't support interrupts so we can
1896          * compensate when we start the bond
1897          */
1898         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1899                 slave_details->link_status_poll_enabled = 1;
1900         }
1901
1902         slave_details->link_status_wait_to_complete = 0;
1903         /* save the slave's original MAC address so it can be restored later */
1904         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1905                         sizeof(struct rte_ether_addr));
1906 }
1907
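/*
 * Set the current primary port. When the bond already has active slaves,
 * the proposed port is accepted only if it is one of them.
 */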
1908 void
1909 bond_ethdev_primary_set(struct bond_dev_private *internals,
1910                 uint16_t slave_port_id)
1911 {
1912         int i;
1913
1914         if (internals->active_slave_count < 1)
1915                 internals->current_primary_port = slave_port_id;
1916         else
1917                 /* Search bonded device slave ports for new proposed primary port */
1918                 for (i = 0; i < internals->active_slave_count; i++) {
1919                         if (internals->active_slaves[i] == slave_port_id)
1920                                 internals->current_primary_port = slave_port_id;
1921                 }
1922 }
1923
1924 static int
1925 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1926
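/*
 * dev_start handler for the bonded device: derive the bond MAC from the
 * primary slave when the user has not supplied one, reconfigure and start
 * every slave, arm link status polling where needed and start the
 * mode-specific (802.3ad/TLB/ALB) machinery.
 */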
1927 static int
1928 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1929 {
1930         struct bond_dev_private *internals;
1931         int i;
1932
1933         /* slave eth dev will be started by bonded device */
1934         if (check_for_bonded_ethdev(eth_dev)) {
1935                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1936                                 eth_dev->data->port_id);
1937                 return -1;
1938         }
1939
1940         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1941         eth_dev->data->dev_started = 1;
1942
1943         internals = eth_dev->data->dev_private;
1944
1945         if (internals->slave_count == 0) {
1946                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1947                 goto out_err;
1948         }
1949
1950         if (internals->user_defined_mac == 0) {
1951                 struct rte_ether_addr *new_mac_addr = NULL;
1952
1953                 for (i = 0; i < internals->slave_count; i++)
1954                         if (internals->slaves[i].port_id == internals->primary_port)
1955                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1956
1957                 if (new_mac_addr == NULL)
1958                         goto out_err;
1959
1960                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1961                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1962                                         eth_dev->data->port_id);
1963                         goto out_err;
1964                 }
1965         }
1966
1967         if (internals->mode == BONDING_MODE_8023AD) {
1968                 if (internals->mode4.dedicated_queues.enabled == 1) {
1969                         internals->mode4.dedicated_queues.rx_qid =
1970                                         eth_dev->data->nb_rx_queues;
1971                         internals->mode4.dedicated_queues.tx_qid =
1972                                         eth_dev->data->nb_tx_queues;
1973                 }
1974         }
1975
1976
1977         /* Reconfigure each slave device if starting bonded device */
1978         for (i = 0; i < internals->slave_count; i++) {
1979                 struct rte_eth_dev *slave_ethdev =
1980                                 &(rte_eth_devices[internals->slaves[i].port_id]);
1981                 if (slave_configure(eth_dev, slave_ethdev) != 0) {
1982                         RTE_BOND_LOG(ERR,
1983                                 "bonded port (%d) failed to reconfigure slave device (%d)",
1984                                 eth_dev->data->port_id,
1985                                 internals->slaves[i].port_id);
1986                         goto out_err;
1987                 }
1988                 /* We will need to poll for link status if any slave doesn't
1989                  * support interrupts
1990                  */
1991                 if (internals->slaves[i].link_status_poll_enabled)
1992                         internals->link_status_polling_enabled = 1;
1993         }
1994
1995         /* start polling if needed */
1996         if (internals->link_status_polling_enabled) {
1997                 rte_eal_alarm_set(
1998                         internals->link_status_polling_interval_ms * 1000,
1999                         bond_ethdev_slave_link_status_change_monitor,
2000                         (void *)&rte_eth_devices[internals->port_id]);
2001         }
2002
2003         /* Update all slave devices' MACs */
2004         if (mac_address_slaves_update(eth_dev) != 0)
2005                 goto out_err;
2006
2007         if (internals->user_defined_primary_port)
2008                 bond_ethdev_primary_set(internals, internals->primary_port);
2009
2010         if (internals->mode == BONDING_MODE_8023AD)
2011                 bond_mode_8023ad_start(eth_dev);
2012
2013         if (internals->mode == BONDING_MODE_TLB ||
2014                         internals->mode == BONDING_MODE_ALB)
2015                 bond_tlb_enable(internals);
2016
2017         return 0;
2018
2019 out_err:
2020         eth_dev->data->dev_started = 0;
2021         return -1;
2022 }
2023
2024 static void
2025 bond_ethdev_free_queues(struct rte_eth_dev *dev)
2026 {
2027         uint16_t i;
2028
2029         if (dev->data->rx_queues != NULL) {
2030                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2031                         rte_free(dev->data->rx_queues[i]);
2032                         dev->data->rx_queues[i] = NULL;
2033                 }
2034                 dev->data->nb_rx_queues = 0;
2035         }
2036
2037         if (dev->data->tx_queues != NULL) {
2038                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2039                         rte_free(dev->data->tx_queues[i]);
2040                         dev->data->tx_queues[i] = NULL;
2041                 }
2042                 dev->data->nb_tx_queues = 0;
2043         }
2044 }
2045
2046 void
2047 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
2048 {
2049         struct bond_dev_private *internals = eth_dev->data->dev_private;
2050         uint16_t i;
2051
2052         if (internals->mode == BONDING_MODE_8023AD) {
2053                 struct port *port;
2054                 void *pkt = NULL;
2055
2056                 bond_mode_8023ad_stop(eth_dev);
2057
2058                 /* Discard all messages to/from mode 4 state machines */
2059                 for (i = 0; i < internals->active_slave_count; i++) {
2060                         port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
2061
2062                         RTE_ASSERT(port->rx_ring != NULL);
2063                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
2064                                 rte_pktmbuf_free(pkt);
2065
2066                         RTE_ASSERT(port->tx_ring != NULL);
2067                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
2068                                 rte_pktmbuf_free(pkt);
2069                 }
2070         }
2071
2072         if (internals->mode == BONDING_MODE_TLB ||
2073                         internals->mode == BONDING_MODE_ALB) {
2074                 bond_tlb_disable(internals);
2075                 for (i = 0; i < internals->active_slave_count; i++)
2076                         tlb_last_obytets[internals->active_slaves[i]] = 0;
2077         }
2078
2079         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2080         eth_dev->data->dev_started = 0;
2081
2082         internals->link_status_polling_enabled = 0;
2083         for (i = 0; i < internals->slave_count; i++) {
2084                 uint16_t slave_id = internals->slaves[i].port_id;
2085                 if (find_slave_by_id(internals->active_slaves,
2086                                 internals->active_slave_count, slave_id) !=
2087                                                 internals->active_slave_count) {
2088                         internals->slaves[i].last_link_status = 0;
2089                         rte_eth_dev_stop(slave_id);
2090                         deactivate_slave(eth_dev, slave_id);
2091                 }
2092         }
2093 }
2094
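/*
 * dev_close handler: stop and detach every slave. A slave that fails to
 * be removed is counted in 'skipped' so the loop cannot spin forever on a
 * persistent removal failure.
 */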
2095 void
2096 bond_ethdev_close(struct rte_eth_dev *dev)
2097 {
2098         struct bond_dev_private *internals = dev->data->dev_private;
2099         uint16_t bond_port_id = internals->port_id;
2100         int skipped = 0;
2101         struct rte_flow_error ferror;
2102
2103         RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
2104         while (internals->slave_count != skipped) {
2105                 uint16_t port_id = internals->slaves[skipped].port_id;
2106
2107                 rte_eth_dev_stop(port_id);
2108
2109                 if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
2110                         RTE_BOND_LOG(ERR,
2111                                      "Failed to remove port %d from bonded device %s",
2112                                      port_id, dev->device->name);
2113                         skipped++;
2114                 }
2115         }
2116         bond_flow_ops.flush(dev, &ferror);
2117         bond_ethdev_free_queues(dev);
2118         rte_bitmap_reset(internals->vlan_filter_bmp);
2119 }
2120
2121 /* forward declaration */
2122 static int bond_ethdev_configure(struct rte_eth_dev *dev);
2123
2124 static int
2125 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2126 {
2127         struct bond_dev_private *internals = dev->data->dev_private;
2128         struct bond_slave_details slave;
2129         int ret;
2130
2131         uint16_t max_nb_rx_queues = UINT16_MAX;
2132         uint16_t max_nb_tx_queues = UINT16_MAX;
2133         uint16_t max_rx_desc_lim = UINT16_MAX;
2134         uint16_t max_tx_desc_lim = UINT16_MAX;
2135
2136         dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
2137
2138         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2139                         internals->candidate_max_rx_pktlen :
2140                         RTE_ETHER_MAX_JUMBO_FRAME_LEN;
2141
2142         /* The max number of tx/rx queues that the bonded device can support
2143          * is the minimum across all of its slaves, as every slave must be
2144          * capable of supporting the same number of tx/rx queues.
2145          */
2146         if (internals->slave_count > 0) {
2147                 struct rte_eth_dev_info slave_info;
2148                 uint16_t idx;
2149
2150                 for (idx = 0; idx < internals->slave_count; idx++) {
2151                         slave = internals->slaves[idx];
2152                         ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
2153                         if (ret != 0) {
2154                                 RTE_BOND_LOG(ERR,
2155                                         "%s: Error during getting device (port %u) info: %s\n",
2156                                         __func__,
2157                                         slave.port_id,
2158                                         strerror(-ret));
2159
2160                                 return ret;
2161                         }
2162
2163                         if (slave_info.max_rx_queues < max_nb_rx_queues)
2164                                 max_nb_rx_queues = slave_info.max_rx_queues;
2165
2166                         if (slave_info.max_tx_queues < max_nb_tx_queues)
2167                                 max_nb_tx_queues = slave_info.max_tx_queues;
2168
2169                         if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
2170                                 max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
2171
2172                         if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
2173                                 max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
2174                 }
2175         }
2176
2177         dev_info->max_rx_queues = max_nb_rx_queues;
2178         dev_info->max_tx_queues = max_nb_tx_queues;
2179
2180         memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2181                sizeof(dev_info->default_rxconf));
2182         memcpy(&dev_info->default_txconf, &internals->default_txconf,
2183                sizeof(dev_info->default_txconf));
2184
2185         dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
2186         dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
2187
2188         /**
2189          * If dedicated hw queues are enabled for the link bonding device in
2190          * LACP mode, then reduce the maximum number of data path queues by 1.
2191          */
2192         if (internals->mode == BONDING_MODE_8023AD &&
2193                 internals->mode4.dedicated_queues.enabled == 1) {
2194                 dev_info->max_rx_queues--;
2195                 dev_info->max_tx_queues--;
2196         }
2197
2198         dev_info->min_rx_bufsize = 0;
2199
2200         dev_info->rx_offload_capa = internals->rx_offload_capa;
2201         dev_info->tx_offload_capa = internals->tx_offload_capa;
2202         dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2203         dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2204         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2205
2206         dev_info->reta_size = internals->reta_size;
2207
2208         return 0;
2209 }
2210
2211 static int
2212 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2213 {
2214         int res;
2215         uint16_t i;
2216         struct bond_dev_private *internals = dev->data->dev_private;
2217
2218         /* don't do this while a slave is being added */
2219         rte_spinlock_lock(&internals->lock);
2220
2221         if (on)
2222                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2223         else
2224                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2225
2226         for (i = 0; i < internals->slave_count; i++) {
2227                 uint16_t port_id = internals->slaves[i].port_id;
2228
2229                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2230                 if (res == -ENOTSUP)
2231                         RTE_BOND_LOG(WARNING,
2232                                      "Setting VLAN filter on slave port %u not supported.",
2233                                      port_id);
2234         }
2235
2236         rte_spinlock_unlock(&internals->lock);
2237         return 0;
2238 }
2239
2240 static int
2241 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
2242                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
2243                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
2244 {
2245         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
2246                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
2247                                         0, dev->data->numa_node);
2248         if (bd_rx_q == NULL)
2249                 return -1;
2250
2251         bd_rx_q->queue_id = rx_queue_id;
2252         bd_rx_q->dev_private = dev->data->dev_private;
2253
2254         bd_rx_q->nb_rx_desc = nb_rx_desc;
2255
2256         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
2257         bd_rx_q->mb_pool = mb_pool;
2258
2259         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
2260
2261         return 0;
2262 }
2263
2264 static int
2265 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
2266                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
2267                 const struct rte_eth_txconf *tx_conf)
2268 {
2269         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
2270                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
2271                                         0, dev->data->numa_node);
2272
2273         if (bd_tx_q == NULL)
2274                 return -1;
2275
2276         bd_tx_q->queue_id = tx_queue_id;
2277         bd_tx_q->dev_private = dev->data->dev_private;
2278
2279         bd_tx_q->nb_tx_desc = nb_tx_desc;
2280         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
2281
2282         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
2283
2284         return 0;
2285 }
2286
2287 static void
2288 bond_ethdev_rx_queue_release(void *queue)
2289 {
2290         if (queue == NULL)
2291                 return;
2292
2293         rte_free(queue);
2294 }
2295
2296 static void
2297 bond_ethdev_tx_queue_release(void *queue)
2298 {
2299         if (queue == NULL)
2300                 return;
2301
2302         rte_free(queue);
2303 }
2304
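/*
 * Alarm callback that polls the link status of slaves whose drivers lack
 * LSC interrupt support, fires the LSC event callback on any change and
 * re-arms itself for the next polling interval.
 */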
2305 static void
2306 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
2307 {
2308         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
2309         struct bond_dev_private *internals;
2310
2311         /* polling_slave_found defaults to true so that the polling alarm is
2312          * not disabled if we fail to acquire the lock */
2313         int i, polling_slave_found = 1;
2314
2315         if (cb_arg == NULL)
2316                 return;
2317
2318         bonded_ethdev = cb_arg;
2319         internals = bonded_ethdev->data->dev_private;
2320
2321         if (!bonded_ethdev->data->dev_started ||
2322                 !internals->link_status_polling_enabled)
2323                 return;
2324
2325         /* If the device is currently being configured then don't check the
2326          * slaves' link status; wait until the next period */
2327         if (rte_spinlock_trylock(&internals->lock)) {
2328                 if (internals->slave_count > 0)
2329                         polling_slave_found = 0;
2330
2331                 for (i = 0; i < internals->slave_count; i++) {
2332                         if (!internals->slaves[i].link_status_poll_enabled)
2333                                 continue;
2334
2335                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
2336                         polling_slave_found = 1;
2337
2338                         /* Update slave link status */
2339                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
2340                                         internals->slaves[i].link_status_wait_to_complete);
2341
2342                         /* if link status has changed since last checked then call lsc
2343                          * event callback */
2344                         if (slave_ethdev->data->dev_link.link_status !=
2345                                         internals->slaves[i].last_link_status) {
2346                                 internals->slaves[i].last_link_status =
2347                                                 slave_ethdev->data->dev_link.link_status;
2348
2349                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
2350                                                 RTE_ETH_EVENT_INTR_LSC,
2351                                                 &bonded_ethdev->data->port_id,
2352                                                 NULL);
2353                         }
2354                 }
2355                 rte_spinlock_unlock(&internals->lock);
2356         }
2357
2358         if (polling_slave_found)
2359                 /* Set alarm to continue monitoring link status of slave ethdev's */
2360                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
2361                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
2362 }
2363
2364 static int
2365 bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2366 {
2367         int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
2368
2369         struct bond_dev_private *bond_ctx;
2370         struct rte_eth_link slave_link;
2371
2372         bool one_link_update_succeeded;
2373         uint32_t idx;
2374         int ret;
2375
2376         bond_ctx = ethdev->data->dev_private;
2377
2378         ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2379
2380         if (ethdev->data->dev_started == 0 ||
2381                         bond_ctx->active_slave_count == 0) {
2382                 ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
2383                 return 0;
2384         }
2385
2386         ethdev->data->dev_link.link_status = ETH_LINK_UP;
2387
2388         if (wait_to_complete)
2389                 link_update = rte_eth_link_get;
2390         else
2391                 link_update = rte_eth_link_get_nowait;
2392
2393         switch (bond_ctx->mode) {
2394         case BONDING_MODE_BROADCAST:
2395                 /**
2396                  * Setting link speed to UINT32_MAX to ensure we pick up the
2397                  * value of the first active slave
2398                  */
2399                 ethdev->data->dev_link.link_speed = UINT32_MAX;
2400
2401                 /**
2402                  * The bonded link speed is the minimum of all the slaves'
2403                  * link speeds, since a slower slave would lose packets if
2404                  * transmission were attempted at a higher rate
2405                  */
2406                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2407                         ret = link_update(bond_ctx->active_slaves[idx],
2408                                           &slave_link);
2409                         if (ret < 0) {
2410                                 ethdev->data->dev_link.link_speed =
2411                                         ETH_SPEED_NUM_NONE;
2412                                 RTE_BOND_LOG(ERR,
2413                                         "Slave (port %u) link get failed: %s",
2414                                         bond_ctx->active_slaves[idx],
2415                                         rte_strerror(-ret));
2416                                 return 0;
2417                         }
2418
2419                         if (slave_link.link_speed <
2420                                         ethdev->data->dev_link.link_speed)
2421                                 ethdev->data->dev_link.link_speed =
2422                                                 slave_link.link_speed;
2423                 }
2424                 break;
2425         case BONDING_MODE_ACTIVE_BACKUP:
2426                 /* Current primary slave */
2427                 ret = link_update(bond_ctx->current_primary_port, &slave_link);
2428                 if (ret < 0) {
2429                         RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
2430                                 bond_ctx->current_primary_port,
2431                                 rte_strerror(-ret));
2432                         return 0;
2433                 }
2434
2435                 ethdev->data->dev_link.link_speed = slave_link.link_speed;
2436                 break;
2437         case BONDING_MODE_8023AD:
2438                 ethdev->data->dev_link.link_autoneg =
2439                                 bond_ctx->mode4.slave_link.link_autoneg;
2440                 ethdev->data->dev_link.link_duplex =
2441                                 bond_ctx->mode4.slave_link.link_duplex;
2442                 /* fall through */
2443                 /* to update link speed */
2444         case BONDING_MODE_ROUND_ROBIN:
2445         case BONDING_MODE_BALANCE:
2446         case BONDING_MODE_TLB:
2447         case BONDING_MODE_ALB:
2448         default:
2449                 /**
2450                  * In these modes the maximum theoretical link speed is the
2451                  * sum of all the slaves' link speeds
2452                  */
2453                 ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2454                 one_link_update_succeeded = false;
2455
2456                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2457                         ret = link_update(bond_ctx->active_slaves[idx],
2458                                         &slave_link);
2459                         if (ret < 0) {
2460                                 RTE_BOND_LOG(ERR,
2461                                         "Slave (port %u) link get failed: %s",
2462                                         bond_ctx->active_slaves[idx],
2463                                         rte_strerror(-ret));
2464                                 continue;
2465                         }
2466
2467                         one_link_update_succeeded = true;
2468                         ethdev->data->dev_link.link_speed +=
2469                                         slave_link.link_speed;
2470                 }
2471
2472                 if (!one_link_update_succeeded) {
2473                         RTE_BOND_LOG(ERR, "All slaves link get failed");
2474                         return 0;
2475                 }
2476         }
2477
2478
2479         return 0;
2480 }
2481
2482
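/*
 * Aggregate the statistics of all slaves into the bonded device's stats;
 * per-queue counters are summed by queue index.
 */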
2483 static int
2484 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2485 {
2486         struct bond_dev_private *internals = dev->data->dev_private;
2487         struct rte_eth_stats slave_stats;
2488         int i, j;
2489
2490         for (i = 0; i < internals->slave_count; i++) {
2491                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
2492
2493                 stats->ipackets += slave_stats.ipackets;
2494                 stats->opackets += slave_stats.opackets;
2495                 stats->ibytes += slave_stats.ibytes;
2496                 stats->obytes += slave_stats.obytes;
2497                 stats->imissed += slave_stats.imissed;
2498                 stats->ierrors += slave_stats.ierrors;
2499                 stats->oerrors += slave_stats.oerrors;
2500                 stats->rx_nombuf += slave_stats.rx_nombuf;
2501
2502                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
2503                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
2504                         stats->q_opackets[j] += slave_stats.q_opackets[j];
2505                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
2506                         stats->q_obytes[j] += slave_stats.q_obytes[j];
2507                         stats->q_errors[j] += slave_stats.q_errors[j];
2508                 }
2509
2510         }
2511
2512         return 0;
2513 }
2514
2515 static int
2516 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
2517 {
2518         struct bond_dev_private *internals = dev->data->dev_private;
2519         int i;
2520         int err;
2521         int ret;
2522
2523         for (i = 0, err = 0; i < internals->slave_count; i++) {
2524                 ret = rte_eth_stats_reset(internals->slaves[i].port_id);
2525                 if (ret != 0)
2526                         err = ret;
2527         }
2528
2529         return err;
2530 }
2531
2532 static int
2533 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2534 {
2535         struct bond_dev_private *internals = eth_dev->data->dev_private;
2536         int i;
2537         int ret = 0;
2538         uint16_t port_id;
2539
2540         switch (internals->mode) {
2541         /* Promiscuous mode is propagated to all slaves */
2542         case BONDING_MODE_ROUND_ROBIN:
2543         case BONDING_MODE_BALANCE:
2544         case BONDING_MODE_BROADCAST:
2545         case BONDING_MODE_8023AD: {
2546                 unsigned int slave_ok = 0;
2547
2548                 for (i = 0; i < internals->slave_count; i++) {
2549                         port_id = internals->slaves[i].port_id;
2550
2551                         ret = rte_eth_promiscuous_enable(port_id);
2552                         if (ret != 0)
2553                                 RTE_BOND_LOG(ERR,
2554                                         "Failed to enable promiscuous mode for port %u: %s",
2555                                         port_id, rte_strerror(-ret));
2556                         else
2557                                 slave_ok++;
2558                 }
2559                 /*
2560                  * Report success if the operation succeeded on at least
2561                  * one slave. Otherwise return the last error code.
2562                  */
2563                 if (slave_ok > 0)
2564                         ret = 0;
2565                 break;
2566         }
2567         /* Promiscuous mode is propagated only to primary slave */
2568         case BONDING_MODE_ACTIVE_BACKUP:
2569         case BONDING_MODE_TLB:
2570         case BONDING_MODE_ALB:
2571         default:
2572                 /* Do not touch promisc when there cannot be primary ports */
2573                 if (internals->slave_count == 0)
2574                         break;
2575                 port_id = internals->current_primary_port;
2576                 ret = rte_eth_promiscuous_enable(port_id);
2577                 if (ret != 0)
2578                         RTE_BOND_LOG(ERR,
2579                                 "Failed to enable promiscuous mode for port %u: %s",
2580                                 port_id, rte_strerror(-ret));
2581         }
2582
2583         return ret;
2584 }
2585
2586 static int
2587 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
2588 {
2589         struct bond_dev_private *internals = dev->data->dev_private;
2590         int i;
2591         int ret = 0;
2592         uint16_t port_id;
2593
2594         switch (internals->mode) {
2595         /* Promiscuous mode is propagated to all slaves */
2596         case BONDING_MODE_ROUND_ROBIN:
2597         case BONDING_MODE_BALANCE:
2598         case BONDING_MODE_BROADCAST:
2599         case BONDING_MODE_8023AD: {
2600                 unsigned int slave_ok = 0;
2601
2602                 for (i = 0; i < internals->slave_count; i++) {
2603                         port_id = internals->slaves[i].port_id;
2604
2605                         if (internals->mode == BONDING_MODE_8023AD &&
2606                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2607                                         BOND_8023AD_FORCED_PROMISC) {
2608                                 slave_ok++;
2609                                 continue;
2610                         }
2611                         ret = rte_eth_promiscuous_disable(port_id);
2612                         if (ret != 0)
2613                                 RTE_BOND_LOG(ERR,
2614                                         "Failed to disable promiscuous mode for port %u: %s",
2615                                         port_id, rte_strerror(-ret));
2616                         else
2617                                 slave_ok++;
2618                 }
2619                 /*
2620                  * Report success if the operation succeeded on at least
2621                  * one slave. Otherwise return the last error code.
2622                  */
2623                 if (slave_ok > 0)
2624                         ret = 0;
2625                 break;
2626         }
2627         /* Promiscuous mode is propagated only to primary slave */
2628         case BONDING_MODE_ACTIVE_BACKUP:
2629         case BONDING_MODE_TLB:
2630         case BONDING_MODE_ALB:
2631         default:
2632                 /* Do not touch promisc when there cannot be primary ports */
2633                 if (internals->slave_count == 0)
2634                         break;
2635                 port_id = internals->current_primary_port;
2636                 ret = rte_eth_promiscuous_disable(port_id);
2637                 if (ret != 0)
2638                         RTE_BOND_LOG(ERR,
2639                                 "Failed to disable promiscuous mode for port %u: %s",
2640                                 port_id, rte_strerror(-ret));
2641         }
2642
2643         return ret;
2644 }
2645
2646 static int
2647 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
2648 {
2649         struct bond_dev_private *internals = eth_dev->data->dev_private;
2650         int i;
2651         int ret = 0;
2652         uint16_t port_id;
2653
2654         switch (internals->mode) {
2655         /* allmulti mode is propagated to all slaves */
2656         case BONDING_MODE_ROUND_ROBIN:
2657         case BONDING_MODE_BALANCE:
2658         case BONDING_MODE_BROADCAST:
2659         case BONDING_MODE_8023AD: {
2660                 unsigned int slave_ok = 0;
2661
2662                 for (i = 0; i < internals->slave_count; i++) {
2663                         port_id = internals->slaves[i].port_id;
2664
2665                         ret = rte_eth_allmulticast_enable(port_id);
2666                         if (ret != 0)
2667                                 RTE_BOND_LOG(ERR,
2668                                         "Failed to enable allmulti mode for port %u: %s",
2669                                         port_id, rte_strerror(-ret));
2670                         else
2671                                 slave_ok++;
2672                 }
2673                 /*
2674                  * Report success if the operation succeeded on at least
2675                  * one slave. Otherwise return the last error code.
2676                  */
2677                 if (slave_ok > 0)
2678                         ret = 0;
2679                 break;
2680         }
2681         /* allmulti mode is propagated only to primary slave */
2682         case BONDING_MODE_ACTIVE_BACKUP:
2683         case BONDING_MODE_TLB:
2684         case BONDING_MODE_ALB:
2685         default:
2686                 /* Do not touch allmulti when there cannot be primary ports */
2687                 if (internals->slave_count == 0)
2688                         break;
2689                 port_id = internals->current_primary_port;
2690                 ret = rte_eth_allmulticast_enable(port_id);
2691                 if (ret != 0)
2692                         RTE_BOND_LOG(ERR,
2693                                 "Failed to enable allmulti mode for port %u: %s",
2694                                 port_id, rte_strerror(-ret));
2695         }
2696
2697         return ret;
2698 }
2699
2700 static int
2701 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
2702 {
2703         struct bond_dev_private *internals = eth_dev->data->dev_private;
2704         int i;
2705         int ret = 0;
2706         uint16_t port_id;
2707
2708         switch (internals->mode) {
2709         /* allmulti mode is propagated to all slaves */
2710         case BONDING_MODE_ROUND_ROBIN:
2711         case BONDING_MODE_BALANCE:
2712         case BONDING_MODE_BROADCAST:
2713         case BONDING_MODE_8023AD: {
2714                 unsigned int slave_ok = 0;
2715
2716                 for (i = 0; i < internals->slave_count; i++) {
2717                         uint16_t port_id = internals->slaves[i].port_id;
2718
2719                         if (internals->mode == BONDING_MODE_8023AD &&
2720                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2721                                         BOND_8023AD_FORCED_ALLMULTI)
2722                                 continue;
2723
2724                         ret = rte_eth_allmulticast_disable(port_id);
2725                         if (ret != 0)
2726                                 RTE_BOND_LOG(ERR,
2727                                         "Failed to disable allmulti mode for port %u: %s",
2728                                         port_id, rte_strerror(-ret));
2729                         else
2730                                 slave_ok++;
2731                 }
2732                 /*
2733                  * Report success if the operation succeeded on at least
2734                  * one slave. Otherwise return the last error code.
2735                  */
2736                 if (slave_ok > 0)
2737                         ret = 0;
2738                 break;
2739         }
2740         /* allmulti mode is propagated only to primary slave */
2741         case BONDING_MODE_ACTIVE_BACKUP:
2742         case BONDING_MODE_TLB:
2743         case BONDING_MODE_ALB:
2744         default:
2745                 /* Do not touch allmulti when there cannot be primary ports */
2746                 if (internals->slave_count == 0)
2747                         break;
2748                 port_id = internals->current_primary_port;
2749                 ret = rte_eth_allmulticast_disable(port_id);
2750                 if (ret != 0)
2751                         RTE_BOND_LOG(ERR,
2752                                 "Failed to disable allmulti mode for port %u: %s",
2753                                 port_id, rte_strerror(-ret));
2754         }
2755
2756         return ret;
2757 }
2758
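/*
 * Example (application side, illustrative): promiscuous and allmulticast
 * settings are applied to the bonded port through the generic ethdev API
 * and propagated by the handlers above. A minimal sketch, assuming
 * "bond_port_id" names a configured bonded port:
 *
 *     int ret = rte_eth_promiscuous_enable(bond_port_id);
 *     if (ret != 0)
 *             printf("promisc enable failed: %s\n", rte_strerror(-ret));
 *     ret = rte_eth_allmulticast_enable(bond_port_id);
 *     if (ret != 0)
 *             printf("allmulti enable failed: %s\n", rte_strerror(-ret));
 *
 * In round robin, balance, broadcast and 802.3ad modes this reaches every
 * slave; in active backup, TLB and ALB only the current primary is touched.
 */
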
2759 static void
2760 bond_ethdev_delayed_lsc_propagation(void *arg)
2761 {
2762         if (arg == NULL)
2763                 return;
2764
2765         _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2766                         RTE_ETH_EVENT_INTR_LSC, NULL);
2767 }
2768
2769 int
2770 bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2771                 void *param, void *ret_param __rte_unused)
2772 {
2773         struct rte_eth_dev *bonded_eth_dev;
2774         struct bond_dev_private *internals;
2775         struct rte_eth_link link;
2776         int rc = -1;
2777         int ret;
2778
2779         uint8_t lsc_flag = 0;
2780         int valid_slave = 0;
2781         uint16_t active_pos;
2782         uint16_t i;
2783
2784         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2785                 return rc;
2786
2787         bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
2788
2789         if (check_for_bonded_ethdev(bonded_eth_dev))
2790                 return rc;
2791
2792         internals = bonded_eth_dev->data->dev_private;
2793
2794         /* If the device isn't started don't handle interrupts */
2795         if (!bonded_eth_dev->data->dev_started)
2796                 return rc;
2797
2798         /* verify that port_id is a valid slave of bonded port */
2799         for (i = 0; i < internals->slave_count; i++) {
2800                 if (internals->slaves[i].port_id == port_id) {
2801                         valid_slave = 1;
2802                         break;
2803                 }
2804         }
2805
2806         if (!valid_slave)
2807                 return rc;
2808
2809         /* Synchronize parallel calls of the LSC callback, triggered either by
2810          * real link events from the slave PMDs or by the bonding PMD itself.
2811          */
2812         rte_spinlock_lock(&internals->lsc_lock);
2813
2814         /* Search for port in active port list */
2815         active_pos = find_slave_by_id(internals->active_slaves,
2816                         internals->active_slave_count, port_id);
2817
2818         ret = rte_eth_link_get_nowait(port_id, &link);
2819         if (ret < 0)
2820                 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);
2821
2822         if (ret == 0 && link.link_status) {
2823                 if (active_pos < internals->active_slave_count)
2824                         goto link_update;
2825
2826                 /* Check link state properties if the bonded link is up */
2827                 if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
2828                         if (link_properties_valid(bonded_eth_dev, &link) != 0)
2829                                 RTE_BOND_LOG(ERR, "Invalid link properties "
2830                                              "for slave %d in bonding mode %d",
2831                                              port_id, internals->mode);
2832                 } else {
2833                         /* inherit slave link properties */
2834                         link_properties_set(bonded_eth_dev, &link);
2835                 }
2836
2837                 /* If no active slave ports then set this port to be
2838                  * the primary port.
2839                  */
2840                 if (internals->active_slave_count < 1) {
2841                         /* If first active slave, then change link status */
2842                         bonded_eth_dev->data->dev_link.link_status =
2843                                                                 ETH_LINK_UP;
2844                         internals->current_primary_port = port_id;
2845                         lsc_flag = 1;
2846
2847                         mac_address_slaves_update(bonded_eth_dev);
2848                 }
2849
2850                 activate_slave(bonded_eth_dev, port_id);
2851
2852                 /* If the user has defined the primary port then default to
2853                  * using it.
2854                  */
2855                 if (internals->user_defined_primary_port &&
2856                                 internals->primary_port == port_id)
2857                         bond_ethdev_primary_set(internals, port_id);
2858         } else {
2859                 if (active_pos == internals->active_slave_count)
2860                         goto link_update;
2861
2862                 /* Remove from active slave list */
2863                 deactivate_slave(bonded_eth_dev, port_id);
2864
2865                 if (internals->active_slave_count < 1)
2866                         lsc_flag = 1;
2867
2868                 /* Update primary id: take the first active slave from the list, or
2869                  * fall back to the user-configured primary port if none is active */
2870                 if (port_id == internals->current_primary_port) {
2871                         if (internals->active_slave_count > 0)
2872                                 bond_ethdev_primary_set(internals,
2873                                                 internals->active_slaves[0]);
2874                         else
2875                                 internals->current_primary_port = internals->primary_port;
2876                         mac_address_slaves_update(bonded_eth_dev);
2877                 }
2878         }
2879
2880 link_update:
2881         /*
2882          * Update bonded device link properties after any change to active
2883          * slaves.
2884          */
2885         bond_ethdev_link_update(bonded_eth_dev, 0);
2886
2887         if (lsc_flag) {
2888                 /* Cancel any possible outstanding interrupts if delays are enabled */
2889                 if (internals->link_up_delay_ms > 0 ||
2890                         internals->link_down_delay_ms > 0)
2891                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2892                                         bonded_eth_dev);
2893
2894                 if (bonded_eth_dev->data->dev_link.link_status) {
2895                         if (internals->link_up_delay_ms > 0)
2896                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2897                                                 bond_ethdev_delayed_lsc_propagation,
2898                                                 (void *)bonded_eth_dev);
2899                         else
2900                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2901                                                 RTE_ETH_EVENT_INTR_LSC,
2902                                                 NULL);
2903
2904                 } else {
2905                         if (internals->link_down_delay_ms > 0)
2906                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2907                                                 bond_ethdev_delayed_lsc_propagation,
2908                                                 (void *)bonded_eth_dev);
2909                         else
2910                                 _rte_eth_dev_callback_process(bonded_eth_dev,
2911                                                 RTE_ETH_EVENT_INTR_LSC,
2912                                                 NULL);
2913                 }
2914         }
2915
2916         rte_spinlock_unlock(&internals->lsc_lock);
2917
2918         return rc;
2919 }
2920
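/*
 * The handler above is registered on each slave port when the slave is
 * added; an application can hook the bonded port's own LSC events the same
 * way. A minimal sketch, assuming "bond_port_id" is valid ("app_lsc_cb" is
 * a hypothetical name):
 *
 *     static int
 *     app_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                void *cb_arg, void *ret_param)
 *     {
 *             RTE_SET_USED(cb_arg);
 *             RTE_SET_USED(ret_param);
 *             if (event == RTE_ETH_EVENT_INTR_LSC)
 *                     printf("port %u link state changed\n", port_id);
 *             return 0;
 *     }
 *
 *     rte_eth_dev_callback_register(bond_port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   app_lsc_cb, NULL);
 */
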
2921 static int
2922 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2923                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2924 {
2925         unsigned i, j;
2926         int result = 0;
2927         int slave_reta_size;
2928         unsigned reta_count;
2929         struct bond_dev_private *internals = dev->data->dev_private;
2930
2931         if (reta_size != internals->reta_size)
2932                 return -EINVAL;
2933
2934         /* Copy RETA table */
2935         reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2936
2937         for (i = 0; i < reta_count; i++) {
2938                 internals->reta_conf[i].mask = reta_conf[i].mask;
2939                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2940                         if ((reta_conf[i].mask >> j) & 0x01)
2941                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2942         }
2943
2944         /* Fill rest of array */
2945         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2946                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2947                                 sizeof(internals->reta_conf[0]) * reta_count);
2948
2949         /* Propagate RETA over slaves */
2950         for (i = 0; i < internals->slave_count; i++) {
2951                 slave_reta_size = internals->slaves[i].reta_size;
2952                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2953                                 &internals->reta_conf[0], slave_reta_size);
2954                 if (result < 0)
2955                         return result;
2956         }
2957
2958         return 0;
2959 }
2960
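/*
 * A minimal sketch of driving the RETA update above from an application,
 * assuming the bonded port reports a reta_size of 128 (two groups of
 * RTE_RETA_GROUP_SIZE entries) and "nb_queues" Rx queues are configured:
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2];
 *     unsigned int k;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (k = 0; k < 128; k++) {
 *             reta_conf[k / RTE_RETA_GROUP_SIZE].mask |=
 *                             1ULL << (k % RTE_RETA_GROUP_SIZE);
 *             reta_conf[k / RTE_RETA_GROUP_SIZE].reta[k % RTE_RETA_GROUP_SIZE] =
 *                             k % nb_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(bond_port_id, reta_conf, 128);
 */
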
2961 static int
2962 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2963                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2964 {
2965         int i, j;
2966         struct bond_dev_private *internals = dev->data->dev_private;
2967
2968         if (reta_size != internals->reta_size)
2969                 return -EINVAL;
2970
2971         /* Copy RETA table */
2972         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2973                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2974                         if ((reta_conf[i].mask >> j) & 0x01)
2975                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2976
2977         return 0;
2978 }
2979
2980 static int
2981 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2982                 struct rte_eth_rss_conf *rss_conf)
2983 {
2984         int i, result = 0;
2985         struct bond_dev_private *internals = dev->data->dev_private;
2986         struct rte_eth_rss_conf bond_rss_conf;
2987
2988         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2989
2990         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2991
2992         if (bond_rss_conf.rss_hf != 0)
2993                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2994
2995         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2996                         sizeof(internals->rss_key)) {
2997                 if (bond_rss_conf.rss_key_len == 0)
2998                         bond_rss_conf.rss_key_len = 40; /* default RSS key length */
2999                 internals->rss_key_len = bond_rss_conf.rss_key_len;
3000                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
3001                                 internals->rss_key_len);
3002         }
3003
3004         for (i = 0; i < internals->slave_count; i++) {
3005                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
3006                                 &bond_rss_conf);
3007                 if (result < 0)
3008                         return result;
3009         }
3010
3011         return 0;
3012 }
3013
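/*
 * A minimal sketch of reconfiguring the RSS hash through the handler above,
 * assuming "bond_port_id" was configured with ETH_MQ_RX_RSS. A NULL key
 * keeps the current key; hash functions outside flow_type_rss_offloads are
 * silently masked off:
 *
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = NULL,
 *             .rss_key_len = 0,
 *             .rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *     };
 *     if (rte_eth_dev_rss_hash_update(bond_port_id, &conf) < 0)
 *             printf("rss hash update failed\n");
 */
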
3014 static int
3015 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
3016                 struct rte_eth_rss_conf *rss_conf)
3017 {
3018         struct bond_dev_private *internals = dev->data->dev_private;
3019
3020         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
3021         rss_conf->rss_key_len = internals->rss_key_len;
3022         if (rss_conf->rss_key)
3023                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
3024
3025         return 0;
3026 }
3027
3028 static int
3029 bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3030 {
3031         struct rte_eth_dev *slave_eth_dev;
3032         struct bond_dev_private *internals = dev->data->dev_private;
3033         int ret, i;
3034
3035         rte_spinlock_lock(&internals->lock);
3036
3037         for (i = 0; i < internals->slave_count; i++) {
3038                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3039                 if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
3040                         rte_spinlock_unlock(&internals->lock);
3041                         return -ENOTSUP;
3042                 }
3043         }
3044         for (i = 0; i < internals->slave_count; i++) {
3045                 ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
3046                 if (ret < 0) {
3047                         rte_spinlock_unlock(&internals->lock);
3048                         return ret;
3049                 }
3050         }
3051
3052         rte_spinlock_unlock(&internals->lock);
3053         return 0;
3054 }
3055
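/*
 * The MTU handler above is all or nothing: it returns -ENOTSUP unless every
 * slave implements mtu_set, and it stops at the first slave that fails.
 * A minimal application sketch, assuming jumbo-capable slaves:
 *
 *     if (rte_eth_dev_set_mtu(bond_port_id, 9000) < 0)
 *             printf("failed to set MTU on bonded port\n");
 */
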
3056 static int
3057 bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
3058                         struct rte_ether_addr *addr)
3059 {
3060         if (mac_address_set(dev, addr)) {
3061                 RTE_BOND_LOG(ERR, "Failed to update MAC address");
3062                 return -EINVAL;
3063         }
3064
3065         return 0;
3066 }
3067
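/*
 * A minimal sketch of replacing the bonded port's default MAC through the
 * handler above, assuming "new_mac" holds a locally administered unicast
 * address:
 *
 *     struct rte_ether_addr new_mac = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *     };
 *     if (rte_eth_dev_default_mac_addr_set(bond_port_id, &new_mac) != 0)
 *             printf("failed to set bonded MAC\n");
 */
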
3068 static int
3069 bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
3070                  enum rte_filter_type type, enum rte_filter_op op, void *arg)
3071 {
3072         if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
3073                 *(const void **)arg = &bond_flow_ops;
3074                 return 0;
3075         }
3076         return -ENOTSUP;
3077 }
3078
3079 static int
3080 bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
3081                         struct rte_ether_addr *mac_addr,
3082                         __rte_unused uint32_t index, uint32_t vmdq)
3083 {
3084         struct rte_eth_dev *slave_eth_dev;
3085         struct bond_dev_private *internals = dev->data->dev_private;
3086         int ret, i;
3087
3088         rte_spinlock_lock(&internals->lock);
3089
3090         for (i = 0; i < internals->slave_count; i++) {
3091                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3092                 if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
3093                          *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
3094                         ret = -ENOTSUP;
3095                         goto end;
3096                 }
3097         }
3098
3099         for (i = 0; i < internals->slave_count; i++) {
3100                 ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
3101                                 mac_addr, vmdq);
3102                 if (ret < 0) {
3103                         /* rollback */
3104                         for (i--; i >= 0; i--)
3105                                 rte_eth_dev_mac_addr_remove(
3106                                         internals->slaves[i].port_id, mac_addr);
3107                         goto end;
3108                 }
3109         }
3110
3111         ret = 0;
3112 end:
3113         rte_spinlock_unlock(&internals->lock);
3114         return ret;
3115 }
3116
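/*
 * The handler above programs the extra address on every slave and rolls the
 * already-programmed slaves back if any one of them fails. A minimal sketch,
 * assuming "extra_mac" is a secondary unicast address (VMDq pool 0):
 *
 *     struct rte_ether_addr extra_mac = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
 *     };
 *     if (rte_eth_dev_mac_addr_add(bond_port_id, &extra_mac, 0) != 0)
 *             printf("failed to add MAC to bonded port\n");
 */
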
3117 static void
3118 bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
3119 {
3120         struct rte_eth_dev *slave_eth_dev;
3121         struct bond_dev_private *internals = dev->data->dev_private;
3122         int i;
3123
3124         rte_spinlock_lock(&internals->lock);
3125
3126         for (i = 0; i < internals->slave_count; i++) {
3127                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3128                 if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
3129                         goto end;
3130         }
3131
3132         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
3133
3134         for (i = 0; i < internals->slave_count; i++)
3135                 rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
3136                                 mac_addr);
3137
3138 end:
3139         rte_spinlock_unlock(&internals->lock);
3140 }
3141
3142 const struct eth_dev_ops default_dev_ops = {
3143         .dev_start            = bond_ethdev_start,
3144         .dev_stop             = bond_ethdev_stop,
3145         .dev_close            = bond_ethdev_close,
3146         .dev_configure        = bond_ethdev_configure,
3147         .dev_infos_get        = bond_ethdev_info,
3148         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
3149         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
3150         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
3151         .rx_queue_release     = bond_ethdev_rx_queue_release,
3152         .tx_queue_release     = bond_ethdev_tx_queue_release,
3153         .link_update          = bond_ethdev_link_update,
3154         .stats_get            = bond_ethdev_stats_get,
3155         .stats_reset          = bond_ethdev_stats_reset,
3156         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
3157         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
3158         .allmulticast_enable  = bond_ethdev_allmulticast_enable,
3159         .allmulticast_disable = bond_ethdev_allmulticast_disable,
3160         .reta_update          = bond_ethdev_rss_reta_update,
3161         .reta_query           = bond_ethdev_rss_reta_query,
3162         .rss_hash_update      = bond_ethdev_rss_hash_update,
3163         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get,
3164         .mtu_set              = bond_ethdev_mtu_set,
3165         .mac_addr_set         = bond_ethdev_mac_address_set,
3166         .mac_addr_add         = bond_ethdev_mac_addr_add,
3167         .mac_addr_remove      = bond_ethdev_mac_addr_remove,
3168         .filter_ctrl          = bond_filter_ctrl
3169 };
3170
3171 static int
3172 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3173 {
3174         const char *name = rte_vdev_device_name(dev);
3175         uint8_t socket_id = dev->device.numa_node;
3176         struct bond_dev_private *internals = NULL;
3177         struct rte_eth_dev *eth_dev = NULL;
3178         uint32_t vlan_filter_bmp_size;
3179
3180         /* now do all data allocation - for eth_dev structure, dummy pci driver
3181          * and internal (private) data
3182          */
3183
3184         /* reserve an ethdev entry */
3185         eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
3186         if (eth_dev == NULL) {
3187                 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
3188                 goto err;
3189         }
3190
3191         internals = eth_dev->data->dev_private;
3192         eth_dev->data->nb_rx_queues = (uint16_t)1;
3193         eth_dev->data->nb_tx_queues = (uint16_t)1;
3194
3195         /* Allocate memory for storing MAC addresses */
3196         eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
3197                         BOND_MAX_MAC_ADDRS, 0, socket_id);
3198         if (eth_dev->data->mac_addrs == NULL) {
3199                 RTE_BOND_LOG(ERR,
3200                              "Failed to allocate %u bytes needed to store MAC addresses",
3201                              RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
3202                 goto err;
3203         }
3204
3205         eth_dev->dev_ops = &default_dev_ops;
3206         eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
3207
3208         rte_spinlock_init(&internals->lock);
3209         rte_spinlock_init(&internals->lsc_lock);
3210
3211         internals->port_id = eth_dev->data->port_id;
3212         internals->mode = BONDING_MODE_INVALID;
3213         internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
3214         internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
3215         internals->burst_xmit_hash = burst_xmit_l2_hash;
3216         internals->user_defined_mac = 0;
3217
3218         internals->link_status_polling_enabled = 0;
3219
3220         internals->link_status_polling_interval_ms =
3221                 DEFAULT_POLLING_INTERVAL_10_MS;
3222         internals->link_down_delay_ms = 0;
3223         internals->link_up_delay_ms = 0;
3224
3225         internals->slave_count = 0;
3226         internals->active_slave_count = 0;
3227         internals->rx_offload_capa = 0;
3228         internals->tx_offload_capa = 0;
3229         internals->rx_queue_offload_capa = 0;
3230         internals->tx_queue_offload_capa = 0;
3231         internals->candidate_max_rx_pktlen = 0;
3232         internals->max_rx_pktlen = 0;
3233
3234         /* Initially allow any RSS offload type to be chosen */
3235         internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
3236
3237         memset(&internals->default_rxconf, 0,
3238                sizeof(internals->default_rxconf));
3239         memset(&internals->default_txconf, 0,
3240                sizeof(internals->default_txconf));
3241
3242         memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
3243         memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
3244
3245         memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
3246         memset(internals->slaves, 0, sizeof(internals->slaves));
3247
3248         TAILQ_INIT(&internals->flow_list);
3249         internals->flow_isolated_valid = 0;
3250
3251         /* Set mode 4 default configuration */
3252         bond_mode_8023ad_setup(eth_dev, NULL);
3253         if (bond_ethdev_mode_set(eth_dev, mode)) {
3254                 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
3255                                  eth_dev->data->port_id, mode);
3256                 goto err;
3257         }
3258
3259         vlan_filter_bmp_size =
3260                 rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
3261         internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
3262                                                    RTE_CACHE_LINE_SIZE);
3263         if (internals->vlan_filter_bmpmem == NULL) {
3264                 RTE_BOND_LOG(ERR,
3265                              "Failed to allocate vlan bitmap for bonded device %u",
3266                              eth_dev->data->port_id);
3267                 goto err;
3268         }
3269
3270         internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
3271                         internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
3272         if (internals->vlan_filter_bmp == NULL) {
3273                 RTE_BOND_LOG(ERR,
3274                              "Failed to init vlan bitmap for bonded device %u",
3275                              eth_dev->data->port_id);
3276                 rte_free(internals->vlan_filter_bmpmem);
3277                 goto err;
3278         }
3279
3280         return eth_dev->data->port_id;
3281
3282 err:
3283         rte_free(internals);
3284         if (eth_dev != NULL)
3285                 eth_dev->data->dev_private = NULL;
3286         rte_eth_dev_release_port(eth_dev);
3287         return -1;
3288 }
3289
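/*
 * bond_alloc() is reached from bond_probe() below when the device comes
 * from EAL vdev arguments, or via the rte_eth_bond_create() API, which
 * wraps rte_vdev_init(). A minimal API-side sketch:
 *
 *     int port = rte_eth_bond_create("net_bonding0",
 *                                    BONDING_MODE_ACTIVE_BACKUP,
 *                                    rte_socket_id());
 *     if (port < 0)
 *             printf("bond create failed\n");
 */
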
3290 static int
3291 bond_probe(struct rte_vdev_device *dev)
3292 {
3293         const char *name;
3294         struct bond_dev_private *internals;
3295         struct rte_kvargs *kvlist;
3296         uint8_t bonding_mode, socket_id;
3297         int arg_count, port_id;
3298         uint8_t agg_mode;
3299         struct rte_eth_dev *eth_dev;
3300
3301         if (!dev)
3302                 return -EINVAL;
3303
3304         name = rte_vdev_device_name(dev);
3305         RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
3306
3307         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3308                 eth_dev = rte_eth_dev_attach_secondary(name);
3309                 if (!eth_dev) {
3310                         RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3311                         return -1;
3312                 }
3313                 /* TODO: request info from primary to set up Rx and Tx */
3314                 eth_dev->dev_ops = &default_dev_ops;
3315                 eth_dev->device = &dev->device;
3316                 rte_eth_dev_probing_finish(eth_dev);
3317                 return 0;
3318         }
3319
3320         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
3321                 pmd_bond_init_valid_arguments);
3322         if (kvlist == NULL)
3323                 return -1;
3324
3325         /* Parse link bonding mode */
3326         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
3327                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
3328                                 &bond_ethdev_parse_slave_mode_kvarg,
3329                                 &bonding_mode) != 0) {
3330                         RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
3331                                         name);
3332                         goto parse_error;
3333                 }
3334         } else {
3335                 RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
3336                                 "device %s", name);
3337                 goto parse_error;
3338         }
3339
3340         /* Parse socket id to create bonding device on */
3341         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
3342         if (arg_count == 1) {
3343                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
3344                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
3345                                 != 0) {
3346                         RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
3347                                         "bonded device %s", name);
3348                         goto parse_error;
3349                 }
3350         } else if (arg_count > 1) {
3351                 RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
3352                                 "bonded device %s", name);
3353                 goto parse_error;
3354         } else {
3355                 socket_id = rte_socket_id();
3356         }
3357
3358         dev->device.numa_node = socket_id;
3359
3360         /* Create link bonding eth device */
3361         port_id = bond_alloc(dev, bonding_mode);
3362         if (port_id < 0) {
3363                 RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on "
3364                                 "socket %u.", name, bonding_mode, socket_id);
3365                 goto parse_error;
3366         }
3367         internals = rte_eth_devices[port_id].data->dev_private;
3368         internals->kvlist = kvlist;
3369
3370         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3371                 if (rte_kvargs_process(kvlist,
3372                                 PMD_BOND_AGG_MODE_KVARG,
3373                                 &bond_ethdev_parse_slave_agg_mode_kvarg,
3374                                 &agg_mode) != 0) {
3375                         RTE_BOND_LOG(ERR,
3376                                         "Failed to parse agg selection mode for bonded device %s",
3377                                         name);
3378                         goto parse_error;
3379                 }
3380
3381                 if (internals->mode == BONDING_MODE_8023AD)
3382                         internals->mode4.agg_selection = agg_mode;
3383         } else {
3384                 internals->mode4.agg_selection = AGG_STABLE;
3385         }
3386
3387         rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
3388         RTE_BOND_LOG(INFO, "Created bonded device %s on port %d in mode %u on "
3389                         "socket %u.", name, port_id, bonding_mode, socket_id);
3390         return 0;
3391
3392 parse_error:
3393         rte_kvargs_free(kvlist);
3394
3395         return -1;
3396 }
3397
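/*
 * The same probe path runs when the device is given on the EAL command
 * line; a plausible invocation (PCI addresses are illustrative) matching
 * the parameter string registered at the bottom of this file:
 *
 *     --vdev 'net_bonding0,mode=1,slave=0000:02:00.0,slave=0000:02:00.1,primary=0000:02:00.0'
 */
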
3398 static int
3399 bond_remove(struct rte_vdev_device *dev)
3400 {
3401         struct rte_eth_dev *eth_dev;
3402         struct bond_dev_private *internals;
3403         const char *name;
3404
3405         if (!dev)
3406                 return -EINVAL;
3407
3408         name = rte_vdev_device_name(dev);
3409         RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
3410
3411         /* now free all allocated data - the eth_dev structure,
3412          * dummy pci driver and internal (private) data
3413          */
3414
3415         /* find an ethdev entry */
3416         eth_dev = rte_eth_dev_allocated(name);
3417         if (eth_dev == NULL)
3418                 return -ENODEV;
3419
3420         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3421                 return rte_eth_dev_release_port(eth_dev);
3422
3423         RTE_ASSERT(eth_dev->device == &dev->device);
3424
3425         internals = eth_dev->data->dev_private;
3426         if (internals->slave_count != 0)
3427                 return -EBUSY;
3428
3429         if (eth_dev->data->dev_started == 1) {
3430                 bond_ethdev_stop(eth_dev);
3431                 bond_ethdev_close(eth_dev);
3432         }
3433
3434         eth_dev->dev_ops = NULL;
3435         eth_dev->rx_pkt_burst = NULL;
3436         eth_dev->tx_pkt_burst = NULL;
3437
3438         internals = eth_dev->data->dev_private;
3439         /* Try to release the mempool used in mode 6. If the bonded
3440          * device is not in mode 6, freeing NULL is harmless.
3441          */
3442         rte_mempool_free(internals->mode6.mempool);
3443         rte_bitmap_free(internals->vlan_filter_bmp);
3444         rte_free(internals->vlan_filter_bmpmem);
3445
3446         rte_eth_dev_release_port(eth_dev);
3447
3448         return 0;
3449 }
3450
3451 /* This part resolves the slave port ids after all the other pdevs and vdevs
3452  * have been allocated */
3453 static int
3454 bond_ethdev_configure(struct rte_eth_dev *dev)
3455 {
3456         const char *name = dev->device->name;
3457         struct bond_dev_private *internals = dev->data->dev_private;
3458         struct rte_kvargs *kvlist = internals->kvlist;
3459         int arg_count;
3460         uint16_t port_id = dev - rte_eth_devices;
3461         uint8_t agg_mode;
3462
3463         static const uint8_t default_rss_key[40] = {
3464                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3465                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3466                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3467                 0xBE, 0xAC, 0x01, 0xFA
3468         };
3469
3470         unsigned i, j;
3471
3472         /*
3473          * If RSS is enabled, fill the table with default values and
3474          * set the key to the value specified in the port RSS configuration.
3475          * Fall back to the default RSS key if no key is specified.
3476          */
3477         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
3478                 if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
3479                         internals->rss_key_len =
3480                                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
3481                         memcpy(internals->rss_key,
3482                                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
3483                                internals->rss_key_len);
3484                 } else {
3485                         internals->rss_key_len = sizeof(default_rss_key);
3486                         memcpy(internals->rss_key, default_rss_key,
3487                                internals->rss_key_len);
3488                 }
3489
3490                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3491                         internals->reta_conf[i].mask = ~0LL;
3492                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
3493                                 internals->reta_conf[i].reta[j] =
3494                                                 (i * RTE_RETA_GROUP_SIZE + j) %
3495                                                 dev->data->nb_rx_queues;
3496                 }
3497         }
3498
3499         /* set the max_rx_pktlen */
3500         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
3501
3502         /*
3503          * if no kvlist, it means that this bonded device has been created
3504          * through the bonding api.
3505          */
3506         if (!kvlist)
3507                 return 0;
3508
3509         /* Parse MAC address for bonded device */
3510         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
3511         if (arg_count == 1) {
3512                 struct rte_ether_addr bond_mac;
3513
3514                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
3515                                        &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
3516                         RTE_BOND_LOG(INFO, "Invalid MAC address for bonded device %s",
3517                                      name);
3518                         return -1;
3519                 }
3520
3521                 /* Set MAC address */
3522                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
3523                         RTE_BOND_LOG(ERR,
3524                                      "Failed to set MAC address on bonded device %s",
3525                                      name);
3526                         return -1;
3527                 }
3528         } else if (arg_count > 1) {
3529                 RTE_BOND_LOG(ERR,
3530                              "MAC address can be specified only once for bonded device %s",
3531                              name);
3532                 return -1;
3533         }
3534
3535         /* Parse/set balance mode transmit policy */
3536         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
3537         if (arg_count == 1) {
3538                 uint8_t xmit_policy;
3539
3540                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
3541                                        &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
3542                     0) {
3543                         RTE_BOND_LOG(INFO,
3544                                      "Invalid xmit policy specified for bonded device %s",
3545                                      name);
3546                         return -1;
3547                 }
3548
3549                 /* Set balance mode transmit policy */
3550                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
3551                         RTE_BOND_LOG(ERR,
3552                                      "Failed to set balance xmit policy on bonded device %s",
3553                                      name);
3554                         return -1;
3555                 }
3556         } else if (arg_count > 1) {
3557                 RTE_BOND_LOG(ERR,
3558                              "Transmit policy can be specified only once for bonded device %s",
3559                              name);
3560                 return -1;
3561         }
3562
3563         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3564                 if (rte_kvargs_process(kvlist,
3565                                        PMD_BOND_AGG_MODE_KVARG,
3566                                        &bond_ethdev_parse_slave_agg_mode_kvarg,
3567                                        &agg_mode) != 0) {
3568                         RTE_BOND_LOG(ERR,
3569                                      "Failed to parse agg selection mode for bonded device %s",
3570                                      name);
3571                 }
3572                 if (internals->mode == BONDING_MODE_8023AD) {
3573                         int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
3574                                         agg_mode);
3575                         if (ret < 0) {
3576                                 RTE_BOND_LOG(ERR,
3577                                         "Invalid args for agg selection set for bonded device %s",
3578                                         name);
3579                                 return -1;
3580                         }
3581                 }
3582         }
3583
3584         /* Parse/add slave ports to bonded device */
3585         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
3586                 struct bond_ethdev_slave_ports slave_ports;
3587                 unsigned i;
3588
3589                 memset(&slave_ports, 0, sizeof(slave_ports));
3590
3591                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
3592                                        &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
3593                         RTE_BOND_LOG(ERR,
3594                                      "Failed to parse slave ports for bonded device %s",
3595                                      name);
3596                         return -1;
3597                 }
3598
3599                 for (i = 0; i < slave_ports.slave_count; i++) {
3600                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
3601                                 RTE_BOND_LOG(ERR,
3602                                              "Failed to add port %d as slave to bonded device %s",
3603                                              slave_ports.slaves[i], name);
3604                         }
3605                 }
3606
3607         } else {
3608                 RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
3609                 return -1;
3610         }
3611
3612         /* Parse/set primary slave port id*/
3613         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
3614         if (arg_count == 1) {
3615                 uint16_t primary_slave_port_id;
3616
3617                 if (rte_kvargs_process(kvlist,
3618                                        PMD_BOND_PRIMARY_SLAVE_KVARG,
3619                                        &bond_ethdev_parse_primary_slave_port_id_kvarg,
3620                                        &primary_slave_port_id) < 0) {
3621                         RTE_BOND_LOG(INFO,
3622                                      "Invalid primary slave port id specified for bonded device %s",
3623                                      name);
3624                         return -1;
3625                 }
3626
3627                 /* Set primary slave port id */
3628                 if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
3629                     != 0) {
3630                         RTE_BOND_LOG(ERR,
3631                                      "Failed to set primary slave port %d on bonded device %s",
3632                                      primary_slave_port_id, name);
3633                         return -1;
3634                 }
3635         } else if (arg_count > 1) {
3636                 RTE_BOND_LOG(INFO,
3637                              "Primary slave can be specified only once for bonded device %s",
3638                              name);
3639                 return -1;
3640         }
3641
3642         /* Parse link status monitor polling interval */
3643         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
3644         if (arg_count == 1) {
3645                 uint32_t lsc_poll_interval_ms;
3646
3647                 if (rte_kvargs_process(kvlist,
3648                                        PMD_BOND_LSC_POLL_PERIOD_KVARG,
3649                                        &bond_ethdev_parse_time_ms_kvarg,
3650                                        &lsc_poll_interval_ms) < 0) {
3651                         RTE_BOND_LOG(INFO,
3652                                      "Invalid lsc polling interval value specified for bonded"
3653                                      " device %s", name);
3654                         return -1;
3655                 }
3656
3657                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
3658                     != 0) {
3659                         RTE_BOND_LOG(ERR,
3660                                      "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
3661                                      lsc_poll_interval_ms, name);
3662                         return -1;
3663                 }
3664         } else if (arg_count > 1) {
3665                 RTE_BOND_LOG(INFO,
3666                              "LSC polling interval can be specified only once for bonded"
3667                              " device %s", name);
3668                 return -1;
3669         }
3670
3671         /* Parse link up interrupt propagation delay */
3672         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
3673         if (arg_count == 1) {
3674                 uint32_t link_up_delay_ms;
3675
3676                 if (rte_kvargs_process(kvlist,
3677                                        PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
3678                                        &bond_ethdev_parse_time_ms_kvarg,
3679                                        &link_up_delay_ms) < 0) {
3680                         RTE_BOND_LOG(INFO,
3681                                      "Invalid link up propagation delay value specified for"
3682                                      " bonded device %s", name);
3683                         return -1;
3684                 }
3685
3686                 /* Set link up propagation delay */
3687                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
3688                     != 0) {
3689                         RTE_BOND_LOG(ERR,
3690                                      "Failed to set link up propagation delay (%u ms) on bonded"
3691                                      " device %s", link_up_delay_ms, name);
3692                         return -1;
3693                 }
3694         } else if (arg_count > 1) {
3695                 RTE_BOND_LOG(INFO,
3696                              "Link up propagation delay can be specified only once for"
3697                              " bonded device %s", name);
3698                 return -1;
3699         }
3700
3701         /* Parse link down interrupt propagation delay */
3702         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
3703         if (arg_count == 1) {
3704                 uint32_t link_down_delay_ms;
3705
3706                 if (rte_kvargs_process(kvlist,
3707                                        PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
3708                                        &bond_ethdev_parse_time_ms_kvarg,
3709                                        &link_down_delay_ms) < 0) {
3710                         RTE_BOND_LOG(INFO,
3711                                      "Invalid link down propagation delay value specified for"
3712                                      " bonded device %s", name);
3713                         return -1;
3714                 }
3715
3716                 /* Set link down propagation delay */
3717                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
3718                     != 0) {
3719                         RTE_BOND_LOG(ERR,
3720                                      "Failed to set link down propagation delay (%u ms) on bonded device %s",
3721                                      link_down_delay_ms, name);
3722                         return -1;
3723                 }
3724         } else if (arg_count > 1) {
3725                 RTE_BOND_LOG(INFO,
3726                              "Link down propagation delay can be specified only once for bonded device %s",
3727                              name);
3728                 return -1;
3729         }
3730
3731         return 0;
3732 }
3733
3734 struct rte_vdev_driver pmd_bond_drv = {
3735         .probe = bond_probe,
3736         .remove = bond_remove,
3737 };
3738
3739 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
3740 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
3741
3742 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
3743         "slave=<ifc> "
3744         "primary=<ifc> "
3745         "mode=[0-6] "
3746         "xmit_policy=[l2 | l23 | l34] "
3747         "agg_mode=[count | stable | bandwidth] "
3748         "socket_id=<int> "
3749         "mac=<mac addr> "
3750         "lsc_poll_period_ms=<int> "
3751         "up_delay=<int> "
3752         "down_delay=<int>");
3753
3754 RTE_LOG_REGISTER(bond_logtype, pmd.net.bond, NOTICE);