/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdlib.h>
#include <stdbool.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"

#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define BOND_MAX_MAC_ADDRS 16

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];

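/*
 * Skip any VLAN headers that follow the Ethernet header. On return *proto
 * holds the inner ethertype (big endian) and the returned offset is the
 * number of VLAN-header bytes between (eth_hdr + 1) and the L3 header.
 * At most two stacked tags (QinQ) are handled.
 */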
static inline size_t
get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
        size_t vlan_offset = 0;

        if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
                rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
                struct rte_vlan_hdr *vlan_hdr =
                        (struct rte_vlan_hdr *)(eth_hdr + 1);

                vlan_offset = sizeof(struct rte_vlan_hdr);
                *proto = vlan_hdr->eth_proto;

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
                        vlan_hdr = vlan_hdr + 1;
                        *proto = vlan_hdr->eth_proto;
                        vlan_offset += sizeof(struct rte_vlan_hdr);
                }
        }
        return vlan_offset;
}

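/*
 * Round-robin (mode 0) RX: poll each active slave in turn, starting from
 * the position saved in the queue, until all slaves have been polled or
 * nb_pkts mbufs have been gathered.
 */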
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        uint16_t num_rx_total = 0;
        uint16_t slave_count;
        uint16_t active_slave;
        int i;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        internals = bd_rx_q->dev_private;
        slave_count = internals->active_slave_count;
        active_slave = bd_rx_q->active_slave;

        for (i = 0; i < slave_count && nb_pkts; i++) {
                uint16_t num_rx_slave;

                /* Offset of pointer to *bufs increases as packets are received
                 * from other slaves */
                num_rx_slave =
                        rte_eth_rx_burst(internals->active_slaves[active_slave],
                                         bd_rx_q->queue_id,
                                         bufs + num_rx_total, nb_pkts);
                num_rx_total += num_rx_slave;
                nb_pkts -= num_rx_slave;
                if (++active_slave == slave_count)
                        active_slave = 0;
        }

        if (++bd_rx_q->active_slave >= slave_count)
                bd_rx_q->active_slave = 0;
        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        return rte_eth_rx_burst(internals->current_primary_port,
                        bd_rx_q->queue_id, bufs, nb_pkts);
}

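/*
 * A frame is treated as a slow-protocol frame only when it carries no
 * VLAN tag (or a zero TCI) and its ethertype is RTE_ETHER_TYPE_SLOW with
 * a LACP or marker subtype.
 */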
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
        const uint16_t ether_type_slow_be =
                rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);

        return !((mbuf->ol_flags & RTE_MBUF_F_RX_VLAN) ? mbuf->vlan_tci : 0) &&
                (ethertype == ether_type_slow_be &&
                (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}

/*****************************************************************************
 * Flow director's setup for mode 4 optimization
 */

static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
        .dst.addr_bytes = { 0 },
        .src.addr_bytes = { 0 },
        .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};

static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
        .dst.addr_bytes = { 0 },
        .src.addr_bytes = { 0 },
        .type = 0xFFFF,
};

static struct rte_flow_item flow_item_8023ad[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &flow_item_eth_type_8023ad,
                .last = NULL,
                .mask = &flow_item_eth_mask_type_8023ad,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_END,
                .spec = NULL,
                .last = NULL,
                .mask = NULL,
        }
};

const struct rte_flow_attr flow_attr_8023ad = {
        .group = 0,
        .priority = 0,
        .ingress = 1,
        .egress = 0,
        .reserved = 0,
};

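/*
 * flow_attr_8023ad and flow_item_8023ad together describe an ingress
 * rte_flow rule matching any frame whose ethertype is RTE_ETHER_TYPE_SLOW
 * (0x8809); the QUEUE action built in the functions below steers such
 * frames to the dedicated LACP control queue.
 */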
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
                uint16_t slave_port) {
        struct rte_eth_dev_info slave_info;
        struct rte_flow_error error;
        struct bond_dev_private *internals = bond_dev->data->dev_private;

        const struct rte_flow_action_queue lacp_queue_conf = {
                .index = 0,
        };

        const struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &lacp_queue_conf
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                }
        };

        int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
                        flow_item_8023ad, actions, &error);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
                                __func__, error.message, slave_port,
                                internals->mode4.dedicated_queues.rx_qid);
                return -1;
        }

        ret = rte_eth_dev_info_get(slave_port, &slave_info);
        if (ret != 0) {
                RTE_BOND_LOG(ERR,
                        "%s: Error during getting device (port %u) info: %s",
                        __func__, slave_port, strerror(-ret));

                return ret;
        }

        if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
                        slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
                RTE_BOND_LOG(ERR,
                        "%s: Slave %d capabilities do not allow allocating additional queues",
                        __func__, slave_port);
                return -1;
        }

        return 0;
}

int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
        struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
        struct bond_dev_private *internals = bond_dev->data->dev_private;
        struct rte_eth_dev_info bond_info;
        uint16_t idx;
        int ret;

        /* Verify that all slaves in the bonding device support flow director */
        if (internals->slave_count > 0) {
                ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
                if (ret != 0) {
                        RTE_BOND_LOG(ERR,
                                "%s: Error during getting device (port %u) info: %s",
                                __func__, bond_dev->data->port_id,
                                strerror(-ret));

                        return ret;
                }

                internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
                internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;

                for (idx = 0; idx < internals->slave_count; idx++) {
                        if (bond_ethdev_8023ad_flow_verify(bond_dev,
                                        internals->slaves[idx].port_id) != 0)
                                return -1;
                }
        }

        return 0;
}

int
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {

        struct rte_flow_error error;
        struct bond_dev_private *internals = bond_dev->data->dev_private;
        struct rte_flow_action_queue lacp_queue_conf = {
                .index = internals->mode4.dedicated_queues.rx_qid,
        };

        const struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &lacp_queue_conf
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                }
        };

        internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
                        &flow_attr_8023ad, flow_item_8023ad, actions, &error);
        if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
                RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
                                "(slave_port=%d queue_id=%d)",
                                error.message, slave_port,
                                internals->mode4.dedicated_queues.rx_qid);
                return -1;
        }

        return 0;
}

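/*
 * Mode 4 (802.3ad) RX path. Active slaves are polled round-robin; slow
 * protocol frames and frames a non-promiscuous bonded interface should
 * not deliver are filtered out of the burst, with LACP/marker frames
 * handed to the mode 4 state machine instead of the caller.
 */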
static inline uint16_t
rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
                bool dedicated_rxq)
{
        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct rte_eth_dev *bonded_eth_dev =
                                        &rte_eth_devices[internals->port_id];
        struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
        struct rte_ether_hdr *hdr;

        const uint16_t ether_type_slow_be =
                rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
        uint16_t num_rx_total = 0;      /* Total number of received packets */
        uint16_t slaves[RTE_MAX_ETHPORTS];
        uint16_t slave_count, idx;

        uint8_t collecting;  /* current slave collecting status */
        const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
        const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
        uint8_t subtype;
        uint16_t i;
        uint16_t j;
        uint16_t k;

        /* Copy slave list to protect against slave up/down changes during rx
         * bursting */
        slave_count = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * slave_count);

        idx = bd_rx_q->active_slave;
        if (idx >= slave_count) {
                bd_rx_q->active_slave = 0;
                idx = 0;
        }
        for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
                j = num_rx_total;
                collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
                                         COLLECTING);

                /* Read packets from this slave */
                num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
                                &bufs[num_rx_total], nb_pkts - num_rx_total);

                for (k = j; k < 2 && k < num_rx_total; k++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

                /* Handle slow protocol packets. */
                while (j < num_rx_total) {
                        if (j + 3 < num_rx_total)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

                        hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
                        subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

                        /* Remove packet from array if:
                         * - it is a slow packet but no dedicated rxq is present,
                         * - the slave is not in collecting state,
                         * - the bonding interface is not in promiscuous mode
                         *   and the packet is unicast with a non-matching
                         *   destination address, or
                         * - the bonding interface is not in allmulti mode and
                         *   the packet is multicast.
                         */
                        if (unlikely(
                                (!dedicated_rxq &&
                                 is_lacp_packets(hdr->ether_type, subtype,
                                                 bufs[j])) ||
                                !collecting ||
                                (!promisc &&
                                 ((rte_is_unicast_ether_addr(&hdr->dst_addr) &&
                                   !rte_is_same_ether_addr(bond_mac,
                                                       &hdr->dst_addr)) ||
                                  (!allmulti &&
                                   rte_is_multicast_ether_addr(&hdr->dst_addr)))))) {

                                if (hdr->ether_type == ether_type_slow_be) {
                                        bond_mode_8023ad_handle_slow_pkt(
                                            internals, slaves[idx], bufs[j]);
                                } else
                                        rte_pktmbuf_free(bufs[j]);

                                /* Packet is managed by mode 4 or dropped, shift the array */
                                num_rx_total--;
                                if (j < num_rx_total) {
                                        memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
                                                (num_rx_total - j));
                                }
                        } else
                                j++;
                }
                if (unlikely(++idx == slave_count))
                        idx = 0;
        }

        if (++bd_rx_q->active_slave >= slave_count)
                bd_rx_q->active_slave = 0;

        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        return rx_burst_8023ad(queue, bufs, nb_pkts, false);
}

static uint16_t
bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        return rx_burst_8023ad(queue, bufs, nb_pkts, true);
}

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
        switch (arp_op) {
        case RTE_ARP_OP_REQUEST:
                strlcpy(buf, "ARP Request", buf_len);
                return;
        case RTE_ARP_OP_REPLY:
                strlcpy(buf, "ARP Reply", buf_len);
                return;
        case RTE_ARP_OP_REVREQUEST:
                strlcpy(buf, "Reverse ARP Request", buf_len);
                return;
        case RTE_ARP_OP_REVREPLY:
                strlcpy(buf, "Reverse ARP Reply", buf_len);
                return;
        case RTE_ARP_OP_INVREQUEST:
                strlcpy(buf, "Peer Identify Request", buf_len);
                return;
        case RTE_ARP_OP_INVREPLY:
                strlcpy(buf, "Peer Identify Reply", buf_len);
                return;
        default:
                break;
        }
        strlcpy(buf, "Unknown", buf_len);
        return;
}
#endif
#define MaxIPv4String   16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
        uint32_t ipv4_addr;

        ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
        snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
                (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
                ipv4_addr & 0xFF);
}

#define MAX_CLIENTS_NUMBER      128
uint8_t active_clients;
struct client_stats_t {
        uint16_t port;
        uint32_t ipv4_addr;
        uint32_t ipv4_rx_packets;
        uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
{
        int i = 0;

        for (; i < MAX_CLIENTS_NUMBER; i++) {
                if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
                        /* Matching client found; update its RX or TX packet count */
                        if (TXorRXindicator == &burstnumberRX)
                                client_stats[i].ipv4_rx_packets++;
                        else
                                client_stats[i].ipv4_tx_packets++;
                        return;
                }
        }
        /* We have a new client. Insert it into the table and update the stats */
        if (TXorRXindicator == &burstnumberRX)
                client_stats[active_clients].ipv4_rx_packets++;
        else
                client_stats[active_clients].ipv4_tx_packets++;
        client_stats[active_clients].ipv4_addr = addr;
        client_stats[active_clients].port = port;
        active_clients++;
}

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
        rte_log(RTE_LOG_DEBUG, bond_logtype,                            \
                "%s port:%d SrcMAC:" RTE_ETHER_ADDR_PRT_FMT " SrcIP:%s " \
                "DstMAC:" RTE_ETHER_ADDR_PRT_FMT " DstIP:%s %s %d\n",   \
                info,                                                   \
                port,                                                   \
                RTE_ETHER_ADDR_BYTES(&eth_h->src_addr),                 \
                src_ip,                                                 \
                RTE_ETHER_ADDR_BYTES(&eth_h->dst_addr),                 \
                dst_ip,                                                 \
                arp_op, ++burstnumber)
#endif

static void
mode6_debug(const char __rte_unused *info,
        struct rte_ether_hdr *eth_h, uint16_t port,
        uint32_t __rte_unused *burstnumber)
{
        struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        struct rte_arp_hdr *arp_h;
        char dst_ip[16];
        char ArpOp[24];
        char buf[16];
#endif
        char src_ip[16];

        uint16_t ether_type = eth_h->ether_type;
        uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        strlcpy(buf, info, 16);
#endif

        if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
                ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
                ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
                update_client_stats(ipv4_h->src_addr, port, burstnumber);
        }
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
                arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
                ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
                arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
                                ArpOp, sizeof(ArpOp));
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
        }
#endif
}
#endif

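/*
 * Mode 6 (ALB) RX: receive as in round-robin mode, then hand any ARP
 * frames to the ALB logic so the client table stays current.
 */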
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct rte_ether_hdr *eth_h;
        uint16_t ether_type, offset;
        uint16_t nb_recv_pkts;
        int i;

        nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

        for (i = 0; i < nb_recv_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
                        bond_mode_alb_arp_recv(eth_h, offset, internals);
                }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
                        mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
        }

        return nb_recv_pkts;
}

static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        uint16_t num_of_slaves;
        uint16_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave;

        static int slave_idx = 0;
        int i, cslave_idx = 0, tx_fail_total = 0;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate each slave's mbuf array with the packets to be sent on it */
        for (i = 0; i < nb_pkts; i++) {
                cslave_idx = (slave_idx + i) % num_of_slaves;
                slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
        }

        /* Increment current slave index so the next call to tx burst starts on the
         * next slave */
        slave_idx = ++cslave_idx;

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* If tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += tx_fail_slave;

                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                       &slave_bufs[i][num_tx_slave],
                                       tx_fail_slave * sizeof(bufs[0]));
                        }
                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
                struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        if (internals->active_slave_count < 1)
                return 0;

        return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
                        bufs, nb_pkts);
}

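/*
 * Transmit hash helpers: each one XOR-folds the relevant header fields,
 * so both directions of a flow hash to the same value.
 */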
static inline uint16_t
ether_hash(struct rte_ether_hdr *eth_hdr)
{
        unaligned_uint16_t *word_src_addr =
                (unaligned_uint16_t *)eth_hdr->src_addr.addr_bytes;
        unaligned_uint16_t *word_dst_addr =
                (unaligned_uint16_t *)eth_hdr->dst_addr.addr_bytes;

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
        return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
        unaligned_uint32_t *word_src_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
        unaligned_uint32_t *word_dst_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]) ^
                        (word_src_addr[3] ^ word_dst_addr[3]);
}

void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        struct rte_ether_hdr *eth_hdr;
        uint32_t hash;
        int i;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);

                hash = ether_hash(eth_hdr);

                slaves[i] = (hash ^= hash >> 8) % slave_count;
        }
}

void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        uint16_t i;
        struct rte_ether_hdr *eth_hdr;
        uint16_t proto;
        size_t vlan_offset;
        uint32_t hash, l3hash;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
                l3hash = 0;

                proto = eth_hdr->ether_type;
                hash = ether_hash(eth_hdr);

                vlan_offset = get_vlan_offset(eth_hdr, &proto);

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
                        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv4_hash(ipv4_hdr);

                } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
                        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv6_hash(ipv6_hdr);
                }

                hash = hash ^ l3hash;
                hash ^= hash >> 16;
                hash ^= hash >> 8;

                slaves[i] = hash % slave_count;
        }
}

void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        struct rte_ether_hdr *eth_hdr;
        uint16_t proto;
        size_t vlan_offset;
        int i;

        struct rte_udp_hdr *udp_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        uint32_t hash, l3hash, l4hash;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
                size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
                proto = eth_hdr->ether_type;
                vlan_offset = get_vlan_offset(eth_hdr, &proto);
                l3hash = 0;
                l4hash = 0;

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
                        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        size_t ip_hdr_offset;

                        l3hash = ipv4_hash(ipv4_hdr);

                        /* there is no L4 header in a fragmented packet */
                        if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
                                                                == 0)) {
                                ip_hdr_offset = (ipv4_hdr->version_ihl
                                        & RTE_IPV4_HDR_IHL_MASK) *
                                        RTE_IPV4_IHL_MULTIPLIER;

                                if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
                                        tcp_hdr = (struct rte_tcp_hdr *)
                                                ((char *)ipv4_hdr +
                                                        ip_hdr_offset);
                                        if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
                                                        < pkt_end)
                                                l4hash = HASH_L4_PORTS(tcp_hdr);
                                } else if (ipv4_hdr->next_proto_id ==
                                                                IPPROTO_UDP) {
                                        udp_hdr = (struct rte_udp_hdr *)
                                                ((char *)ipv4_hdr +
                                                        ip_hdr_offset);
                                        if ((size_t)udp_hdr + sizeof(*udp_hdr)
                                                        < pkt_end)
                                                l4hash = HASH_L4_PORTS(udp_hdr);
                                }
                        }
                } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
                        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv6_hash(ipv6_hdr);

                        if (ipv6_hdr->proto == IPPROTO_TCP) {
                                tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
                                l4hash = HASH_L4_PORTS(tcp_hdr);
                        } else if (ipv6_hdr->proto == IPPROTO_UDP) {
                                udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
                                l4hash = HASH_L4_PORTS(udp_hdr);
                        }
                }

                hash = l3hash ^ l4hash;
                hash ^= hash >> 16;
                hash ^= hash >> 8;

                slaves[i] = hash % slave_count;
        }
}
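/*
 * Worked example (illustrative only): for a TCP/IPv4 flow
 * 10.0.0.1:1234 -> 10.0.0.2:80, l3hash = src_addr ^ dst_addr and
 * l4hash = src_port ^ dst_port, then hash = l3hash ^ l4hash;
 * hash ^= hash >> 16; hash ^= hash >> 8; slaves[i] = hash % slave_count.
 * Because XOR is commutative, the reverse direction of the flow selects
 * the same slave.
 */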

struct bwg_slave {
        uint64_t bwg_left_int;
        uint64_t bwg_left_remainder;
        uint16_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
        int i;

        for (i = 0; i < internals->active_slave_count; i++) {
                tlb_last_obytets[internals->active_slaves[i]] = 0;
        }
}

static int
bandwidth_cmp(const void *a, const void *b)
{
        const struct bwg_slave *bwg_a = a;
        const struct bwg_slave *bwg_b = b;
        int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
        int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
                        (int64_t)bwg_a->bwg_left_remainder;
        if (diff > 0)
                return 1;
        else if (diff < 0)
                return -1;
        else if (diff2 > 0)
                return 1;
        else if (diff2 < 0)
                return -1;
        else
                return 0;
}

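/*
 * Estimate how much of the slave's link capacity remains after carrying
 * 'load' bytes in the current measurement window. The result is kept as
 * quotient and remainder so bandwidth_cmp() can sort slaves in
 * descending order without floating point.
 */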
static void
bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
                struct bwg_slave *bwg_slave)
{
        struct rte_eth_link link_status;
        int ret;

        ret = rte_eth_link_get_nowait(port_id, &link_status);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
                             port_id, rte_strerror(-ret));
                return;
        }
        uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
        if (link_bwg == 0)
                return;
        link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
        bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
        bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}

static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
        struct bond_dev_private *internals = arg;
        struct rte_eth_stats slave_stats;
        struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
        uint16_t slave_count;
        uint64_t tx_bytes;

        uint8_t update_stats = 0;
        uint16_t slave_id;
        uint16_t i;

        internals->slave_update_idx++;

        if (internals->slave_update_idx >= REORDER_PERIOD_MS)
                update_stats = 1;

        for (i = 0; i < internals->active_slave_count; i++) {
                slave_id = internals->active_slaves[i];
                rte_eth_stats_get(slave_id, &slave_stats);
                tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
                bandwidth_left(slave_id, tx_bytes,
                                internals->slave_update_idx, &bwg_array[i]);
                bwg_array[i].slave = slave_id;

                if (update_stats) {
                        tlb_last_obytets[slave_id] = slave_stats.obytes;
                }
        }

        if (update_stats == 1)
                internals->slave_update_idx = 0;

        slave_count = i;
        qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
        for (i = 0; i < slave_count; i++)
                internals->tlb_slaves_order[i] = bwg_array[i].slave;

        rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
                        (struct bond_dev_private *)internals);
}

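/*
 * Mode 5 (TLB) TX: walk the slaves in the bandwidth order maintained by
 * bond_ethdev_update_tlb_slave_cb(), rewriting the source MAC of frames
 * that carry the primary's address so they appear to come from the
 * transmitting slave.
 */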
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_eth_dev *primary_port =
                        &rte_eth_devices[internals->primary_port];
        uint16_t num_tx_total = 0;
        uint16_t i, j;

        uint16_t num_of_slaves = internals->active_slave_count;
        uint16_t slaves[RTE_MAX_ETHPORTS];

        struct rte_ether_hdr *ether_hdr;
        struct rte_ether_addr primary_slave_addr;
        struct rte_ether_addr active_slave_addr;

        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->tlb_slaves_order,
                                sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

        rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

        if (nb_pkts > 3) {
                for (i = 0; i < 3; i++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
        }

        for (i = 0; i < num_of_slaves; i++) {
                rte_eth_macaddr_get(slaves[i], &active_slave_addr);
                for (j = num_tx_total; j < nb_pkts; j++) {
                        if (j + 3 < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

                        ether_hdr = rte_pktmbuf_mtod(bufs[j],
                                                struct rte_ether_hdr *);
                        if (rte_is_same_ether_addr(&ether_hdr->src_addr,
                                                        &primary_slave_addr))
                                rte_ether_addr_copy(&active_slave_addr,
                                                &ether_hdr->src_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
                }

                num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                bufs + num_tx_total, nb_pkts - num_tx_total);

                if (num_tx_total == nb_pkts)
                        break;
        }

        return num_tx_total;
}

void
bond_tlb_disable(struct bond_dev_private *internals)
{
        rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
        bond_ethdev_update_tlb_slave_cb(internals);
}

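/*
 * Mode 6 (ALB) TX: ARP frames are steered by the ALB client table with
 * their source MAC rewritten to the chosen slave, ARP update packets are
 * generated for known clients when needed, and all other traffic falls
 * back to the TLB transmit policy.
 */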
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_ether_hdr *eth_h;
        uint16_t ether_type, offset;

        struct client_data *client_info;

        /*
         * We create transmit buffers for every slave and one additional to send
         * through tlb. In the worst case every packet will be sent on one port.
         */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
        uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

        /*
         * We create separate transmit buffers for update packets as they won't
         * be counted in num_tx_total.
         */
        struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
        uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

        struct rte_mbuf *upd_pkt;
        size_t pkt_size;

        uint16_t num_send, num_not_send = 0;
        uint16_t num_tx_total = 0;
        uint16_t slave_idx;

        int i, j;

        /* Search tx buffer for ARP packets and forward them to alb */
        for (i = 0; i < nb_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
                        slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

                        /* Change src mac in eth header */
                        rte_eth_macaddr_get(slave_idx, &eth_h->src_addr);

                        /* Add packet to slave tx buffer */
                        slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
                        slave_bufs_pkts[slave_idx]++;
                } else {
                        /* If packet is not ARP, send it with TLB policy */
                        slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
                                        bufs[i];
                        slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
                }
        }

        /* Update connected client ARP tables */
        if (internals->mode6.ntt) {
                for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
                        client_info = &internals->mode6.client_table[i];

                        if (client_info->in_use) {
                                /* Allocate new packet to send ARP update on current slave */
                                upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
                                if (upd_pkt == NULL) {
                                        RTE_BOND_LOG(ERR,
                                                     "Failed to allocate ARP packet from pool");
                                        continue;
                                }
                                pkt_size = sizeof(struct rte_ether_hdr) +
                                        sizeof(struct rte_arp_hdr) +
                                        client_info->vlan_count *
                                        sizeof(struct rte_vlan_hdr);
                                upd_pkt->data_len = pkt_size;
                                upd_pkt->pkt_len = pkt_size;

                                slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
                                                internals);

                                /* Add packet to update tx buffer */
                                update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
                                update_bufs_pkts[slave_idx]++;
                        }
                }
                internals->mode6.ntt = 0;
        }

        /* Send ARP packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (slave_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
                                        slave_bufs[i], slave_bufs_pkts[i]);
                        for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
                                bufs[nb_pkts - 1 - num_not_send - j] =
                                                slave_bufs[i][nb_pkts - 1 - j];
                        }

                        num_tx_total += num_send;
                        num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        /* Print TX stats including update packets */
                        for (j = 0; j < slave_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
                                                        struct rte_ether_hdr *);
                                mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send update packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (update_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
                                        update_bufs_pkts[i]);
                        for (j = num_send; j < update_bufs_pkts[i]; j++) {
                                rte_pktmbuf_free(update_bufs[i][j]);
                        }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        for (j = 0; j < update_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
                                                        struct rte_ether_hdr *);
                                mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send non-ARP packets using tlb policy */
        if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
                num_send = bond_ethdev_tx_burst_tlb(queue,
                                slave_bufs[RTE_MAX_ETHPORTS],
                                slave_bufs_pkts[RTE_MAX_ETHPORTS]);

                for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
                        bufs[nb_pkts - 1 - num_not_send - j] =
                                        slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
                }

                num_tx_total += num_send;
        }

        return num_tx_total;
}

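/*
 * Common balance TX helper: hash each mbuf to one of the supplied slaves
 * using the configured xmit policy, send a burst per slave, and move any
 * untransmitted packets to the tail of bufs so the caller can retry or
 * free them.
 */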
static inline uint16_t
tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
                 uint16_t *slave_port_ids, uint16_t slave_count)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        /* Array to sort mbufs for transmission on each slave into */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
        /* Number of mbufs for transmission on each slave */
        uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
        /* Mapping array generated by hash function to map mbufs to slaves */
        uint16_t bufs_slave_port_idxs[nb_bufs];

        uint16_t slave_tx_count;
        uint16_t total_tx_count = 0, total_tx_fail_count = 0;

        uint16_t i;

        /*
         * Populate each slave's mbuf array with the packets to be sent on it,
         * selecting the output slave with a hash based on the xmit policy
         */
        internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
                        bufs_slave_port_idxs);

        for (i = 0; i < nb_bufs; i++) {
                /* Populate slave mbuf arrays with mbufs for that slave. */
                uint16_t slave_idx = bufs_slave_port_idxs[i];

                slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < slave_count; i++) {
                if (slave_nb_bufs[i] == 0)
                        continue;

                slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
                                bd_tx_q->queue_id, slave_bufs[i],
                                slave_nb_bufs[i]);

                total_tx_count += slave_tx_count;

                /* If tx burst fails move packets to end of bufs */
                if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
                        int slave_tx_fail_count = slave_nb_bufs[i] -
                                        slave_tx_count;
                        total_tx_fail_count += slave_tx_fail_count;
                        memcpy(&bufs[nb_bufs - total_tx_fail_count],
                               &slave_bufs[i][slave_tx_count],
                               slave_tx_fail_count * sizeof(bufs[0]));
                }
        }

        return total_tx_count;
}

static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t slave_count;

        if (unlikely(nb_bufs == 0))
                return 0;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting
         */
        slave_count = internals->active_slave_count;
        if (unlikely(slave_count < 1))
                return 0;

        memcpy(slave_port_ids, internals->active_slaves,
                        sizeof(slave_port_ids[0]) * slave_count);
        return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
                                slave_count);
}

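/*
 * Mode 4 (802.3ad) TX: unless a dedicated control queue is configured,
 * first drain any pending LACP control frames from each slave's tx_ring,
 * then balance the data burst across the slaves currently in the
 * DISTRIBUTING state.
 */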
static inline uint16_t
tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
                bool dedicated_txq)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t slave_count;

        uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t dist_slave_count;

        uint16_t slave_tx_count;

        uint16_t i;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        slave_count = internals->active_slave_count;
        if (unlikely(slave_count < 1))
                return 0;

        memcpy(slave_port_ids, internals->active_slaves,
                        sizeof(slave_port_ids[0]) * slave_count);

        if (dedicated_txq)
                goto skip_tx_ring;

        /* Check for LACP control packets and send if available */
        for (i = 0; i < slave_count; i++) {
                struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
                struct rte_mbuf *ctrl_pkt = NULL;

                if (likely(rte_ring_empty(port->tx_ring)))
                        continue;

                if (rte_ring_dequeue(port->tx_ring,
                                     (void **)&ctrl_pkt) != -ENOENT) {
                        slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
                                        bd_tx_q->queue_id, &ctrl_pkt, 1);
                        /*
                         * re-enqueue LAG control plane packets to buffering
                         * ring if transmission fails so the packet isn't lost.
                         */
                        if (slave_tx_count != 1)
                                rte_ring_enqueue(port->tx_ring, ctrl_pkt);
                }
        }

skip_tx_ring:
        if (unlikely(nb_bufs == 0))
                return 0;

        dist_slave_count = 0;
        for (i = 0; i < slave_count; i++) {
                struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];

                if (ACTOR_STATE(port, DISTRIBUTING))
                        dist_slave_port_ids[dist_slave_count++] =
                                        slave_port_ids[i];
        }

        if (unlikely(dist_slave_count < 1))
                return 0;

        return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
                                dist_slave_count);
}

static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        return tx_burst_8023ad(queue, bufs, nb_bufs, false);
}

static uint16_t
bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}

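/*
 * Mode 3 (broadcast) TX: every packet is sent on every active slave, so
 * each mbuf's reference count is bumped first; on partial failure only
 * the most successful slave's count is reported and the surplus
 * references are freed.
 */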
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint16_t slaves[RTE_MAX_ETHPORTS];
        uint8_t tx_failed_flag = 0;
        uint16_t num_of_slaves;

        uint16_t max_nb_of_tx_pkts = 0;

        int slave_tx_total[RTE_MAX_ETHPORTS];
        int i, most_successful_tx_slave = -1;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return 0;

        /* Increment reference count on mbufs */
        for (i = 0; i < nb_pkts; i++)
                rte_pktmbuf_refcnt_update(bufs[i], num_of_slaves - 1);

        /* Transmit burst on each active slave */
        for (i = 0; i < num_of_slaves; i++) {
                slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        bufs, nb_pkts);

                if (unlikely(slave_tx_total[i] < nb_pkts))
                        tx_failed_flag = 1;

                /* Record the value and slave index for the slave which transmits the
                 * maximum number of packets */
                if (slave_tx_total[i] > max_nb_of_tx_pkts) {
                        max_nb_of_tx_pkts = slave_tx_total[i];
                        most_successful_tx_slave = i;
                }
        }

        /* If slaves fail to transmit packets from burst, the calling application
         * is not expected to know about multiple references to packets so we must
         * handle failures of all packets except those of the most successful slave
         */
        if (unlikely(tx_failed_flag))
                for (i = 0; i < num_of_slaves; i++)
                        if (i != most_successful_tx_slave)
                                while (slave_tx_total[i] < nb_pkts)
                                        rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

        return max_nb_of_tx_pkts;
}

static void
link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
        struct bond_dev_private *bond_ctx = ethdev->data->dev_private;

        if (bond_ctx->mode == BONDING_MODE_8023AD) {
                /**
                 * If in mode 4 then save the link properties of the first
                 * slave, all subsequent slaves must match these properties
                 */
                struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;

                bond_link->link_autoneg = slave_link->link_autoneg;
                bond_link->link_duplex = slave_link->link_duplex;
                bond_link->link_speed = slave_link->link_speed;
        } else {
                /**
                 * In any other mode the link properties are set to default
                 * values of AUTONEG/DUPLEX
                 */
                ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
                ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
        }
}

static int
link_properties_valid(struct rte_eth_dev *ethdev,
                struct rte_eth_link *slave_link)
{
        struct bond_dev_private *bond_ctx = ethdev->data->dev_private;

        if (bond_ctx->mode == BONDING_MODE_8023AD) {
                struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;

                if (bond_link->link_duplex != slave_link->link_duplex ||
                        bond_link->link_autoneg != slave_link->link_autoneg ||
                        bond_link->link_speed != slave_link->link_speed)
                        return -1;
        }

        return 0;
}

1395 int
1396 mac_address_get(struct rte_eth_dev *eth_dev,
1397                 struct rte_ether_addr *dst_mac_addr)
1398 {
1399         struct rte_ether_addr *mac_addr;
1400
1401         if (eth_dev == NULL) {
1402                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1403                 return -1;
1404         }
1405
1406         if (dst_mac_addr == NULL) {
1407                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1408                 return -1;
1409         }
1410
1411         mac_addr = eth_dev->data->mac_addrs;
1412
1413         rte_ether_addr_copy(mac_addr, dst_mac_addr);
1414         return 0;
1415 }
1416
1417 int
1418 mac_address_set(struct rte_eth_dev *eth_dev,
1419                 struct rte_ether_addr *new_mac_addr)
1420 {
1421         struct rte_ether_addr *mac_addr;
1422
1423         if (eth_dev == NULL) {
1424                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1425                 return -1;
1426         }
1427
1428         if (new_mac_addr == NULL) {
1429                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1430                 return -1;
1431         }
1432
1433         mac_addr = eth_dev->data->mac_addrs;
1434
1435         /* If the new MAC differs from the current MAC then update it */
1436         if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1437                 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1438
1439         return 0;
1440 }
1441
1442 static const struct rte_ether_addr null_mac_addr;
1443
1444 /*
1445  * Add additional MAC addresses to the slave
1446  */
1447 int
1448 slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1449                 uint16_t slave_port_id)
1450 {
1451         int i, ret;
1452         struct rte_ether_addr *mac_addr;
1453
1454         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1455                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1456                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1457                         break;
1458
1459                 ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
1460                 if (ret < 0) {
1461                         /* rollback */
1462                         for (i--; i > 0; i--)
1463                                 rte_eth_dev_mac_addr_remove(slave_port_id,
1464                                         &bonded_eth_dev->data->mac_addrs[i]);
1465                         return ret;
1466                 }
1467         }
1468
1469         return 0;
1470 }
1471
1472 /*
1473  * Remove additional MAC addresses from the slave
1474  */
1475 int
1476 slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1477                 uint16_t slave_port_id)
1478 {
1479         int i, rc, ret;
1480         struct rte_ether_addr *mac_addr;
1481
1482         rc = 0;
1483         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1484                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1485                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1486                         break;
1487
1488                 ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
1489                 /* save only the first error */
1490                 if (ret < 0 && rc == 0)
1491                         rc = ret;
1492         }
1493
1494         return rc;
1495 }
1496
1497 int
1498 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1499 {
1500         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1501         bool set;
1502         int i;
1503
1504         /* Update slave devices MAC addresses */
1505         if (internals->slave_count < 1)
1506                 return -1;
1507
1508         switch (internals->mode) {
1509         case BONDING_MODE_ROUND_ROBIN:
1510         case BONDING_MODE_BALANCE:
1511         case BONDING_MODE_BROADCAST:
1512                 for (i = 0; i < internals->slave_count; i++) {
1513                         if (rte_eth_dev_default_mac_addr_set(
1514                                         internals->slaves[i].port_id,
1515                                         bonded_eth_dev->data->mac_addrs)) {
1516                                 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1517                                                 internals->slaves[i].port_id);
1518                                 return -1;
1519                         }
1520                 }
1521                 break;
1522         case BONDING_MODE_8023AD:
1523                 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1524                 break;
1525         case BONDING_MODE_ACTIVE_BACKUP:
1526         case BONDING_MODE_TLB:
1527         case BONDING_MODE_ALB:
1528         default:
1529                 set = true;
1530                 for (i = 0; i < internals->slave_count; i++) {
1531                         if (internals->slaves[i].port_id ==
1532                                         internals->current_primary_port) {
1533                                 if (rte_eth_dev_default_mac_addr_set(
1534                                                 internals->current_primary_port,
1535                                                 bonded_eth_dev->data->mac_addrs)) {
1536                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1537                                                         internals->current_primary_port);
1538                                         set = false;
1539                                 }
1540                         } else {
1541                                 if (rte_eth_dev_default_mac_addr_set(
1542                                                 internals->slaves[i].port_id,
1543                                                 &internals->slaves[i].persisted_mac_addr)) {
1544                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1545                                                         internals->slaves[i].port_id);
1546                                 }
1547                         }
1548                 }
1549                 if (!set)
1550                         return -1;
1551         }
1552
1553         return 0;
1554 }
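
/*
 * Editor's sketch: a user-defined bond MAC set through the public API is
 * what later flows through mac_address_slaves_update() above; the
 * locally-administered address bytes below are purely illustrative.
 */
static __rte_unused int
example_set_bond_mac(uint16_t bond_port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	return rte_eth_bond_mac_address_set(bond_port_id, &addr);
}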
1555
1556 int
1557 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, uint8_t mode)
1558 {
1559         struct bond_dev_private *internals;
1560
1561         internals = eth_dev->data->dev_private;
1562
1563         switch (mode) {
1564         case BONDING_MODE_ROUND_ROBIN:
1565                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1566                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1567                 break;
1568         case BONDING_MODE_ACTIVE_BACKUP:
1569                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1570                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1571                 break;
1572         case BONDING_MODE_BALANCE:
1573                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1574                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1575                 break;
1576         case BONDING_MODE_BROADCAST:
1577                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1578                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1579                 break;
1580         case BONDING_MODE_8023AD:
1581                 if (bond_mode_8023ad_enable(eth_dev) != 0)
1582                         return -1;
1583
1584                 if (internals->mode4.dedicated_queues.enabled == 0) {
1585                         eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1586                         eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1587                         RTE_BOND_LOG(WARNING,
1588                                 "Using mode 4, it is necessary to do TX burst "
1589                                 "and RX burst at least every 100ms.");
1590                 } else {
1591                         /* Use flow director's optimization */
1592                         eth_dev->rx_pkt_burst =
1593                                         bond_ethdev_rx_burst_8023ad_fast_queue;
1594                         eth_dev->tx_pkt_burst =
1595                                         bond_ethdev_tx_burst_8023ad_fast_queue;
1596                 }
1597                 break;
1598         case BONDING_MODE_TLB:
1599                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1600                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1601                 break;
1602         case BONDING_MODE_ALB:
1603                 if (bond_mode_alb_enable(eth_dev) != 0)
1604                         return -1;
1605
1606                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1607                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1608                 break;
1609         default:
1610                 return -1;
1611         }
1612
1613         internals->mode = mode;
1614
1615         return 0;
1616 }
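
/*
 * Editor's sketch: applications normally reach the burst-function table
 * above through the public API rather than this internal helper. The
 * device name and the slave port ids are assumptions for illustration.
 */
static __rte_unused int
example_create_bond(uint16_t slave_a, uint16_t slave_b)
{
	int bond_port;

	bond_port = rte_eth_bond_create("net_bonding_example",
			BONDING_MODE_ACTIVE_BACKUP, 0 /* socket id */);
	if (bond_port < 0)
		return bond_port;

	if (rte_eth_bond_slave_add(bond_port, slave_a) != 0 ||
			rte_eth_bond_slave_add(bond_port, slave_b) != 0)
		return -1;

	/* switching modes later also goes through bond_ethdev_mode_set() */
	return rte_eth_bond_mode_set(bond_port, BONDING_MODE_8023AD);
}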
1617
1618
1619 static int
1620 slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
1621                 struct rte_eth_dev *slave_eth_dev)
1622 {
1623         int errval = 0;
1624         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1625         struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
1626
1627         if (port->slow_pool == NULL) {
1628                 char mem_name[256];
1629                 int slave_id = slave_eth_dev->data->port_id;
1630
1631                 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
1632                                 slave_id);
1633                 port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
1634                         250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
1635                         slave_eth_dev->data->numa_node);
1636
1637                 /* Any memory allocation failure in initialization is critical because
1638                  * resources can't be freed, so reinitialization is impossible. */
1639                 if (port->slow_pool == NULL) {
1640                         rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
1641                                 slave_id, mem_name, rte_strerror(rte_errno));
1642                 }
1643         }
1644
1645         if (internals->mode4.dedicated_queues.enabled == 1) {
1646                 /* Configure slow Rx queue */
1647
1648                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
1649                                 internals->mode4.dedicated_queues.rx_qid, 128,
1650                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1651                                 NULL, port->slow_pool);
1652                 if (errval != 0) {
1653                         RTE_BOND_LOG(ERR,
1654                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1655                                         slave_eth_dev->data->port_id,
1656                                         internals->mode4.dedicated_queues.rx_qid,
1657                                         errval);
1658                         return errval;
1659                 }
1660
1661                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
1662                                 internals->mode4.dedicated_queues.tx_qid, 512,
1663                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1664                                 NULL);
1665                 if (errval != 0) {
1666                         RTE_BOND_LOG(ERR,
1667                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1668                                 slave_eth_dev->data->port_id,
1669                                 internals->mode4.dedicated_queues.tx_qid,
1670                                 errval);
1671                         return errval;
1672                 }
1673         }
1674         return 0;
1675 }
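
/*
 * Editor's sketch: the slow-path Rx/Tx queues configured above are only
 * set up when the application opts in before starting the bonded port.
 * The call is declared in rte_eth_bond_8023ad.h; the port id is assumed.
 */
static __rte_unused int
example_enable_dedicated_queues(uint16_t bond_port_id)
{
	/* must be called while the bonded device is stopped */
	return rte_eth_bond_8023ad_dedicated_queues_enable(bond_port_id);
}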
1676
1677 int
1678 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1679                 struct rte_eth_dev *slave_eth_dev)
1680 {
1681         uint16_t nb_rx_queues;
1682         uint16_t nb_tx_queues;
1683
1684         int errval;
1685
1686         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1687
1688         /* Stop slave */
1689         errval = rte_eth_dev_stop(slave_eth_dev->data->port_id);
1690         if (errval != 0)
1691                 RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)",
1692                              slave_eth_dev->data->port_id, errval);
1693
1694         /* Enable interrupts on slave device if supported */
1695         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1696                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1697
1698         /* If RSS is enabled for bonding, try to enable it for slaves  */
1699         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
1700                 /* rss_key won't be empty if RSS is configured in bonded dev */
1701                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1702                                         internals->rss_key_len;
1703                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1704                                         internals->rss_key;
1705
1706                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1707                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1708                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1709                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1710         } else {
1711                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
1712                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1713                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1714                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1715                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1716         }
1717
1718         slave_eth_dev->data->dev_conf.rxmode.mtu =
1719                         bonded_eth_dev->data->dev_conf.rxmode.mtu;
1720
1721         slave_eth_dev->data->dev_conf.txmode.offloads |=
1722                 bonded_eth_dev->data->dev_conf.txmode.offloads;
1723
1724         slave_eth_dev->data->dev_conf.txmode.offloads &=
1725                 (bonded_eth_dev->data->dev_conf.txmode.offloads |
1726                 ~internals->tx_offload_capa);
1727
1728         slave_eth_dev->data->dev_conf.rxmode.offloads |=
1729                 bonded_eth_dev->data->dev_conf.rxmode.offloads;
1730
1731         slave_eth_dev->data->dev_conf.rxmode.offloads &=
1732                 (bonded_eth_dev->data->dev_conf.rxmode.offloads |
1733                 ~internals->rx_offload_capa);
1734
1735
1736         nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
1737         nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
1738
1739         if (internals->mode == BONDING_MODE_8023AD) {
1740                 if (internals->mode4.dedicated_queues.enabled == 1) {
1741                         nb_rx_queues++;
1742                         nb_tx_queues++;
1743                 }
1744         }
1745
1746         /* Configure device */
1747         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1748                         nb_rx_queues, nb_tx_queues,
1749                         &(slave_eth_dev->data->dev_conf));
1750         if (errval != 0) {
1751                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1752                                 slave_eth_dev->data->port_id, errval);
1753                 return errval;
1754         }
1755
1756         errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
1757                                      bonded_eth_dev->data->mtu);
1758         if (errval != 0 && errval != -ENOTSUP) {
1759                 RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
1760                                 slave_eth_dev->data->port_id, errval);
1761                 return errval;
1762         }
1763         return 0;
1764 }
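
/*
 * Editor's sketch: an RSS configuration applied to the bonded port is
 * what slave_configure() above copies down to each slave. The queue
 * counts and the hash field selection here are assumptions.
 */
static __rte_unused int
example_configure_bond_rss(uint16_t bond_port_id)
{
	struct rte_eth_conf conf = {
		.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS,
		.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP,
	};

	return rte_eth_dev_configure(bond_port_id, 4, 4, &conf);
}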
1765
1766 int
1767 slave_start(struct rte_eth_dev *bonded_eth_dev,
1768                 struct rte_eth_dev *slave_eth_dev)
1769 {
1770         int errval = 0;
1771         struct bond_rx_queue *bd_rx_q;
1772         struct bond_tx_queue *bd_tx_q;
1773         uint16_t q_id;
1774         struct rte_flow_error flow_error;
1775         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1776
1777         /* Setup Rx Queues */
1778         for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1779                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1780
1781                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1782                                 bd_rx_q->nb_rx_desc,
1783                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1784                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1785                 if (errval != 0) {
1786                         RTE_BOND_LOG(ERR,
1787                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1788                                         slave_eth_dev->data->port_id, q_id, errval);
1789                         return errval;
1790                 }
1791         }
1792
1793         /* Setup Tx Queues */
1794         for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1795                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1796
1797                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1798                                 bd_tx_q->nb_tx_desc,
1799                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1800                                 &bd_tx_q->tx_conf);
1801                 if (errval != 0) {
1802                         RTE_BOND_LOG(ERR,
1803                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1804                                 slave_eth_dev->data->port_id, q_id, errval);
1805                         return errval;
1806                 }
1807         }
1808
1809         if (internals->mode == BONDING_MODE_8023AD &&
1810                         internals->mode4.dedicated_queues.enabled == 1) {
1811                 errval = slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev);
1812                 if (errval != 0)
1813                         return errval;
1814
1815                 errval = bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
1816                                 slave_eth_dev->data->port_id);
1817                 if (errval != 0) {
1818                         RTE_BOND_LOG(ERR,
1819                                 "bond_ethdev_8023ad_flow_verify: port=%d, err (%d)",
1820                                 slave_eth_dev->data->port_id, errval);
1821                         return errval;
1822                 }
1823
1824                 if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL) {
1825                         errval = rte_flow_destroy(slave_eth_dev->data->port_id,
1826                                         internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
1827                                         &flow_error);
1828                         if (errval != 0)
1829                                 RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_destroy: port=%d, err (%d)", slave_eth_dev->data->port_id, errval);
1830                 }
1831
1832                 errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
1833                                 slave_eth_dev->data->port_id);
1834                 if (errval != 0) {
1835                         RTE_BOND_LOG(ERR,
1836                                 "bond_ethdev_8023ad_flow_set: port=%d, err (%d)",
1837                                 slave_eth_dev->data->port_id, errval);
1838                         return errval;
1839                 }
1840         }
1841
1842         /* Start device */
1843         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1844         if (errval != 0) {
1845                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1846                                 slave_eth_dev->data->port_id, errval);
1847                 return -1;
1848         }
1849
1850         /* If RSS is enabled for bonding, synchronize RETA */
1851         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
1852                 int i;
1853                 struct bond_dev_private *internals;
1854
1855                 internals = bonded_eth_dev->data->dev_private;
1856
1857                 for (i = 0; i < internals->slave_count; i++) {
1858                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1859                                 errval = rte_eth_dev_rss_reta_update(
1860                                                 slave_eth_dev->data->port_id,
1861                                                 &internals->reta_conf[0],
1862                                                 internals->slaves[i].reta_size);
1863                                 if (errval != 0) {
1864                                         RTE_BOND_LOG(WARNING,
1865                                                      "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1866                                                      " RSS Configuration for bonding may be inconsistent.",
1867                                                      slave_eth_dev->data->port_id, errval);
1868                                 }
1869                                 break;
1870                         }
1871                 }
1872         }
1873
1874         /* If lsc interrupt is set, check initial slave's link status */
1875         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1876                 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1877                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1878                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
1879                         NULL);
1880         }
1881
1882         return 0;
1883 }
1884
1885 void
1886 slave_remove(struct bond_dev_private *internals,
1887                 struct rte_eth_dev *slave_eth_dev)
1888 {
1889         uint16_t i;
1890
1891         for (i = 0; i < internals->slave_count; i++)
1892                 if (internals->slaves[i].port_id ==
1893                                 slave_eth_dev->data->port_id)
1894                         break;
1895
1896         if (i < (internals->slave_count - 1)) {
1897                 struct rte_flow *flow;
1898
1899                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1900                                 sizeof(internals->slaves[0]) *
1901                                 (internals->slave_count - i - 1));
1902                 TAILQ_FOREACH(flow, &internals->flow_list, next) {
1903                         memmove(&flow->flows[i], &flow->flows[i + 1],
1904                                 sizeof(flow->flows[0]) *
1905                                 (internals->slave_count - i - 1));
1906                         flow->flows[internals->slave_count - 1] = NULL;
1907                 }
1908         }
1909
1910         internals->slave_count--;
1911
1912         /* force reconfiguration of slave interfaces */
1913         rte_eth_dev_internal_reset(slave_eth_dev);
1914 }
1915
1916 static void
1917 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1918
1919 void
1920 slave_add(struct bond_dev_private *internals,
1921                 struct rte_eth_dev *slave_eth_dev)
1922 {
1923         struct bond_slave_details *slave_details =
1924                         &internals->slaves[internals->slave_count];
1925
1926         slave_details->port_id = slave_eth_dev->data->port_id;
1927         slave_details->last_link_status = 0;
1928
1929         /* Mark slave devices that don't support interrupts so we can
1930          * compensate when we start the bond
1931          */
1932         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1933                 slave_details->link_status_poll_enabled = 1;
1934         }
1935
1936         slave_details->link_status_wait_to_complete = 0;
1937         /* save the slave's current MAC so it can be restored when removed */
1938         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1939                         sizeof(struct rte_ether_addr));
1940 }
1941
1942 void
1943 bond_ethdev_primary_set(struct bond_dev_private *internals,
1944                 uint16_t slave_port_id)
1945 {
1946         int i;
1947
1948         if (internals->active_slave_count < 1)
1949                 internals->current_primary_port = slave_port_id;
1950         else
1951                 /* Search bonded device slave ports for new proposed primary port */
1952                 for (i = 0; i < internals->active_slave_count; i++) {
1953                         if (internals->active_slaves[i] == slave_port_id)
1954                                 internals->current_primary_port = slave_port_id;
1955                 }
1956 }
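
/*
 * Editor's sketch: the primary slave is chosen through the public
 * wrapper, which validates both port ids before invoking the helper
 * above; the port ids are assumptions.
 */
static __rte_unused int
example_set_primary(uint16_t bond_port_id, uint16_t slave_port_id)
{
	return rte_eth_bond_primary_set(bond_port_id, slave_port_id);
}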
1957
1958 static int
1959 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1960
1961 static int
1962 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1963 {
1964         struct bond_dev_private *internals;
1965         int i;
1966
1967         /* slave eth dev will be started by bonded device */
1968         if (check_for_bonded_ethdev(eth_dev)) {
1969                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1970                                 eth_dev->data->port_id);
1971                 return -1;
1972         }
1973
1974         eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
1975         eth_dev->data->dev_started = 1;
1976
1977         internals = eth_dev->data->dev_private;
1978
1979         if (internals->slave_count == 0) {
1980                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1981                 goto out_err;
1982         }
1983
1984         if (internals->user_defined_mac == 0) {
1985                 struct rte_ether_addr *new_mac_addr = NULL;
1986
1987                 for (i = 0; i < internals->slave_count; i++)
1988                         if (internals->slaves[i].port_id == internals->primary_port)
1989                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1990
1991                 if (new_mac_addr == NULL)
1992                         goto out_err;
1993
1994                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1995                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1996                                         eth_dev->data->port_id);
1997                         goto out_err;
1998                 }
1999         }
2000
2001         if (internals->mode == BONDING_MODE_8023AD) {
2002                 if (internals->mode4.dedicated_queues.enabled == 1) {
2003                         internals->mode4.dedicated_queues.rx_qid =
2004                                         eth_dev->data->nb_rx_queues;
2005                         internals->mode4.dedicated_queues.tx_qid =
2006                                         eth_dev->data->nb_tx_queues;
2007                 }
2008         }
2009
2010
2011         /* Reconfigure each slave device if starting bonded device */
2012         for (i = 0; i < internals->slave_count; i++) {
2013                 struct rte_eth_dev *slave_ethdev =
2014                                 &(rte_eth_devices[internals->slaves[i].port_id]);
2015                 if (slave_configure(eth_dev, slave_ethdev) != 0) {
2016                         RTE_BOND_LOG(ERR,
2017                                 "bonded port (%d) failed to reconfigure slave device (%d)",
2018                                 eth_dev->data->port_id,
2019                                 internals->slaves[i].port_id);
2020                         goto out_err;
2021                 }
2022                 if (slave_start(eth_dev, slave_ethdev) != 0) {
2023                         RTE_BOND_LOG(ERR,
2024                                 "bonded port (%d) failed to start slave device (%d)",
2025                                 eth_dev->data->port_id,
2026                                 internals->slaves[i].port_id);
2027                         goto out_err;
2028                 }
2029                 /* We will need to poll for link status if any slave doesn't
2030                  * support interrupts
2031                  */
2032                 if (internals->slaves[i].link_status_poll_enabled)
2033                         internals->link_status_polling_enabled = 1;
2034         }
2035
2036         /* start polling if needed */
2037         if (internals->link_status_polling_enabled) {
2038                 rte_eal_alarm_set(
2039                         internals->link_status_polling_interval_ms * 1000,
2040                         bond_ethdev_slave_link_status_change_monitor,
2041                         (void *)&rte_eth_devices[internals->port_id]);
2042         }
2043
2044         /* Update all slave devices' MACs */
2045         if (mac_address_slaves_update(eth_dev) != 0)
2046                 goto out_err;
2047
2048         if (internals->user_defined_primary_port)
2049                 bond_ethdev_primary_set(internals, internals->primary_port);
2050
2051         if (internals->mode == BONDING_MODE_8023AD)
2052                 bond_mode_8023ad_start(eth_dev);
2053
2054         if (internals->mode == BONDING_MODE_TLB ||
2055                         internals->mode == BONDING_MODE_ALB)
2056                 bond_tlb_enable(internals);
2057
2058         return 0;
2059
2060 out_err:
2061         eth_dev->data->dev_started = 0;
2062         return -1;
2063 }
2064
2065 static void
2066 bond_ethdev_free_queues(struct rte_eth_dev *dev)
2067 {
2068         uint16_t i;
2069
2070         if (dev->data->rx_queues != NULL) {
2071                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2072                         rte_free(dev->data->rx_queues[i]);
2073                         dev->data->rx_queues[i] = NULL;
2074                 }
2075                 dev->data->nb_rx_queues = 0;
2076         }
2077
2078         if (dev->data->tx_queues != NULL) {
2079                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2080                         rte_free(dev->data->tx_queues[i]);
2081                         dev->data->tx_queues[i] = NULL;
2082                 }
2083                 dev->data->nb_tx_queues = 0;
2084         }
2085 }
2086
2087 int
2088 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
2089 {
2090         struct bond_dev_private *internals = eth_dev->data->dev_private;
2091         uint16_t i;
2092         int ret;
2093
2094         if (internals->mode == BONDING_MODE_8023AD) {
2095                 struct port *port;
2096                 void *pkt = NULL;
2097
2098                 bond_mode_8023ad_stop(eth_dev);
2099
2100                 /* Discard all messages to/from mode 4 state machines */
2101                 for (i = 0; i < internals->active_slave_count; i++) {
2102                         port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
2103
2104                         RTE_ASSERT(port->rx_ring != NULL);
2105                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
2106                                 rte_pktmbuf_free(pkt);
2107
2108                         RTE_ASSERT(port->tx_ring != NULL);
2109                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
2110                                 rte_pktmbuf_free(pkt);
2111                 }
2112         }
2113
2114         if (internals->mode == BONDING_MODE_TLB ||
2115                         internals->mode == BONDING_MODE_ALB) {
2116                 bond_tlb_disable(internals);
2117                 for (i = 0; i < internals->active_slave_count; i++)
2118                         tlb_last_obytets[internals->active_slaves[i]] = 0;
2119         }
2120
2121         eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
2122         eth_dev->data->dev_started = 0;
2123
2124         internals->link_status_polling_enabled = 0;
2125         for (i = 0; i < internals->slave_count; i++) {
2126                 uint16_t slave_id = internals->slaves[i].port_id;
2127
2128                 internals->slaves[i].last_link_status = 0;
2129                 ret = rte_eth_dev_stop(slave_id);
2130                 if (ret != 0) {
2131                         RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
2132                                      slave_id);
2133                         return ret;
2134                 }
2135
2136                 /* active slaves need to be deactivated. */
2137                 if (find_slave_by_id(internals->active_slaves,
2138                                 internals->active_slave_count, slave_id) !=
2139                                         internals->active_slave_count)
2140                         deactivate_slave(eth_dev, slave_id);
2141         }
2142
2143         return 0;
2144 }
2145
2146 int
2147 bond_ethdev_close(struct rte_eth_dev *dev)
2148 {
2149         struct bond_dev_private *internals = dev->data->dev_private;
2150         uint16_t bond_port_id = internals->port_id;
2151         int skipped = 0;
2152         struct rte_flow_error ferror;
2153
2154         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2155                 return 0;
2156
2157         RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
2158         while (internals->slave_count != skipped) {
2159                 uint16_t port_id = internals->slaves[skipped].port_id;
2160
2161                 if (rte_eth_dev_stop(port_id) != 0) {
2162                         RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
2163                                      port_id);
2164                         skipped++;
2165                         continue;
2166                 }
2167
2168                 if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
2169                         RTE_BOND_LOG(ERR,
2170                                      "Failed to remove port %d from bonded device %s",
2171                                      port_id, dev->device->name);
2172                         skipped++;
2173                 }
2174         }
2175         bond_flow_ops.flush(dev, &ferror);
2176         bond_ethdev_free_queues(dev);
2177         rte_bitmap_reset(internals->vlan_filter_bmp);
2178         rte_bitmap_free(internals->vlan_filter_bmp);
2179         rte_free(internals->vlan_filter_bmpmem);
2180
2181         /* Try to release the mempool used in mode 6. If the bonded
2182          * device is not in mode 6, freeing the NULL pointer is harmless.
2183          */
2184         rte_mempool_free(internals->mode6.mempool);
2185
2186         rte_kvargs_free(internals->kvlist);
2187
2188         return 0;
2189 }
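
/*
 * Editor's sketch: an orderly application teardown mirrors the two
 * handlers above; stop drains the mode-4 rings, and close then removes
 * the slaves and releases the queues. The port id is an assumption.
 */
static __rte_unused void
example_shutdown_bond(uint16_t bond_port_id)
{
	if (rte_eth_dev_stop(bond_port_id) != 0)
		RTE_BOND_LOG(ERR, "Failed to stop port %u", bond_port_id);
	if (rte_eth_dev_close(bond_port_id) != 0)
		RTE_BOND_LOG(ERR, "Failed to close port %u", bond_port_id);
}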
2190
2191 /* forward declaration */
2192 static int bond_ethdev_configure(struct rte_eth_dev *dev);
2193
2194 static int
2195 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2196 {
2197         struct bond_dev_private *internals = dev->data->dev_private;
2198         struct bond_slave_details slave;
2199         int ret;
2200
2201         uint16_t max_nb_rx_queues = UINT16_MAX;
2202         uint16_t max_nb_tx_queues = UINT16_MAX;
2203         uint16_t max_rx_desc_lim = UINT16_MAX;
2204         uint16_t max_tx_desc_lim = UINT16_MAX;
2205
2206         dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
2207
2208         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2209                         internals->candidate_max_rx_pktlen :
2210                         RTE_ETHER_MAX_JUMBO_FRAME_LEN;
2211
2212         /* The max number of tx/rx queues that the bonded device can support
2213          * is the minimum of the values reported by the slaves, as all slaves
2214          * must be capable of supporting the same number of tx/rx queues.
2215          */
2216         if (internals->slave_count > 0) {
2217                 struct rte_eth_dev_info slave_info;
2218                 uint16_t idx;
2219
2220                 for (idx = 0; idx < internals->slave_count; idx++) {
2221                         slave = internals->slaves[idx];
2222                         ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
2223                         if (ret != 0) {
2224                                 RTE_BOND_LOG(ERR,
2225                                         "%s: Error during getting device (port %u) info: %s",
2226                                         __func__,
2227                                         slave.port_id,
2228                                         strerror(-ret));
2229
2230                                 return ret;
2231                         }
2232
2233                         if (slave_info.max_rx_queues < max_nb_rx_queues)
2234                                 max_nb_rx_queues = slave_info.max_rx_queues;
2235
2236                         if (slave_info.max_tx_queues < max_nb_tx_queues)
2237                                 max_nb_tx_queues = slave_info.max_tx_queues;
2238
2239                         if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
2240                                 max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
2241
2242                         if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
2243                                 max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
2244                 }
2245         }
2246
2247         dev_info->max_rx_queues = max_nb_rx_queues;
2248         dev_info->max_tx_queues = max_nb_tx_queues;
2249
2250         memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2251                sizeof(dev_info->default_rxconf));
2252         memcpy(&dev_info->default_txconf, &internals->default_txconf,
2253                sizeof(dev_info->default_txconf));
2254
2255         dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
2256         dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
2257
2258         /**
2259          * If dedicated hw queues are enabled for the bonding device in LACP mode,
2260          * then we need to reduce the maximum number of data path queues by 1.
2261          */
2262         if (internals->mode == BONDING_MODE_8023AD &&
2263                 internals->mode4.dedicated_queues.enabled == 1) {
2264                 dev_info->max_rx_queues--;
2265                 dev_info->max_tx_queues--;
2266         }
2267
2268         dev_info->min_rx_bufsize = 0;
2269
2270         dev_info->rx_offload_capa = internals->rx_offload_capa;
2271         dev_info->tx_offload_capa = internals->tx_offload_capa;
2272         dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2273         dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2274         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2275
2276         dev_info->reta_size = internals->reta_size;
2277         dev_info->hash_key_size = internals->rss_key_len;
2278
2279         return 0;
2280 }
2281
2282 static int
2283 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2284 {
2285         int res;
2286         uint16_t i;
2287         struct bond_dev_private *internals = dev->data->dev_private;
2288
2289         /* don't do this while a slave is being added */
2290         rte_spinlock_lock(&internals->lock);
2291
2292         if (on)
2293                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2294         else
2295                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2296
2297         for (i = 0; i < internals->slave_count; i++) {
2298                 uint16_t port_id = internals->slaves[i].port_id;
2299
2300                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2301                 if (res == -ENOTSUP)
2302                         RTE_BOND_LOG(WARNING,
2303                                      "Setting VLAN filter on slave port %u not supported.",
2304                                      port_id);
2305         }
2306
2307         rte_spinlock_unlock(&internals->lock);
2308         return 0;
2309 }
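
/*
 * Editor's sketch: a VLAN filter set on the bonded port is fanned out
 * to every slave by the handler above; VLAN id 100 is an assumption.
 */
static __rte_unused int
example_allow_vlan_100(uint16_t bond_port_id)
{
	return rte_eth_dev_vlan_filter(bond_port_id, 100, 1 /* on */);
}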
2310
2311 static int
2312 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
2313                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
2314                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
2315 {
2316         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
2317                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
2318                                         0, dev->data->numa_node);
2319         if (bd_rx_q == NULL)
2320                 return -1;
2321
2322         bd_rx_q->queue_id = rx_queue_id;
2323         bd_rx_q->dev_private = dev->data->dev_private;
2324
2325         bd_rx_q->nb_rx_desc = nb_rx_desc;
2326
2327         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
2328         bd_rx_q->mb_pool = mb_pool;
2329
2330         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
2331
2332         return 0;
2333 }
2334
2335 static int
2336 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
2337                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
2338                 const struct rte_eth_txconf *tx_conf)
2339 {
2340         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
2341                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
2342                                         0, dev->data->numa_node);
2343
2344         if (bd_tx_q == NULL)
2345                 return -1;
2346
2347         bd_tx_q->queue_id = tx_queue_id;
2348         bd_tx_q->dev_private = dev->data->dev_private;
2349
2350         bd_tx_q->nb_tx_desc = nb_tx_desc;
2351         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
2352
2353         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
2354
2355         return 0;
2356 }
2357
2358 static void
2359 bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
2360 {
2361         void *queue = dev->data->rx_queues[queue_id];
2362
2363         if (queue == NULL)
2364                 return;
2365
2366         rte_free(queue);
2367 }
2368
2369 static void
2370 bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
2371 {
2372         void *queue = dev->data->tx_queues[queue_id];
2373
2374         if (queue == NULL)
2375                 return;
2376
2377         rte_free(queue);
2378 }
2379
2380 static void
2381 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
2382 {
2383         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
2384         struct bond_dev_private *internals;
2385
2386         /* Default value for polling slave found is true as we don't want to
2387          * disable the polling thread if we cannot get the lock */
2388         int i, polling_slave_found = 1;
2389
2390         if (cb_arg == NULL)
2391                 return;
2392
2393         bonded_ethdev = cb_arg;
2394         internals = bonded_ethdev->data->dev_private;
2395
2396         if (!bonded_ethdev->data->dev_started ||
2397                 !internals->link_status_polling_enabled)
2398                 return;
2399
2400         /* If the device is currently being configured, don't check the slaves'
2401          * link status; wait until the next period */
2402         if (rte_spinlock_trylock(&internals->lock)) {
2403                 if (internals->slave_count > 0)
2404                         polling_slave_found = 0;
2405
2406                 for (i = 0; i < internals->slave_count; i++) {
2407                         if (!internals->slaves[i].link_status_poll_enabled)
2408                                 continue;
2409
2410                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
2411                         polling_slave_found = 1;
2412
2413                         /* Update slave link status */
2414                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
2415                                         internals->slaves[i].link_status_wait_to_complete);
2416
2417                         /* if link status has changed since last checked then call lsc
2418                          * event callback */
2419                         if (slave_ethdev->data->dev_link.link_status !=
2420                                         internals->slaves[i].last_link_status) {
2421                                 internals->slaves[i].last_link_status =
2422                                                 slave_ethdev->data->dev_link.link_status;
2423
2424                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
2425                                                 RTE_ETH_EVENT_INTR_LSC,
2426                                                 &bonded_ethdev->data->port_id,
2427                                                 NULL);
2428                         }
2429                 }
2430                 rte_spinlock_unlock(&internals->lock);
2431         }
2432
2433         if (polling_slave_found)
2434                 /* Set alarm to continue monitoring link status of slave ethdev's */
2435                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
2436                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
2437 }
2438
2439 static int
2440 bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2441 {
2442         int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
2443
2444         struct bond_dev_private *bond_ctx;
2445         struct rte_eth_link slave_link;
2446
2447         bool one_link_update_succeeded;
2448         uint32_t idx;
2449         int ret;
2450
2451         bond_ctx = ethdev->data->dev_private;
2452
2453         ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2454
2455         if (ethdev->data->dev_started == 0 ||
2456                         bond_ctx->active_slave_count == 0) {
2457                 ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
2458                 return 0;
2459         }
2460
2461         ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
2462
2463         if (wait_to_complete)
2464                 link_update = rte_eth_link_get;
2465         else
2466                 link_update = rte_eth_link_get_nowait;
2467
2468         switch (bond_ctx->mode) {
2469         case BONDING_MODE_BROADCAST:
2470                 /**
2471                  * Setting link speed to UINT32_MAX to ensure we pick up the
2472                  * value of the first active slave
2473                  */
2474                 ethdev->data->dev_link.link_speed = UINT32_MAX;
2475
2476                 /**
2477                  * the link speed is the minimum of all the slaves' link speeds,
2478                  * as packet loss would occur on the slowest slave if transmission
2479                  * at a greater rate were attempted
2480                  */
2481                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2482                         ret = link_update(bond_ctx->active_slaves[idx],
2483                                           &slave_link);
2484                         if (ret < 0) {
2485                                 ethdev->data->dev_link.link_speed =
2486                                         RTE_ETH_SPEED_NUM_NONE;
2487                                 RTE_BOND_LOG(ERR,
2488                                         "Slave (port %u) link get failed: %s",
2489                                         bond_ctx->active_slaves[idx],
2490                                         rte_strerror(-ret));
2491                                 return 0;
2492                         }
2493
2494                         if (slave_link.link_speed <
2495                                         ethdev->data->dev_link.link_speed)
2496                                 ethdev->data->dev_link.link_speed =
2497                                                 slave_link.link_speed;
2498                 }
2499                 break;
2500         case BONDING_MODE_ACTIVE_BACKUP:
2501                 /* Current primary slave */
2502                 ret = link_update(bond_ctx->current_primary_port, &slave_link);
2503                 if (ret < 0) {
2504                         RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
2505                                 bond_ctx->current_primary_port,
2506                                 rte_strerror(-ret));
2507                         return 0;
2508                 }
2509
2510                 ethdev->data->dev_link.link_speed = slave_link.link_speed;
2511                 break;
2512         case BONDING_MODE_8023AD:
2513                 ethdev->data->dev_link.link_autoneg =
2514                                 bond_ctx->mode4.slave_link.link_autoneg;
2515                 ethdev->data->dev_link.link_duplex =
2516                                 bond_ctx->mode4.slave_link.link_duplex;
2517                 /* fall through */
2518                 /* to update link speed */
2519         case BONDING_MODE_ROUND_ROBIN:
2520         case BONDING_MODE_BALANCE:
2521         case BONDING_MODE_TLB:
2522         case BONDING_MODE_ALB:
2523         default:
2524                 /**
2525                  * In these modes the maximum theoretical link speed is the sum
2526                  * of all the slaves' link speeds
2527                  */
2528                 ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2529                 one_link_update_succeeded = false;
2530
2531                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2532                         ret = link_update(bond_ctx->active_slaves[idx],
2533                                         &slave_link);
2534                         if (ret < 0) {
2535                                 RTE_BOND_LOG(ERR,
2536                                         "Slave (port %u) link get failed: %s",
2537                                         bond_ctx->active_slaves[idx],
2538                                         rte_strerror(-ret));
2539                                 continue;
2540                         }
2541
2542                         one_link_update_succeeded = true;
2543                         ethdev->data->dev_link.link_speed +=
2544                                         slave_link.link_speed;
2545                 }
2546
2547                 if (!one_link_update_succeeded) {
2548                         RTE_BOND_LOG(ERR, "All slaves link get failed");
2549                         return 0;
2550                 }
2551         }
2552
2553
2554         return 0;
2555 }
2556
2557
2558 static int
2559 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2560 {
2561         struct bond_dev_private *internals = dev->data->dev_private;
2562         struct rte_eth_stats slave_stats;
2563         int i, j;
2564
2565         for (i = 0; i < internals->slave_count; i++) {
2566                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
2567
2568                 stats->ipackets += slave_stats.ipackets;
2569                 stats->opackets += slave_stats.opackets;
2570                 stats->ibytes += slave_stats.ibytes;
2571                 stats->obytes += slave_stats.obytes;
2572                 stats->imissed += slave_stats.imissed;
2573                 stats->ierrors += slave_stats.ierrors;
2574                 stats->oerrors += slave_stats.oerrors;
2575                 stats->rx_nombuf += slave_stats.rx_nombuf;
2576
2577                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
2578                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
2579                         stats->q_opackets[j] += slave_stats.q_opackets[j];
2580                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
2581                         stats->q_obytes[j] += slave_stats.q_obytes[j];
2582                         stats->q_errors[j] += slave_stats.q_errors[j];
2583                 }
2584
2585         }
2586
2587         return 0;
2588 }
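
/*
 * Editor's sketch: stats read on the bonded port are already the sums
 * computed above, so no per-slave queries are needed. Assumes
 * <inttypes.h> for PRIu64, as elsewhere in DPDK.
 */
static __rte_unused void
example_log_bond_stats(uint16_t bond_port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(bond_port_id, &stats) == 0)
		RTE_BOND_LOG(INFO,
			"rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64,
			stats.ipackets, stats.opackets, stats.imissed);
}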
2589
2590 static int
2591 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
2592 {
2593         struct bond_dev_private *internals = dev->data->dev_private;
2594         int i;
2595         int err;
2596         int ret;
2597
2598         for (i = 0, err = 0; i < internals->slave_count; i++) {
2599                 ret = rte_eth_stats_reset(internals->slaves[i].port_id);
2600                 if (ret != 0)
2601                         err = ret;
2602         }
2603
2604         return err;
2605 }
2606
2607 static int
2608 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2609 {
2610         struct bond_dev_private *internals = eth_dev->data->dev_private;
2611         int i;
2612         int ret = 0;
2613         uint16_t port_id;
2614
2615         switch (internals->mode) {
2616         /* Promiscuous mode is propagated to all slaves */
2617         case BONDING_MODE_ROUND_ROBIN:
2618         case BONDING_MODE_BALANCE:
2619         case BONDING_MODE_BROADCAST:
2620         case BONDING_MODE_8023AD: {
2621                 unsigned int slave_ok = 0;
2622
2623                 for (i = 0; i < internals->slave_count; i++) {
2624                         port_id = internals->slaves[i].port_id;
2625
2626                         ret = rte_eth_promiscuous_enable(port_id);
2627                         if (ret != 0)
2628                                 RTE_BOND_LOG(ERR,
2629                                         "Failed to enable promiscuous mode for port %u: %s",
2630                                         port_id, rte_strerror(-ret));
2631                         else
2632                                 slave_ok++;
2633                 }
2634                 /*
2635                  * Report success if the operation succeeded on at least
2636                  * one slave. Otherwise return the last error code.
2637                  */
2638                 if (slave_ok > 0)
2639                         ret = 0;
2640                 break;
2641         }
2642         /* Promiscuous mode is propagated only to primary slave */
2643         case BONDING_MODE_ACTIVE_BACKUP:
2644         case BONDING_MODE_TLB:
2645         case BONDING_MODE_ALB:
2646         default:
2647                 /* Do not touch promisc when there cannot be primary ports */
2648                 if (internals->slave_count == 0)
2649                         break;
2650                 port_id = internals->current_primary_port;
2651                 ret = rte_eth_promiscuous_enable(port_id);
2652                 if (ret != 0)
2653                         RTE_BOND_LOG(ERR,
2654                                 "Failed to enable promiscuous mode for port %u: %s",
2655                                 port_id, rte_strerror(-ret));
2656         }
2657
2658         return ret;
2659 }
2660
2661 static int
2662 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
2663 {
2664         struct bond_dev_private *internals = dev->data->dev_private;
2665         int i;
2666         int ret = 0;
2667         uint16_t port_id;
2668
2669         switch (internals->mode) {
2670         /* Promiscuous mode is propagated to all slaves */
2671         case BONDING_MODE_ROUND_ROBIN:
2672         case BONDING_MODE_BALANCE:
2673         case BONDING_MODE_BROADCAST:
2674         case BONDING_MODE_8023AD: {
2675                 unsigned int slave_ok = 0;
2676
2677                 for (i = 0; i < internals->slave_count; i++) {
2678                         port_id = internals->slaves[i].port_id;
2679
2680                         if (internals->mode == BONDING_MODE_8023AD &&
2681                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2682                                         BOND_8023AD_FORCED_PROMISC) {
2683                                 slave_ok++;
2684                                 continue;
2685                         }
2686                         ret = rte_eth_promiscuous_disable(port_id);
2687                         if (ret != 0)
2688                                 RTE_BOND_LOG(ERR,
2689                                         "Failed to disable promiscuous mode for port %u: %s",
2690                                         port_id, rte_strerror(-ret));
2691                         else
2692                                 slave_ok++;
2693                 }
2694                 /*
2695                  * Report success if the operation succeeded on at least
2696                  * one slave. Otherwise return the last error code.
2697                  */
2698                 if (slave_ok > 0)
2699                         ret = 0;
2700                 break;
2701         }
2702         /* Promiscuous mode is propagated only to primary slave */
2703         case BONDING_MODE_ACTIVE_BACKUP:
2704         case BONDING_MODE_TLB:
2705         case BONDING_MODE_ALB:
2706         default:
2707                 /* Do not touch promisc when there cannot be primary ports */
2708                 if (internals->slave_count == 0)
2709                         break;
2710                 port_id = internals->current_primary_port;
2711                 ret = rte_eth_promiscuous_disable(port_id);
2712                 if (ret != 0)
2713                         RTE_BOND_LOG(ERR,
2714                                 "Failed to disable promiscuous mode for port %u: %s",
2715                                 port_id, rte_strerror(-ret));
2716         }
2717
2718         return ret;
2719 }
2720
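/*
 * Re-apply the bonded device's promiscuous setting to the current primary
 * slave. Only meaningful for the modes that propagate promiscuous mode to
 * the primary slave alone; the modes that propagate it to all slaves need
 * no update on a primary switchover.
 */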
2721 static int
2722 bond_ethdev_promiscuous_update(struct rte_eth_dev *dev)
2723 {
2724         struct bond_dev_private *internals = dev->data->dev_private;
2725         uint16_t port_id = internals->current_primary_port;
2726
2727         switch (internals->mode) {
2728         case BONDING_MODE_ROUND_ROBIN:
2729         case BONDING_MODE_BALANCE:
2730         case BONDING_MODE_BROADCAST:
2731         case BONDING_MODE_8023AD:
2732                 /* Promiscuous mode is propagated to all slaves in these
2733                  * modes, so there is nothing to update on the bonding device.
2734                  */
2735                 break;
2736         case BONDING_MODE_ACTIVE_BACKUP:
2737         case BONDING_MODE_TLB:
2738         case BONDING_MODE_ALB:
2739         default:
2740                 /* Promiscuous mode is propagated only to the primary slave
2741                  * for these modes. On an active/standby switchover, the
2742                  * promiscuous setting of the new primary slave must be updated
2743                  * to match the bonding device.
2744                  */
2745                 if (rte_eth_promiscuous_get(internals->port_id) == 1)
2746                         rte_eth_promiscuous_enable(port_id);
2747                 else
2748                         rte_eth_promiscuous_disable(port_id);
2749         }
2750
2751         return 0;
2752 }
2753
2754 static int
2755 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
2756 {
2757         struct bond_dev_private *internals = eth_dev->data->dev_private;
2758         int i;
2759         int ret = 0;
2760         uint16_t port_id;
2761
2762         switch (internals->mode) {
2763         /* allmulti mode is propagated to all slaves */
2764         case BONDING_MODE_ROUND_ROBIN:
2765         case BONDING_MODE_BALANCE:
2766         case BONDING_MODE_BROADCAST:
2767         case BONDING_MODE_8023AD: {
2768                 unsigned int slave_ok = 0;
2769
2770                 for (i = 0; i < internals->slave_count; i++) {
2771                         port_id = internals->slaves[i].port_id;
2772
2773                         ret = rte_eth_allmulticast_enable(port_id);
2774                         if (ret != 0)
2775                                 RTE_BOND_LOG(ERR,
2776                                         "Failed to enable allmulti mode for port %u: %s",
2777                                         port_id, rte_strerror(-ret));
2778                         else
2779                                 slave_ok++;
2780                 }
2781                 /*
2782                  * Report success if the operation succeeded on at least
2783                  * one slave. Otherwise return the last error code.
2784                  */
2785                 if (slave_ok > 0)
2786                         ret = 0;
2787                 break;
2788         }
2789         /* allmulti mode is propagated only to primary slave */
2790         case BONDING_MODE_ACTIVE_BACKUP:
2791         case BONDING_MODE_TLB:
2792         case BONDING_MODE_ALB:
2793         default:
2794                 /* Do not touch allmulti when there cannot be primary ports */
2795                 if (internals->slave_count == 0)
2796                         break;
2797                 port_id = internals->current_primary_port;
2798                 ret = rte_eth_allmulticast_enable(port_id);
2799                 if (ret != 0)
2800                         RTE_BOND_LOG(ERR,
2801                                 "Failed to enable allmulti mode for port %u: %s",
2802                                 port_id, rte_strerror(-ret));
2803         }
2804
2805         return ret;
2806 }
2807
2808 static int
2809 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
2810 {
2811         struct bond_dev_private *internals = eth_dev->data->dev_private;
2812         int i;
2813         int ret = 0;
2814         uint16_t port_id;
2815
2816         switch (internals->mode) {
2817         /* allmulti mode is propagated to all slaves */
2818         case BONDING_MODE_ROUND_ROBIN:
2819         case BONDING_MODE_BALANCE:
2820         case BONDING_MODE_BROADCAST:
2821         case BONDING_MODE_8023AD: {
2822                 unsigned int slave_ok = 0;
2823
2824                 for (i = 0; i < internals->slave_count; i++) {
2825                         uint16_t port_id = internals->slaves[i].port_id;
2826
2827                         if (internals->mode == BONDING_MODE_8023AD &&
2828                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2829                                         BOND_8023AD_FORCED_ALLMULTI)
2830                                 continue;
2831
2832                         ret = rte_eth_allmulticast_disable(port_id);
2833                         if (ret != 0)
2834                                 RTE_BOND_LOG(ERR,
2835                                         "Failed to disable allmulti mode for port %u: %s",
2836                                         port_id, rte_strerror(-ret));
2837                         else
2838                                 slave_ok++;
2839                 }
2840                 /*
2841                  * Report success if the operation succeeded on at least
2842                  * one slave. Otherwise return the last error code.
2843                  */
2844                 if (slave_ok > 0)
2845                         ret = 0;
2846                 break;
2847         }
2848         /* allmulti mode is propagated only to primary slave */
2849         case BONDING_MODE_ACTIVE_BACKUP:
2850         case BONDING_MODE_TLB:
2851         case BONDING_MODE_ALB:
2852         default:
2853                 /* Do not touch allmulti when there cannot be primary ports */
2854                 if (internals->slave_count == 0)
2855                         break;
2856                 port_id = internals->current_primary_port;
2857                 ret = rte_eth_allmulticast_disable(port_id);
2858                 if (ret != 0)
2859                         RTE_BOND_LOG(ERR,
2860                                 "Failed to disable allmulti mode for port %u: %s",
2861                                 port_id, rte_strerror(-ret));
2862         }
2863
2864         return ret;
2865 }
2866
2867 static int
2868 bond_ethdev_allmulticast_update(struct rte_eth_dev *dev)
2869 {
2870         struct bond_dev_private *internals = dev->data->dev_private;
2871         uint16_t port_id = internals->current_primary_port;
2872
2873         switch (internals->mode) {
2874         case BONDING_MODE_ROUND_ROBIN:
2875         case BONDING_MODE_BALANCE:
2876         case BONDING_MODE_BROADCAST:
2877         case BONDING_MODE_8023AD:
2878                 /* Allmulticast mode is propagated to all slaves in these
2879                  * modes, so there is nothing to update on the bonding device.
2880                  */
2881                 break;
2882         case BONDING_MODE_ACTIVE_BACKUP:
2883         case BONDING_MODE_TLB:
2884         case BONDING_MODE_ALB:
2885         default:
2886                 /* Allmulticast mode is propagated only to the primary slave
2887                  * for these modes. On an active/standby switchover, the
2888                  * allmulticast setting of the new primary slave must be updated
2889                  * to match the bonding device.
2890                  */
2891                 if (rte_eth_allmulticast_get(internals->port_id) == 1)
2892                         rte_eth_allmulticast_enable(port_id);
2893                 else
2894                         rte_eth_allmulticast_disable(port_id);
2895         }
2896
2897         return 0;
2898 }
2899
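/*
 * Alarm callback used to defer the link status change notification to the
 * application when up/down propagation delays are configured.
 */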
2900 static void
2901 bond_ethdev_delayed_lsc_propagation(void *arg)
2902 {
2903         if (arg == NULL)
2904                 return;
2905
2906         rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2907                         RTE_ETH_EVENT_INTR_LSC, NULL);
2908 }
2909
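/*
 * Link status change callback registered on every slave port. A slave
 * coming up is activated (and becomes primary if it is the first active
 * slave); a slave going down is deactivated and, if it was the primary,
 * a new primary is elected. The resulting bonded link state is then
 * propagated to the application, optionally deferred by the configured
 * up/down delays.
 */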
2910 int
2911 bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2912                 void *param, void *ret_param __rte_unused)
2913 {
2914         struct rte_eth_dev *bonded_eth_dev;
2915         struct bond_dev_private *internals;
2916         struct rte_eth_link link;
2917         int rc = -1;
2918         int ret;
2919
2920         uint8_t lsc_flag = 0;
2921         int valid_slave = 0;
2922         uint16_t active_pos;
2923         uint16_t i;
2924
2925         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2926                 return rc;
2927
2928         bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
2929
2930         if (check_for_bonded_ethdev(bonded_eth_dev))
2931                 return rc;
2932
2933         internals = bonded_eth_dev->data->dev_private;
2934
2935         /* If the device isn't started, don't handle interrupts */
2936         if (!bonded_eth_dev->data->dev_started)
2937                 return rc;
2938
2939         /* verify that port_id is a valid slave of bonded port */
2940         for (i = 0; i < internals->slave_count; i++) {
2941                 if (internals->slaves[i].port_id == port_id) {
2942                         valid_slave = 1;
2943                         break;
2944                 }
2945         }
2946
2947         if (!valid_slave)
2948                 return rc;
2949
2950         /* Serialize parallel lsc callback invocations, whether triggered by a
2951          * real link event from the slave PMDs or by the bonding PMD itself.
2952          */
2953         rte_spinlock_lock(&internals->lsc_lock);
2954
2955         /* Search for port in active port list */
2956         active_pos = find_slave_by_id(internals->active_slaves,
2957                         internals->active_slave_count, port_id);
2958
2959         ret = rte_eth_link_get_nowait(port_id, &link);
2960         if (ret < 0)
2961                 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);
2962
2963         if (ret == 0 && link.link_status) {
2964                 if (active_pos < internals->active_slave_count)
2965                         goto link_update;
2966
2967                 /* Check link state properties if the bonded link is up */
2968                 if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
2969                         if (link_properties_valid(bonded_eth_dev, &link) != 0)
2970                                 RTE_BOND_LOG(ERR, "Invalid link properties "
2971                                              "for slave %d in bonding mode %d",
2972                                              port_id, internals->mode);
2973                 } else {
2974                         /* inherit slave link properties */
2975                         link_properties_set(bonded_eth_dev, &link);
2976                 }
2977
2978                 /* If no active slave ports then set this port to be
2979                  * the primary port.
2980                  */
2981                 if (internals->active_slave_count < 1) {
2982                         /* If first active slave, then change link status */
2983                         bonded_eth_dev->data->dev_link.link_status =
2984                                                                 RTE_ETH_LINK_UP;
2985                         internals->current_primary_port = port_id;
2986                         lsc_flag = 1;
2987
2988                         mac_address_slaves_update(bonded_eth_dev);
2989                         bond_ethdev_promiscuous_update(bonded_eth_dev);
2990                         bond_ethdev_allmulticast_update(bonded_eth_dev);
2991                 }
2992
2993                 activate_slave(bonded_eth_dev, port_id);
2994
2995                 /* If the user has defined the primary port then default to
2996                  * using it.
2997                  */
2998                 if (internals->user_defined_primary_port &&
2999                                 internals->primary_port == port_id)
3000                         bond_ethdev_primary_set(internals, port_id);
3001         } else {
3002                 if (active_pos == internals->active_slave_count)
3003                         goto link_update;
3004
3005                 /* Remove from active slave list */
3006                 deactivate_slave(bonded_eth_dev, port_id);
3007
3008                 if (internals->active_slave_count < 1)
3009                         lsc_flag = 1;
3010
3011                 /* Update primary id: use the first active slave from the list,
3012                  * or fall back to the configured primary port if none remain. */
3013                 if (port_id == internals->current_primary_port) {
3014                         if (internals->active_slave_count > 0)
3015                                 bond_ethdev_primary_set(internals,
3016                                                 internals->active_slaves[0]);
3017                         else
3018                                 internals->current_primary_port = internals->primary_port;
3019                         mac_address_slaves_update(bonded_eth_dev);
3020                         bond_ethdev_promiscuous_update(bonded_eth_dev);
3021                         bond_ethdev_allmulticast_update(bonded_eth_dev);
3022                 }
3023         }
3024
3025 link_update:
3026         /**
3027          * Update bonded device link properties after any change to active
3028          * slaves
3029          */
3030         bond_ethdev_link_update(bonded_eth_dev, 0);
3031
3032         if (lsc_flag) {
3033                 /* Cancel any possible outstanding interrupts if delays are enabled */
3034                 if (internals->link_up_delay_ms > 0 ||
3035                         internals->link_down_delay_ms > 0)
3036                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
3037                                         bonded_eth_dev);
3038
3039                 if (bonded_eth_dev->data->dev_link.link_status) {
3040                         if (internals->link_up_delay_ms > 0)
3041                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
3042                                                 bond_ethdev_delayed_lsc_propagation,
3043                                                 (void *)bonded_eth_dev);
3044                         else
3045                                 rte_eth_dev_callback_process(bonded_eth_dev,
3046                                                 RTE_ETH_EVENT_INTR_LSC,
3047                                                 NULL);
3048
3049                 } else {
3050                         if (internals->link_down_delay_ms > 0)
3051                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
3052                                                 bond_ethdev_delayed_lsc_propagation,
3053                                                 (void *)bonded_eth_dev);
3054                         else
3055                                 rte_eth_dev_callback_process(bonded_eth_dev,
3056                                                 RTE_ETH_EVENT_INTR_LSC,
3057                                                 NULL);
3058                 }
3059         }
3060
3061         rte_spinlock_unlock(&internals->lsc_lock);
3062
3063         return rc;
3064 }
3065
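/*
 * Replicate the bonded device's RETA to every slave. The caller must pass
 * exactly the bonded device's RETA size; the table is duplicated across
 * internals->reta_conf and then pushed to each slave using that slave's
 * own RETA size.
 */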
3066 static int
3067 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
3068                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
3069 {
3070         unsigned i, j;
3071         int result = 0;
3072         int slave_reta_size;
3073         unsigned reta_count;
3074         struct bond_dev_private *internals = dev->data->dev_private;
3075
3076         if (reta_size != internals->reta_size)
3077                 return -EINVAL;
3078
3079         /* Copy RETA table */
3080         reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
3081                         RTE_ETH_RETA_GROUP_SIZE;
3082
3083         for (i = 0; i < reta_count; i++) {
3084                 internals->reta_conf[i].mask = reta_conf[i].mask;
3085                 for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
3086                         if ((reta_conf[i].mask >> j) & 0x01)
3087                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
3088         }
3089
3090         /* Fill rest of array */
3091         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
3092                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
3093                                 sizeof(internals->reta_conf[0]) * reta_count);
3094
3095         /* Propagate RETA over slaves */
3096         for (i = 0; i < internals->slave_count; i++) {
3097                 slave_reta_size = internals->slaves[i].reta_size;
3098                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
3099                                 &internals->reta_conf[0], slave_reta_size);
3100                 if (result < 0)
3101                         return result;
3102         }
3103
3104         return 0;
3105 }
3106
3107 static int
3108 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
3109                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
3110 {
3111         int i, j;
3112         struct bond_dev_private *internals = dev->data->dev_private;
3113
3114         if (reta_size != internals->reta_size)
3115                 return -EINVAL;
3116
3117         /* Copy RETA table */
3118         for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
3119                 for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
3120                         if ((reta_conf[i].mask >> j) & 0x01)
3121                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
3122
3123         return 0;
3124 }
3125
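/*
 * Propagate an RSS hash configuration update to all slaves. The requested
 * hash functions are masked by internals->flow_type_rss_offloads (the
 * types every slave supports) and the key, if given, is truncated to the
 * bonded device's key length.
 */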
3126 static int
3127 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
3128                 struct rte_eth_rss_conf *rss_conf)
3129 {
3130         int i, result = 0;
3131         struct bond_dev_private *internals = dev->data->dev_private;
3132         struct rte_eth_rss_conf bond_rss_conf;
3133
3134         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
3135
3136         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
3137
3138         if (bond_rss_conf.rss_hf != 0)
3139                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
3140
3141         if (bond_rss_conf.rss_key) {
3142                 if (bond_rss_conf.rss_key_len < internals->rss_key_len)
3143                         return -EINVAL;
3144                 else if (bond_rss_conf.rss_key_len > internals->rss_key_len)
3145                         RTE_BOND_LOG(WARNING, "rss_key will be truncated");
3146
3147                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
3148                                 internals->rss_key_len);
3149                 bond_rss_conf.rss_key_len = internals->rss_key_len;
3150         }
3151
3152         for (i = 0; i < internals->slave_count; i++) {
3153                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
3154                                 &bond_rss_conf);
3155                 if (result < 0)
3156                         return result;
3157         }
3158
3159         return 0;
3160 }
3161
3162 static int
3163 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
3164                 struct rte_eth_rss_conf *rss_conf)
3165 {
3166         struct bond_dev_private *internals = dev->data->dev_private;
3167
3168         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
3169         rss_conf->rss_key_len = internals->rss_key_len;
3170         if (rss_conf->rss_key)
3171                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
3172
3173         return 0;
3174 }
3175
3176 static int
3177 bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3178 {
3179         struct rte_eth_dev *slave_eth_dev;
3180         struct bond_dev_private *internals = dev->data->dev_private;
3181         int ret, i;
3182
3183         rte_spinlock_lock(&internals->lock);
3184
3185         for (i = 0; i < internals->slave_count; i++) {
3186                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3187                 if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
3188                         rte_spinlock_unlock(&internals->lock);
3189                         return -ENOTSUP;
3190                 }
3191         }
3192         for (i = 0; i < internals->slave_count; i++) {
3193                 ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
3194                 if (ret < 0) {
3195                         rte_spinlock_unlock(&internals->lock);
3196                         return ret;
3197                 }
3198         }
3199
3200         rte_spinlock_unlock(&internals->lock);
3201         return 0;
3202 }
3203
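/*
 * Two-pass MTU update: first verify that every slave implements mtu_set,
 * so the request cannot fail for lack of support half-way through, then
 * apply the new MTU to each slave, returning the first error encountered.
 */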
3204 static int
3205 bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
3206                         struct rte_ether_addr *addr)
3207 {
3208         if (mac_address_set(dev, addr)) {
3209                 RTE_BOND_LOG(ERR, "Failed to update MAC address");
3210                 return -EINVAL;
3211         }
3212
3213         return 0;
3214 }
3215
3216 static int
3217 bond_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
3218                   const struct rte_flow_ops **ops)
3219 {
3220         *ops = &bond_flow_ops;
3221         return 0;
3222 }
3223
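/*
 * Program an additional MAC address on every slave. Support for both add
 * and remove is checked up front so that a failure on one slave can be
 * rolled back by removing the address from the slaves already programmed.
 */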
3224 static int
3225 bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
3226                         struct rte_ether_addr *mac_addr,
3227                         __rte_unused uint32_t index, uint32_t vmdq)
3228 {
3229         struct rte_eth_dev *slave_eth_dev;
3230         struct bond_dev_private *internals = dev->data->dev_private;
3231         int ret, i;
3232
3233         rte_spinlock_lock(&internals->lock);
3234
3235         for (i = 0; i < internals->slave_count; i++) {
3236                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3237                 if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
3238                          *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
3239                         ret = -ENOTSUP;
3240                         goto end;
3241                 }
3242         }
3243
3244         for (i = 0; i < internals->slave_count; i++) {
3245                 ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
3246                                 mac_addr, vmdq);
3247                 if (ret < 0) {
3248                         /* rollback */
3249                         for (i--; i >= 0; i--)
3250                                 rte_eth_dev_mac_addr_remove(
3251                                         internals->slaves[i].port_id, mac_addr);
3252                         goto end;
3253                 }
3254         }
3255
3256         ret = 0;
3257 end:
3258         rte_spinlock_unlock(&internals->lock);
3259         return ret;
3260 }
3261
3262 static void
3263 bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
3264 {
3265         struct rte_eth_dev *slave_eth_dev;
3266         struct bond_dev_private *internals = dev->data->dev_private;
3267         int i;
3268
3269         rte_spinlock_lock(&internals->lock);
3270
3271         for (i = 0; i < internals->slave_count; i++) {
3272                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3273                 if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
3274                         goto end;
3275         }
3276
3277         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
3278
3279         for (i = 0; i < internals->slave_count; i++)
3280                 rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
3281                                 mac_addr);
3282
3283 end:
3284         rte_spinlock_unlock(&internals->lock);
3285 }
3286
3287 const struct eth_dev_ops default_dev_ops = {
3288         .dev_start            = bond_ethdev_start,
3289         .dev_stop             = bond_ethdev_stop,
3290         .dev_close            = bond_ethdev_close,
3291         .dev_configure        = bond_ethdev_configure,
3292         .dev_infos_get        = bond_ethdev_info,
3293         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
3294         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
3295         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
3296         .rx_queue_release     = bond_ethdev_rx_queue_release,
3297         .tx_queue_release     = bond_ethdev_tx_queue_release,
3298         .link_update          = bond_ethdev_link_update,
3299         .stats_get            = bond_ethdev_stats_get,
3300         .stats_reset          = bond_ethdev_stats_reset,
3301         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
3302         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
3303         .allmulticast_enable  = bond_ethdev_allmulticast_enable,
3304         .allmulticast_disable = bond_ethdev_allmulticast_disable,
3305         .reta_update          = bond_ethdev_rss_reta_update,
3306         .reta_query           = bond_ethdev_rss_reta_query,
3307         .rss_hash_update      = bond_ethdev_rss_hash_update,
3308         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get,
3309         .mtu_set              = bond_ethdev_mtu_set,
3310         .mac_addr_set         = bond_ethdev_mac_address_set,
3311         .mac_addr_add         = bond_ethdev_mac_addr_add,
3312         .mac_addr_remove      = bond_ethdev_mac_addr_remove,
3313         .flow_ops_get         = bond_flow_ops_get
3314 };
3315
3316 static int
3317 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3318 {
3319         const char *name = rte_vdev_device_name(dev);
3320         uint8_t socket_id = dev->device.numa_node;
3321         struct bond_dev_private *internals = NULL;
3322         struct rte_eth_dev *eth_dev = NULL;
3323         uint32_t vlan_filter_bmp_size;
3324
3325         /* Now do all data allocation: the eth_dev structure, a dummy PCI
3326          * driver and the internal (private) data.
3327          */
3328
3329         /* reserve an ethdev entry */
3330         eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
3331         if (eth_dev == NULL) {
3332                 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
3333                 goto err;
3334         }
3335
3336         internals = eth_dev->data->dev_private;
3337         eth_dev->data->nb_rx_queues = (uint16_t)1;
3338         eth_dev->data->nb_tx_queues = (uint16_t)1;
3339
3340         /* Allocate memory for storing MAC addresses */
3341         eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
3342                         BOND_MAX_MAC_ADDRS, 0, socket_id);
3343         if (eth_dev->data->mac_addrs == NULL) {
3344                 RTE_BOND_LOG(ERR,
3345                              "Failed to allocate %u bytes needed to store MAC addresses",
3346                              RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
3347                 goto err;
3348         }
3349
3350         eth_dev->dev_ops = &default_dev_ops;
3351         eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
3352                                         RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3353
3354         rte_spinlock_init(&internals->lock);
3355         rte_spinlock_init(&internals->lsc_lock);
3356
3357         internals->port_id = eth_dev->data->port_id;
3358         internals->mode = BONDING_MODE_INVALID;
3359         internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
3360         internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
3361         internals->burst_xmit_hash = burst_xmit_l2_hash;
3362         internals->user_defined_mac = 0;
3363
3364         internals->link_status_polling_enabled = 0;
3365
3366         internals->link_status_polling_interval_ms =
3367                 DEFAULT_POLLING_INTERVAL_10_MS;
3368         internals->link_down_delay_ms = 0;
3369         internals->link_up_delay_ms = 0;
3370
3371         internals->slave_count = 0;
3372         internals->active_slave_count = 0;
3373         internals->rx_offload_capa = 0;
3374         internals->tx_offload_capa = 0;
3375         internals->rx_queue_offload_capa = 0;
3376         internals->tx_queue_offload_capa = 0;
3377         internals->candidate_max_rx_pktlen = 0;
3378         internals->max_rx_pktlen = 0;
3379
3380         /* Initially allow any RSS offload type to be chosen */
3381         internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
3382
3383         memset(&internals->default_rxconf, 0,
3384                sizeof(internals->default_rxconf));
3385         memset(&internals->default_txconf, 0,
3386                sizeof(internals->default_txconf));
3387
3388         memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
3389         memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
3390
3391         memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
3392         memset(internals->slaves, 0, sizeof(internals->slaves));
3393
3394         TAILQ_INIT(&internals->flow_list);
3395         internals->flow_isolated_valid = 0;
3396
3397         /* Set mode 4 default configuration */
3398         bond_mode_8023ad_setup(eth_dev, NULL);
3399         if (bond_ethdev_mode_set(eth_dev, mode)) {
3400                 RTE_BOND_LOG(ERR, "Failed to set bonded device %u mode to %u",
3401                                  eth_dev->data->port_id, mode);
3402                 goto err;
3403         }
3404
3405         vlan_filter_bmp_size =
3406                 rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
3407         internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
3408                                                    RTE_CACHE_LINE_SIZE);
3409         if (internals->vlan_filter_bmpmem == NULL) {
3410                 RTE_BOND_LOG(ERR,
3411                              "Failed to allocate vlan bitmap for bonded device %u",
3412                              eth_dev->data->port_id);
3413                 goto err;
3414         }
3415
3416         internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
3417                         internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
3418         if (internals->vlan_filter_bmp == NULL) {
3419                 RTE_BOND_LOG(ERR,
3420                              "Failed to init vlan bitmap for bonded device %u",
3421                              eth_dev->data->port_id);
3422                 rte_free(internals->vlan_filter_bmpmem);
3423                 goto err;
3424         }
3425
3426         return eth_dev->data->port_id;
3427
3428 err:
3429         rte_free(internals);
3430         if (eth_dev != NULL)
3431                 eth_dev->data->dev_private = NULL;
3432         rte_eth_dev_release_port(eth_dev);
3433         return -1;
3434 }
3435
3436 static int
3437 bond_probe(struct rte_vdev_device *dev)
3438 {
3439         const char *name;
3440         struct bond_dev_private *internals;
3441         struct rte_kvargs *kvlist;
3442         uint8_t bonding_mode;
3443         int arg_count, port_id;
3444         int socket_id;
3445         uint8_t agg_mode;
3446         struct rte_eth_dev *eth_dev;
3447
3448         if (!dev)
3449                 return -EINVAL;
3450
3451         name = rte_vdev_device_name(dev);
3452         RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
3453
3454         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3455                 eth_dev = rte_eth_dev_attach_secondary(name);
3456                 if (!eth_dev) {
3457                         RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3458                         return -1;
3459                 }
3460                 /* TODO: request info from primary to set up Rx and Tx */
3461                 eth_dev->dev_ops = &default_dev_ops;
3462                 eth_dev->device = &dev->device;
3463                 rte_eth_dev_probing_finish(eth_dev);
3464                 return 0;
3465         }
3466
3467         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
3468                 pmd_bond_init_valid_arguments);
3469         if (kvlist == NULL) {
3470                 RTE_BOND_LOG(ERR, "Invalid args in %s", rte_vdev_device_args(dev));
3471                 return -1;
3472         }
3473
3474         /* Parse link bonding mode */
3475         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
3476                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
3477                                 &bond_ethdev_parse_slave_mode_kvarg,
3478                                 &bonding_mode) != 0) {
3479                         RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
3480                                         name);
3481                         goto parse_error;
3482                 }
3483         } else {
3484                 RTE_BOND_LOG(ERR, "Mode must be specified exactly once for "
3485                                 "bonded device %s", name);
3486                 goto parse_error;
3487         }
3488
3489         /* Parse socket id to create bonding device on */
3490         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
3491         if (arg_count == 1) {
3492                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
3493                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
3494                                 != 0) {
3495                         RTE_BOND_LOG(ERR, "Invalid socket id specified for "
3496                                         "bonded device %s", name);
3497                         goto parse_error;
3498                 }
3499         } else if (arg_count > 1) {
3500                 RTE_BOND_LOG(ERR, "Socket id can be specified only once for "
3501                                 "bonded device %s", name);
3502                 goto parse_error;
3503         } else {
3504                 socket_id = rte_socket_id();
3505         }
3506
3507         dev->device.numa_node = socket_id;
3508
3509         /* Create link bonding eth device */
3510         port_id = bond_alloc(dev, bonding_mode);
3511         if (port_id < 0) {
3512                 RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u "
3513                                 "on socket %u.", name, bonding_mode, socket_id);
3514                 goto parse_error;
3515         }
3516         internals = rte_eth_devices[port_id].data->dev_private;
3517         internals->kvlist = kvlist;
3518
3519         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3520                 if (rte_kvargs_process(kvlist,
3521                                 PMD_BOND_AGG_MODE_KVARG,
3522                                 &bond_ethdev_parse_slave_agg_mode_kvarg,
3523                                 &agg_mode) != 0) {
3524                         RTE_BOND_LOG(ERR,
3525                                         "Failed to parse agg selection mode for bonded device %s",
3526                                         name);
3527                         goto parse_error;
3528                 }
3529
3530                 if (internals->mode == BONDING_MODE_8023AD)
3531                         internals->mode4.agg_selection = agg_mode;
3532         } else {
3533                 internals->mode4.agg_selection = AGG_STABLE;
3534         }
3535
3536         rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
3537         RTE_BOND_LOG(INFO, "Created bonded device %s on port %d in mode %u on "
3538                         "socket %u.", name, port_id, bonding_mode, socket_id);
3539         return 0;
3540
3541 parse_error:
3542         rte_kvargs_free(kvlist);
3543
3544         return -1;
3545 }
3546
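/*
 * Remove the bonded vdev. All slaves must have been removed beforehand
 * (-EBUSY otherwise); a secondary process only releases its local ethdev
 * entry.
 */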
3547 static int
3548 bond_remove(struct rte_vdev_device *dev)
3549 {
3550         struct rte_eth_dev *eth_dev;
3551         struct bond_dev_private *internals;
3552         const char *name;
3553         int ret = 0;
3554
3555         if (!dev)
3556                 return -EINVAL;
3557
3558         name = rte_vdev_device_name(dev);
3559         RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
3560
3561         /* find an ethdev entry */
3562         eth_dev = rte_eth_dev_allocated(name);
3563         if (eth_dev == NULL)
3564                 return 0; /* port already released */
3565
3566         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3567                 return rte_eth_dev_release_port(eth_dev);
3568
3569         RTE_ASSERT(eth_dev->device == &dev->device);
3570
3571         internals = eth_dev->data->dev_private;
3572         if (internals->slave_count != 0)
3573                 return -EBUSY;
3574
3575         if (eth_dev->data->dev_started == 1) {
3576                 ret = bond_ethdev_stop(eth_dev);
3577                 bond_ethdev_close(eth_dev);
3578         }
3579         rte_eth_dev_release_port(eth_dev);
3580
3581         return ret;
3582 }
3583
3584 /* This part resolves the slave port ids after all the other pdevs and vdevs
3585  * have been allocated. */
3586 static int
3587 bond_ethdev_configure(struct rte_eth_dev *dev)
3588 {
3589         const char *name = dev->device->name;
3590         struct bond_dev_private *internals = dev->data->dev_private;
3591         struct rte_kvargs *kvlist = internals->kvlist;
3592         uint64_t offloads;
3593         int arg_count;
3594         uint16_t port_id = dev - rte_eth_devices;
3595         uint8_t agg_mode;
3596
3597         static const uint8_t default_rss_key[40] = {
3598                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3599                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3600                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3601                 0xBE, 0xAC, 0x01, 0xFA
3602         };
3603
3604         unsigned i, j;
3605
3606         /*
3607          * If RSS is enabled, fill table with default values and
3608          * set the key to the value specified in the port RSS configuration.
3609          * Fall back to the default RSS key if no key is specified.
3610          */
3611         if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
3612                 struct rte_eth_rss_conf *rss_conf =
3613                         &dev->data->dev_conf.rx_adv_conf.rss_conf;
3614
3615                 if (internals->rss_key_len == 0) {
3616                         internals->rss_key_len = sizeof(default_rss_key);
3617                 }
3618
3619                 if (rss_conf->rss_key != NULL) {
3620                         if (internals->rss_key_len > rss_conf->rss_key_len) {
3621                                 RTE_BOND_LOG(ERR, "Invalid rss key length(%u)",
3622                                                 rss_conf->rss_key_len);
3623                                 return -EINVAL;
3624                         }
3625
3626                         memcpy(internals->rss_key, rss_conf->rss_key,
3627                                internals->rss_key_len);
3628                 } else {
3629                         if (internals->rss_key_len > sizeof(default_rss_key)) {
3630                                 /*
3631                                  * If the rss_key includes both the standard RSS key
3632                                  * and an extended hash key, the key is longer than
3633                                  * the default one, so a fresh hash key has to be
3634                                  * generated (randomized) here.
3635                                  */
3636                                 for (i = 0; i < internals->rss_key_len; i++)
3637                                         internals->rss_key[i] = (uint8_t)rte_rand();
3638                         } else {
3639                                 memcpy(internals->rss_key, default_rss_key,
3640                                         internals->rss_key_len);
3641                         }
3642                 }
3643
3644                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3645                         internals->reta_conf[i].mask = ~0LL;
3646                         for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
3647                                 internals->reta_conf[i].reta[j] =
3648                                                 (i * RTE_ETH_RETA_GROUP_SIZE + j) %
3649                                                 dev->data->nb_rx_queues;
3650                 }
3651         }
3652
3653         offloads = dev->data->dev_conf.txmode.offloads;
3654         if ((offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
3655                         (internals->mode == BONDING_MODE_8023AD ||
3656                         internals->mode == BONDING_MODE_BROADCAST)) {
3657                 RTE_BOND_LOG(WARNING,
3658                         "bonding modes broadcast and 802.3AD do not support the MBUF_FAST_FREE offload; forcibly disabling it.");
3659                 offloads &= ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3660                 dev->data->dev_conf.txmode.offloads = offloads;
3661         }
3662
3663         /* set the max_rx_pktlen */
3664         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
3665
3666         /*
3667          * If there is no kvlist, this bonded device was created through the
3668          * bonding API rather than from devargs.
3669          */
3670         if (!kvlist)
3671                 return 0;
3672
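        /*
         * A minimal sketch of that API path, for illustration only (the
         * device name, mode and slave_port_id below are placeholders):
         *
         *     int bond_port = rte_eth_bond_create("net_bonding0",
         *                     BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
         *     if (bond_port >= 0) {
         *             rte_eth_bond_slave_add(bond_port, slave_port_id);
         *             rte_eth_bond_primary_set(bond_port, slave_port_id);
         *     }
         */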
3673         /* Parse MAC address for bonded device */
3674         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
3675         if (arg_count == 1) {
3676                 struct rte_ether_addr bond_mac;
3677
3678                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
3679                                        &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
3680                         RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
3681                                      name);
3682                         return -1;
3683                 }
3684
3685                 /* Set MAC address */
3686                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
3687                         RTE_BOND_LOG(ERR,
3688                                      "Failed to set mac address on bonded device %s",
3689                                      name);
3690                         return -1;
3691                 }
3692         } else if (arg_count > 1) {
3693                 RTE_BOND_LOG(ERR,
3694                              "MAC address can be specified only once for bonded device %s",
3695                              name);
3696                 return -1;
3697         }
3698
3699         /* Parse/set balance mode transmit policy */
3700         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
3701         if (arg_count == 1) {
3702                 uint8_t xmit_policy;
3703
3704                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
3705                                        &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
3706                     0) {
3707                         RTE_BOND_LOG(INFO,
3708                                      "Invalid xmit policy specified for bonded device %s",
3709                                      name);
3710                         return -1;
3711                 }
3712
3713                 /* Set balance mode transmit policy */
3714                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
3715                         RTE_BOND_LOG(ERR,
3716                                      "Failed to set balance xmit policy on bonded device %s",
3717                                      name);
3718                         return -1;
3719                 }
3720         } else if (arg_count > 1) {
3721                 RTE_BOND_LOG(ERR,
3722                              "Transmit policy can be specified only once for bonded device %s",
3723                              name);
3724                 return -1;
3725         }
3726
3727         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3728                 if (rte_kvargs_process(kvlist,
3729                                        PMD_BOND_AGG_MODE_KVARG,
3730                                        &bond_ethdev_parse_slave_agg_mode_kvarg,
3731                                        &agg_mode) != 0) {
3732                         RTE_BOND_LOG(ERR,
3733                                      "Failed to parse agg selection mode for bonded device %s",
3734                                      name);
3735                 }
3736                 if (internals->mode == BONDING_MODE_8023AD) {
3737                         int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
3738                                         agg_mode);
3739                         if (ret < 0) {
3740                                 RTE_BOND_LOG(ERR,
3741                                         "Invalid args for agg selection set for bonded device %s",
3742                                         name);
3743                                 return -1;
3744                         }
3745                 }
3746         }
3747
3748         /* Parse/add slave ports to bonded device */
3749         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
3750                 struct bond_ethdev_slave_ports slave_ports;
3751                 unsigned i;
3752
3753                 memset(&slave_ports, 0, sizeof(slave_ports));
3754
3755                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
3756                                        &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
3757                         RTE_BOND_LOG(ERR,
3758                                      "Failed to parse slave ports for bonded device %s",
3759                                      name);
3760                         return -1;
3761                 }
3762
3763                 for (i = 0; i < slave_ports.slave_count; i++) {
3764                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
3765                                 RTE_BOND_LOG(ERR,
3766                                              "Failed to add port %d as slave to bonded device %s",
3767                                              slave_ports.slaves[i], name);
3768                         }
3769                 }
3770
3771         } else {
3772                 RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
3773                 return -1;
3774         }
3775
3776         /* Parse/set primary slave port id */
3777         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
3778         if (arg_count == 1) {
3779                 uint16_t primary_slave_port_id;
3780
3781                 if (rte_kvargs_process(kvlist,
3782                                        PMD_BOND_PRIMARY_SLAVE_KVARG,
3783                                        &bond_ethdev_parse_primary_slave_port_id_kvarg,
3784                                        &primary_slave_port_id) < 0) {
3785                         RTE_BOND_LOG(INFO,
3786                                      "Invalid primary slave port id specified for bonded device %s",
3787                                      name);
3788                         return -1;
3789                 }
3790
3791                 /* Set the primary slave port id */
3792                 if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
3793                     != 0) {
3794                         RTE_BOND_LOG(ERR,
3795                                      "Failed to set primary slave port %d on bonded device %s",
3796                                      primary_slave_port_id, name);
3797                         return -1;
3798                 }
3799         } else if (arg_count > 1) {
3800                 RTE_BOND_LOG(INFO,
3801                              "Primary slave can be specified only once for bonded device %s",
3802                              name);
3803                 return -1;
3804         }
3805
3806         /* Parse link status monitor polling interval */
3807         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
3808         if (arg_count == 1) {
3809                 uint32_t lsc_poll_interval_ms;
3810
3811                 if (rte_kvargs_process(kvlist,
3812                                        PMD_BOND_LSC_POLL_PERIOD_KVARG,
3813                                        &bond_ethdev_parse_time_ms_kvarg,
3814                                        &lsc_poll_interval_ms) < 0) {
3815                         RTE_BOND_LOG(INFO,
3816                                      "Invalid lsc polling interval value specified for bonded"
3817                                      " device %s", name);
3818                         return -1;
3819                 }
3820
3821                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
3822                     != 0) {
3823                         RTE_BOND_LOG(ERR,
3824                                      "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
3825                                      lsc_poll_interval_ms, name);
3826                         return -1;
3827                 }
3828         } else if (arg_count > 1) {
3829                 RTE_BOND_LOG(INFO,
3830                              "LSC polling interval can be specified only once for bonded"
3831                              " device %s", name);
3832                 return -1;
3833         }
3834
3835         /* Parse link up interrupt propagation delay */
3836         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
3837         if (arg_count == 1) {
3838                 uint32_t link_up_delay_ms;
3839
3840                 if (rte_kvargs_process(kvlist,
3841                                        PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
3842                                        &bond_ethdev_parse_time_ms_kvarg,
3843                                        &link_up_delay_ms) < 0) {
3844                         RTE_BOND_LOG(INFO,
3845                                      "Invalid link up propagation delay value specified for"
3846                                      " bonded device %s", name);
3847                         return -1;
3848                 }
3849
3850                 /* Set the link up propagation delay */
3851                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
3852                     != 0) {
3853                         RTE_BOND_LOG(ERR,
3854                                      "Failed to set link up propagation delay (%u ms) on bonded"
3855                                      " device %s", link_up_delay_ms, name);
3856                         return -1;
3857                 }
3858         } else if (arg_count > 1) {
3859                 RTE_BOND_LOG(INFO,
3860                              "Link up propagation delay can be specified only once for"
3861                              " bonded device %s", name);
3862                 return -1;
3863         }
3864
3865         /* Parse link down interrupt propagation delay */
3866         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
3867         if (arg_count == 1) {
3868                 uint32_t link_down_delay_ms;
3869
3870                 if (rte_kvargs_process(kvlist,
3871                                        PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
3872                                        &bond_ethdev_parse_time_ms_kvarg,
3873                                        &link_down_delay_ms) < 0) {
3874                         RTE_BOND_LOG(INFO,
3875                                      "Invalid link down propagation delay value specified for"
3876                                      " bonded device %s", name);
3877                         return -1;
3878                 }
3879
3880                 /* Set the link down propagation delay */
3881                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
3882                     != 0) {
3883                         RTE_BOND_LOG(ERR,
3884                                      "Failed to set link down propagation delay (%u ms) on bonded device %s",
3885                                      link_down_delay_ms, name);
3886                         return -1;
3887                 }
3888         } else if (arg_count > 1) {
3889                 RTE_BOND_LOG(INFO,
3890                              "Link down propagation delay can be specified only once for bonded device %s",
3891                              name);
3892                 return -1;
3893         }
3894
3895         /* Configure the slaves so the MTU setting can be propagated */
3896         for (i = 0; i < internals->slave_count; i++) {
3897                 struct rte_eth_dev *slave_ethdev =
3898                                 &(rte_eth_devices[internals->slaves[i].port_id]);
3899                 if (slave_configure(dev, slave_ethdev) != 0) {
3900                         RTE_BOND_LOG(ERR,
3901                                 "bonded port (%d) failed to configure slave device (%d)",
3902                                 dev->data->port_id,
3903                                 internals->slaves[i].port_id);
3904                         return -1;
3905                 }
3906         }
3907         return 0;
3908 }
3909
3910 struct rte_vdev_driver pmd_bond_drv = {
3911         .probe = bond_probe,
3912         .remove = bond_remove,
3913 };
3914
3915 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
3916 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
3917
3918 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
3919         "slave=<ifc> "
3920         "primary=<ifc> "
3921         "mode=[0-6] "
3922         "xmit_policy=[l2 | l23 | l34] "
3923         "agg_mode=[count | stable | bandwidth] "
3924         "socket_id=<int> "
3925         "mac=<mac addr> "
3926         "lsc_poll_period_ms=<int> "
3927         "up_delay=<int> "
3928         "down_delay=<int>");
3929
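/*
 * Example devargs, for illustration only (the vdev name and PCI addresses
 * below are placeholders that depend on the system):
 *
 *     --vdev 'net_bonding0,mode=4,slave=0000:02:00.0,slave=0000:03:00.0,agg_mode=stable,socket_id=0'
 */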
3930 /* We can't use RTE_LOG_REGISTER_DEFAULT because of the forced name for
3931  * this library, see meson.build.
3932  */
3933 RTE_LOG_REGISTER(bond_logtype, pmd.net.bonding, NOTICE);