net/bonding: fix Rx queue conversion
drivers/net/bonding/rte_eth_bond_pmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdlib.h>
#include <stdbool.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"

#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define BOND_MAX_MAC_ADDRS 16

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];

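/*
 * Return the cumulative size of any VLAN (or QinQ + inner VLAN) headers
 * following the Ethernet header, and advance *proto to the encapsulated
 * ethertype. Callers in this file use it as:
 *	offset = get_vlan_offset(eth_hdr, &ether_type);
 */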
static inline size_t
get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
        size_t vlan_offset = 0;

        if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
                rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
                struct rte_vlan_hdr *vlan_hdr =
                        (struct rte_vlan_hdr *)(eth_hdr + 1);

                vlan_offset = sizeof(struct rte_vlan_hdr);
                *proto = vlan_hdr->eth_proto;

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
                        vlan_hdr = vlan_hdr + 1;
                        *proto = vlan_hdr->eth_proto;
                        vlan_offset += sizeof(struct rte_vlan_hdr);
                }
        }
        return vlan_offset;
}

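/*
 * Receive burst for modes that poll every active slave: packets are read
 * from each slave's queue in round-robin order, starting from the slave
 * after the one used on the previous call.
 */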
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        uint16_t num_rx_total = 0;
        uint16_t slave_count;
        uint16_t active_slave;
        int i;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        internals = bd_rx_q->dev_private;
        slave_count = internals->active_slave_count;
        active_slave = bd_rx_q->active_slave;

        for (i = 0; i < slave_count && nb_pkts; i++) {
                uint16_t num_rx_slave;

                /* The offset into *bufs increases as packets are received
                 * from the other slaves */
                num_rx_slave =
                        rte_eth_rx_burst(internals->active_slaves[active_slave],
                                         bd_rx_q->queue_id,
                                         bufs + num_rx_total, nb_pkts);
                num_rx_total += num_rx_slave;
                nb_pkts -= num_rx_slave;
                if (++active_slave == slave_count)
                        active_slave = 0;
        }

        if (++bd_rx_q->active_slave >= slave_count)
                bd_rx_q->active_slave = 0;
        return num_rx_total;
}

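/*
 * Receive burst for active-backup mode (mode 1): only the current primary
 * slave carries traffic, so read exclusively from its queue.
 */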
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        return rte_eth_rx_burst(internals->current_primary_port,
                        bd_rx_q->queue_id, bufs, nb_pkts);
}

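/*
 * Identify untagged slow-protocol frames (LACPDUs and marker PDUs) that
 * must be diverted to the mode 4 state machine rather than delivered to
 * the application.
 */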
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
        const uint16_t ether_type_slow_be =
                rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);

        return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
                (ethertype == ether_type_slow_be &&
                (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}

/*****************************************************************************
 * Flow director's setup for mode 4 optimization
 */

static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
        .dst.addr_bytes = { 0 },
        .src.addr_bytes = { 0 },
        .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};

static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
        .dst.addr_bytes = { 0 },
        .src.addr_bytes = { 0 },
        .type = 0xFFFF,
};

static struct rte_flow_item flow_item_8023ad[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &flow_item_eth_type_8023ad,
                .last = NULL,
                .mask = &flow_item_eth_mask_type_8023ad,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_END,
                .spec = NULL,
                .last = NULL,
                .mask = NULL,
        }
};

const struct rte_flow_attr flow_attr_8023ad = {
        .group = 0,
        .priority = 0,
        .ingress = 1,
        .egress = 0,
        .reserved = 0,
};

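/*
 * Check that a slave can support the dedicated-queue optimization: the
 * slow-protocol flow rule must validate on the slave, and the slave must
 * have room for one extra Rx/Tx queue beyond the bonded device's own.
 */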
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
                uint16_t slave_port) {
        struct rte_eth_dev_info slave_info;
        struct rte_flow_error error;
        struct bond_dev_private *internals = bond_dev->data->dev_private;

        const struct rte_flow_action_queue lacp_queue_conf = {
                .index = 0,
        };

        const struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &lacp_queue_conf
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                }
        };

        int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
                        flow_item_8023ad, actions, &error);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
                                __func__, error.message, slave_port,
                                internals->mode4.dedicated_queues.rx_qid);
                return -1;
        }

        ret = rte_eth_dev_info_get(slave_port, &slave_info);
        if (ret != 0) {
                RTE_BOND_LOG(ERR,
                        "%s: Failed to get device (port %u) info: %s",
                        __func__, slave_port, strerror(-ret));

                return ret;
        }

        if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
                        slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
                RTE_BOND_LOG(ERR,
                        "%s: Slave %d capabilities don't allow allocating additional queues",
                        __func__, slave_port);
                return -1;
        }

        return 0;
}

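/*
 * Verify that every slave of the bonded device can accept the extra
 * dedicated Rx/Tx queue pair used for LACP control traffic. The dedicated
 * queue ids are provisionally set to the first indexes past the bonded
 * device's currently configured queues.
 */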
int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
        struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
        struct bond_dev_private *internals = bond_dev->data->dev_private;
        struct rte_eth_dev_info bond_info;
        uint16_t idx;
        int ret;

        /* Verify that all slaves in the bonding device support flow director */
        if (internals->slave_count > 0) {
                ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
                if (ret != 0) {
                        RTE_BOND_LOG(ERR,
                                "%s: Failed to get device (port %u) info: %s",
                                __func__, bond_dev->data->port_id,
                                strerror(-ret));

                        return ret;
                }

                internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
                internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;

                for (idx = 0; idx < internals->slave_count; idx++) {
                        if (bond_ethdev_8023ad_flow_verify(bond_dev,
                                        internals->slaves[idx].port_id) != 0)
                                return -1;
                }
        }

        return 0;
}

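/*
 * Install the slow-protocol flow rule on a slave so that LACP frames are
 * steered by hardware into the dedicated Rx queue.
 */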
int
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {

        struct rte_flow_error error;
        struct bond_dev_private *internals = bond_dev->data->dev_private;
        struct rte_flow_action_queue lacp_queue_conf = {
                .index = internals->mode4.dedicated_queues.rx_qid,
        };

        const struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &lacp_queue_conf
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                }
        };

        internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
                        &flow_attr_8023ad, flow_item_8023ad, actions, &error);
        if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
                RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
                                "(slave_port=%d queue_id=%d)",
                                error.message, slave_port,
                                internals->mode4.dedicated_queues.rx_qid);
                return -1;
        }

        return 0;
}

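/*
 * Receive burst for mode 4 (802.3ad). Slaves are polled round-robin as in
 * bond_ethdev_rx_burst(), but slow-protocol frames are intercepted and
 * handed to the LACP state machine (unless a dedicated Rx queue already
 * filters them in hardware), and frames not destined for this interface
 * are dropped when promiscuous/allmulti filtering requires it.
 */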
static inline uint16_t
rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
                bool dedicated_rxq)
{
        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct rte_eth_dev *bonded_eth_dev =
                                        &rte_eth_devices[internals->port_id];
        struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
        struct rte_ether_hdr *hdr;

        const uint16_t ether_type_slow_be =
                rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
        uint16_t num_rx_total = 0;      /* Total number of received packets */
        uint16_t slaves[RTE_MAX_ETHPORTS];
        uint16_t slave_count, idx;

        uint8_t collecting;  /* current slave collecting status */
        const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
        const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
        uint8_t subtype;
        uint16_t i;
        uint16_t j;
        uint16_t k;

        /* Copy slave list to protect against slave up/down changes during rx
         * bursting */
        slave_count = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * slave_count);

        idx = bd_rx_q->active_slave;
        if (idx >= slave_count) {
                bd_rx_q->active_slave = 0;
                idx = 0;
        }
        for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
                j = num_rx_total;
                collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
                                         COLLECTING);

                /* Read packets from this slave */
                num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
                                &bufs[num_rx_total], nb_pkts - num_rx_total);

                for (k = j; k < 2 && k < num_rx_total; k++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

                /* Handle slow protocol packets. */
                while (j < num_rx_total) {
                        if (j + 3 < num_rx_total)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

                        hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
                        subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

                        /* Remove packet from array if:
                         * - it is a slow packet and no dedicated Rx queue is
                         *   present,
                         * - the slave is not in collecting state,
                         * - the bonding interface is not in promiscuous mode
                         *   and:
                         *   - the packet is unicast with an address that does
                         *     not match, or
                         *   - the packet is multicast and the bonding
                         *     interface is not in allmulti.
                         */
                        if (unlikely(
                                (!dedicated_rxq &&
                                 is_lacp_packets(hdr->ether_type, subtype,
                                                 bufs[j])) ||
                                !collecting ||
                                (!promisc &&
                                 ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
                                   !rte_is_same_ether_addr(bond_mac,
                                                       &hdr->d_addr)) ||
                                  (!allmulti &&
                                   rte_is_multicast_ether_addr(&hdr->d_addr)))))) {

                                if (hdr->ether_type == ether_type_slow_be) {
                                        bond_mode_8023ad_handle_slow_pkt(
                                            internals, slaves[idx], bufs[j]);
                                } else
                                        rte_pktmbuf_free(bufs[j]);

                                /* Packet is handled by mode 4 or dropped, shift the array */
                                num_rx_total--;
                                if (j < num_rx_total) {
                                        memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
                                                (num_rx_total - j));
                                }
                        } else
                                j++;
                }
                if (unlikely(++idx == slave_count))
                        idx = 0;
        }

        if (++bd_rx_q->active_slave >= slave_count)
                bd_rx_q->active_slave = 0;

        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        return rx_burst_8023ad(queue, bufs, nb_pkts, false);
}

static uint16_t
bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        return rx_burst_8023ad(queue, bufs, nb_pkts, true);
}

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
        switch (arp_op) {
        case RTE_ARP_OP_REQUEST:
                strlcpy(buf, "ARP Request", buf_len);
                return;
        case RTE_ARP_OP_REPLY:
                strlcpy(buf, "ARP Reply", buf_len);
                return;
        case RTE_ARP_OP_REVREQUEST:
                strlcpy(buf, "Reverse ARP Request", buf_len);
                return;
        case RTE_ARP_OP_REVREPLY:
                strlcpy(buf, "Reverse ARP Reply", buf_len);
                return;
        case RTE_ARP_OP_INVREQUEST:
                strlcpy(buf, "Peer Identify Request", buf_len);
                return;
        case RTE_ARP_OP_INVREPLY:
                strlcpy(buf, "Peer Identify Reply", buf_len);
                return;
        default:
                break;
        }
        strlcpy(buf, "Unknown", buf_len);
        return;
}
#endif
#define MaxIPv4String   16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
        uint32_t ipv4_addr;

        ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
        snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
                (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
                ipv4_addr & 0xFF);
}

#define MAX_CLIENTS_NUMBER      128
uint8_t active_clients;
struct client_stats_t {
        uint16_t port;
        uint32_t ipv4_addr;
        uint32_t ipv4_rx_packets;
        uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
{
        int i = 0;

        for (; i < MAX_CLIENTS_NUMBER; i++) {
                if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
                        /* Update the RX or TX packet count for this client */
                        if (TXorRXindicator == &burstnumberRX)
                                client_stats[i].ipv4_rx_packets++;
                        else
                                client_stats[i].ipv4_tx_packets++;
                        return;
                }
        }
        /* We have a new client. Insert it into the table and update its stats */
        if (TXorRXindicator == &burstnumberRX)
                client_stats[active_clients].ipv4_rx_packets++;
        else
                client_stats[active_clients].ipv4_tx_packets++;
        client_stats[active_clients].ipv4_addr = addr;
        client_stats[active_clients].port = port;
        active_clients++;
}

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
        rte_log(RTE_LOG_DEBUG, bond_logtype,                            \
                "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
                "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
                info,                                                   \
                port,                                                   \
                eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
                eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
                eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
                src_ip,                                                 \
                eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
                eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
                eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
                dst_ip,                                                 \
                arp_op, ++burstnumber)
#endif

static void
mode6_debug(const char __rte_unused *info,
        struct rte_ether_hdr *eth_h, uint16_t port,
        uint32_t __rte_unused *burstnumber)
{
        struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        struct rte_arp_hdr *arp_h;
        char dst_ip[16];
        char ArpOp[24];
        char buf[16];
#endif
        char src_ip[16];

        uint16_t ether_type = eth_h->ether_type;
        uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        strlcpy(buf, info, 16);
#endif

        if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
                ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
                ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
                update_client_stats(ipv4_h->src_addr, port, burstnumber);
        }
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
                arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
                ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
                arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
                                ArpOp, sizeof(ArpOp));
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
        }
#endif
}
#endif

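/*
 * Receive burst for mode 6 (adaptive load balancing): packets are read
 * round-robin as in mode 0, then scanned for ARP so the ALB logic can
 * learn client addresses and keep its assignment table current.
 */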
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct rte_ether_hdr *eth_h;
        uint16_t ether_type, offset;
        uint16_t nb_recv_pkts;
        int i;

        nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

        for (i = 0; i < nb_recv_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
                        bond_mode_alb_arp_recv(eth_h, offset, internals);
                }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
                        mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
        }

        return nb_recv_pkts;
}

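/*
 * Transmit burst for round-robin mode (mode 0): packets are spread across
 * the active slaves one by one, continuing from the slave index reached by
 * the previous call. Packets a slave fails to send are moved to the end of
 * bufs so the caller can retry them.
 */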
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        uint16_t num_of_slaves;
        uint16_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave;

        static int slave_idx = 0;
        int i, cslave_idx = 0, tx_fail_total = 0;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate the per-slave mbuf arrays with the packets to send on each slave */
        for (i = 0; i < nb_pkts; i++) {
                cslave_idx = (slave_idx + i) % num_of_slaves;
                slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
        }

        /* Increment the current slave index so the next call to tx burst
         * starts on the next slave */
        slave_idx = ++cslave_idx;

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* If tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += tx_fail_slave;

                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                       &slave_bufs[i][num_tx_slave],
                                       tx_fail_slave * sizeof(bufs[0]));
                        }
                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
                struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        if (internals->active_slave_count < 1)
                return 0;

        return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
                        bufs, nb_pkts);
}

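/*
 * Per-header hash helpers used by the balance/802.3ad transmit policies.
 * Each folds the relevant address words together with XOR to produce a
 * value that is later reduced to an output slave index.
 */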
static inline uint16_t
ether_hash(struct rte_ether_hdr *eth_hdr)
{
        unaligned_uint16_t *word_src_addr =
                (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
        unaligned_uint16_t *word_dst_addr =
                (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
        return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
        unaligned_uint32_t *word_src_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
        unaligned_uint32_t *word_dst_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]) ^
                        (word_src_addr[3] ^ word_dst_addr[3]);
}

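/*
 * Layer 2 transmit hash policy: pick an output slave for each packet from
 * the source and destination MAC addresses only.
 */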
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        struct rte_ether_hdr *eth_hdr;
        uint32_t hash;
        int i;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);

                hash = ether_hash(eth_hdr);

                slaves[i] = (hash ^= hash >> 8) % slave_count;
        }
}

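/*
 * Layer 2+3 transmit hash policy: combine the MAC hash with an IPv4/IPv6
 * address hash when an IP header is present.
 */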
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        uint16_t i;
        struct rte_ether_hdr *eth_hdr;
        uint16_t proto;
        size_t vlan_offset;
        uint32_t hash, l3hash;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
                l3hash = 0;

                proto = eth_hdr->ether_type;
                hash = ether_hash(eth_hdr);

                vlan_offset = get_vlan_offset(eth_hdr, &proto);

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
                        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv4_hash(ipv4_hdr);

                } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
                        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv6_hash(ipv6_hdr);
                }

                hash = hash ^ l3hash;
                hash ^= hash >> 16;
                hash ^= hash >> 8;

                slaves[i] = hash % slave_count;
        }
}

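/*
 * Layer 3+4 transmit hash policy: combine IP address and TCP/UDP port
 * hashes. Fragmented IPv4 packets and truncated headers contribute no L4
 * component, which keeps all fragments of a flow on the same slave.
 */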
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        struct rte_ether_hdr *eth_hdr;
        uint16_t proto;
        size_t vlan_offset;
        int i;

        struct rte_udp_hdr *udp_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        uint32_t hash, l3hash, l4hash;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
                size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
                proto = eth_hdr->ether_type;
                vlan_offset = get_vlan_offset(eth_hdr, &proto);
                l3hash = 0;
                l4hash = 0;

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
                        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        size_t ip_hdr_offset;

                        l3hash = ipv4_hash(ipv4_hdr);

                        /* There is no L4 header in a fragmented packet */
                        if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
                                                                == 0)) {
                                ip_hdr_offset = (ipv4_hdr->version_ihl
                                        & RTE_IPV4_HDR_IHL_MASK) *
                                        RTE_IPV4_IHL_MULTIPLIER;

                                if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
                                        tcp_hdr = (struct rte_tcp_hdr *)
                                                ((char *)ipv4_hdr +
                                                        ip_hdr_offset);
                                        if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
                                                        < pkt_end)
                                                l4hash = HASH_L4_PORTS(tcp_hdr);
                                } else if (ipv4_hdr->next_proto_id ==
                                                                IPPROTO_UDP) {
                                        udp_hdr = (struct rte_udp_hdr *)
                                                ((char *)ipv4_hdr +
                                                        ip_hdr_offset);
                                        if ((size_t)udp_hdr + sizeof(*udp_hdr)
                                                        < pkt_end)
                                                l4hash = HASH_L4_PORTS(udp_hdr);
                                }
                        }
                } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
                        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv6_hash(ipv6_hdr);

                        if (ipv6_hdr->proto == IPPROTO_TCP) {
                                tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
                                l4hash = HASH_L4_PORTS(tcp_hdr);
                        } else if (ipv6_hdr->proto == IPPROTO_UDP) {
                                udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
                                l4hash = HASH_L4_PORTS(udp_hdr);
                        }
                }

                hash = l3hash ^ l4hash;
                hash ^= hash >> 16;
                hash ^= hash >> 8;

                slaves[i] = hash % slave_count;
        }
}

struct bwg_slave {
        uint64_t bwg_left_int;
        uint64_t bwg_left_remainder;
        uint16_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
        int i;

        for (i = 0; i < internals->active_slave_count; i++) {
                tlb_last_obytets[internals->active_slaves[i]] = 0;
        }
}

static int
bandwidth_cmp(const void *a, const void *b)
{
        const struct bwg_slave *bwg_a = a;
        const struct bwg_slave *bwg_b = b;
        int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
        int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
                        (int64_t)bwg_a->bwg_left_remainder;
        if (diff > 0)
                return 1;
        else if (diff < 0)
                return -1;
        else if (diff2 > 0)
                return 1;
        else if (diff2 < 0)
                return -1;
        else
                return 0;
}

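/*
 * Estimate how much of a slave's link bandwidth is still unused in the
 * current reorder window, given the bytes it has transmitted since the
 * window started. The result is kept as an integer part plus a remainder
 * so bandwidth_cmp() can order slaves precisely.
 */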
static void
bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
                struct bwg_slave *bwg_slave)
{
        struct rte_eth_link link_status;
        int ret;

        ret = rte_eth_link_get_nowait(port_id, &link_status);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
                             port_id, rte_strerror(-ret));
                return;
        }
        uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
        if (link_bwg == 0)
                return;
        link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
        bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
        bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}

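/*
 * Periodic alarm callback for mode 5 (TLB): sample each active slave's
 * transmitted byte count, compute its remaining bandwidth and re-sort
 * tlb_slaves_order so that transmission favours the least-loaded slaves.
 * The callback re-arms itself every REORDER_PERIOD_MS milliseconds.
 */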
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
        struct bond_dev_private *internals = arg;
        struct rte_eth_stats slave_stats;
        struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
        uint16_t slave_count;
        uint64_t tx_bytes;

        uint8_t update_stats = 0;
        uint16_t slave_id;
        uint16_t i;

        internals->slave_update_idx++;

        if (internals->slave_update_idx >= REORDER_PERIOD_MS)
                update_stats = 1;

        for (i = 0; i < internals->active_slave_count; i++) {
                slave_id = internals->active_slaves[i];
                rte_eth_stats_get(slave_id, &slave_stats);
                tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
                bandwidth_left(slave_id, tx_bytes,
                                internals->slave_update_idx, &bwg_array[i]);
                bwg_array[i].slave = slave_id;

                if (update_stats) {
                        tlb_last_obytets[slave_id] = slave_stats.obytes;
                }
        }

        if (update_stats == 1)
                internals->slave_update_idx = 0;

        slave_count = i;
        qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
        for (i = 0; i < slave_count; i++)
                internals->tlb_slaves_order[i] = bwg_array[i].slave;

        rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
                        (struct bond_dev_private *)internals);
}

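/*
 * Transmit burst for mode 5 (TLB): walk the slaves in tlb_slaves_order
 * (least loaded first) and push as much of the burst as each will take.
 * Packets carrying the primary slave's source MAC are rewritten to the
 * MAC of the slave they actually leave on.
 */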
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_eth_dev *primary_port =
                        &rte_eth_devices[internals->primary_port];
        uint16_t num_tx_total = 0;
        uint16_t i, j;

        uint16_t num_of_slaves = internals->active_slave_count;
        uint16_t slaves[RTE_MAX_ETHPORTS];

        struct rte_ether_hdr *ether_hdr;
        struct rte_ether_addr primary_slave_addr;
        struct rte_ether_addr active_slave_addr;

        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->tlb_slaves_order,
                                sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

        rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

        if (nb_pkts > 3) {
                for (i = 0; i < 3; i++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
        }

        for (i = 0; i < num_of_slaves; i++) {
                rte_eth_macaddr_get(slaves[i], &active_slave_addr);
                for (j = num_tx_total; j < nb_pkts; j++) {
                        if (j + 3 < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

                        ether_hdr = rte_pktmbuf_mtod(bufs[j],
                                                struct rte_ether_hdr *);
                        if (rte_is_same_ether_addr(&ether_hdr->s_addr,
                                                        &primary_slave_addr))
                                rte_ether_addr_copy(&active_slave_addr,
                                                &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
                }

                num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                bufs + num_tx_total, nb_pkts - num_tx_total);

                if (num_tx_total == nb_pkts)
                        break;
        }

        return num_tx_total;
}

void
bond_tlb_disable(struct bond_dev_private *internals)
{
        rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
        bond_ethdev_update_tlb_slave_cb(internals);
}

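/*
 * Transmit burst for mode 6 (ALB): ARP packets are assigned to slaves by
 * the ALB table (rewriting their source MAC), generated ARP update packets
 * are sent out of band, and all other traffic falls back to the TLB
 * policy.
 */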
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_ether_hdr *eth_h;
        uint16_t ether_type, offset;

        struct client_data *client_info;

        /*
         * We create transmit buffers for every slave and one additional to send
         * through tlb. In the worst case every packet will be sent on one port.
         */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
        uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

        /*
         * We create separate transmit buffers for update packets as they won't
         * be counted in num_tx_total.
         */
        struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
        uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

        struct rte_mbuf *upd_pkt;
        size_t pkt_size;

        uint16_t num_send, num_not_send = 0;
        uint16_t num_tx_total = 0;
        uint16_t slave_idx;

        int i, j;

        /* Search the tx buffer for ARP packets and forward them to the ALB logic */
        for (i = 0; i < nb_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
                        slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

                        /* Change src mac in eth header */
                        rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

                        /* Add packet to slave tx buffer */
                        slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
                        slave_bufs_pkts[slave_idx]++;
                } else {
                        /* If packet is not ARP, send it with TLB policy */
                        slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
                                        bufs[i];
                        slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
                }
        }

        /* Update connected client ARP tables */
        if (internals->mode6.ntt) {
                for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
                        client_info = &internals->mode6.client_table[i];

                        if (client_info->in_use) {
                                /* Allocate new packet to send ARP update on current slave */
                                upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
                                if (upd_pkt == NULL) {
                                        RTE_BOND_LOG(ERR,
                                                     "Failed to allocate ARP packet from pool");
                                        continue;
                                }
                                pkt_size = sizeof(struct rte_ether_hdr) +
                                        sizeof(struct rte_arp_hdr) +
                                        client_info->vlan_count *
                                        sizeof(struct rte_vlan_hdr);
                                upd_pkt->data_len = pkt_size;
                                upd_pkt->pkt_len = pkt_size;

                                slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
                                                internals);

                                /* Add packet to update tx buffer */
                                update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
                                update_bufs_pkts[slave_idx]++;
                        }
                }
                internals->mode6.ntt = 0;
        }

        /* Send ARP packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (slave_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
                                        slave_bufs[i], slave_bufs_pkts[i]);
                        for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
                                bufs[nb_pkts - 1 - num_not_send - j] =
                                                slave_bufs[i][nb_pkts - 1 - j];
                        }

                        num_tx_total += num_send;
                        num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        /* Print TX stats including update packets */
                        for (j = 0; j < slave_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
                                                        struct rte_ether_hdr *);
                                mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send update packets on proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (update_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
                                        update_bufs_pkts[i]);
                        for (j = num_send; j < update_bufs_pkts[i]; j++) {
                                rte_pktmbuf_free(update_bufs[i][j]);
                        }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        for (j = 0; j < update_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
                                                        struct rte_ether_hdr *);
                                mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send non-ARP packets using tlb policy */
        if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
                num_send = bond_ethdev_tx_burst_tlb(queue,
                                slave_bufs[RTE_MAX_ETHPORTS],
                                slave_bufs_pkts[RTE_MAX_ETHPORTS]);

                for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
                        bufs[nb_pkts - 1 - num_not_send - j] =
                                        slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
                }

                num_tx_total += num_send;
        }

        return num_tx_total;
}

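/*
 * Common transmit path for the balance and 802.3ad modes: hash each packet
 * to a slave with the configured xmit policy, burst each slave's share,
 * and move any unsent packets to the end of bufs for the caller to retry.
 */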
static inline uint16_t
tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
                 uint16_t *slave_port_ids, uint16_t slave_count)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        /* Array to sort mbufs for transmission on each slave into */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
        /* Number of mbufs for transmission on each slave */
        uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
        /* Mapping array generated by hash function to map mbufs to slaves */
        uint16_t bufs_slave_port_idxs[nb_bufs];

        uint16_t slave_tx_count;
        uint16_t total_tx_count = 0, total_tx_fail_count = 0;

        uint16_t i;

        /*
         * Populate the slave mbuf arrays with the packets to be sent on them,
         * selecting the output slave using a hash based on the xmit policy
         */
        internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
                        bufs_slave_port_idxs);

        for (i = 0; i < nb_bufs; i++) {
                /* Populate slave mbuf arrays with mbufs for that slave. */
                uint16_t slave_idx = bufs_slave_port_idxs[i];

                slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < slave_count; i++) {
                if (slave_nb_bufs[i] == 0)
                        continue;

                slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
                                bd_tx_q->queue_id, slave_bufs[i],
                                slave_nb_bufs[i]);

                total_tx_count += slave_tx_count;

                /* If tx burst fails move packets to end of bufs */
                if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
                        int slave_tx_fail_count = slave_nb_bufs[i] -
                                        slave_tx_count;
                        total_tx_fail_count += slave_tx_fail_count;
                        memcpy(&bufs[nb_bufs - total_tx_fail_count],
                               &slave_bufs[i][slave_tx_count],
                               slave_tx_fail_count * sizeof(bufs[0]));
                }
        }

        return total_tx_count;
}

static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t slave_count;

        if (unlikely(nb_bufs == 0))
                return 0;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting
         */
        slave_count = internals->active_slave_count;
        if (unlikely(slave_count < 1))
                return 0;

        memcpy(slave_port_ids, internals->active_slaves,
                        sizeof(slave_port_ids[0]) * slave_count);
        return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
                                slave_count);
}

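/*
 * Transmit burst for mode 4 (802.3ad): first drain any pending LACP
 * control frames from each slave's tx_ring (skipped when a dedicated Tx
 * queue carries them), then distribute the data burst across the slaves
 * currently in DISTRIBUTING state via tx_burst_balance().
 */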
static inline uint16_t
tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
                bool dedicated_txq)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t slave_count;

        uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t dist_slave_count;

        uint16_t slave_tx_count;

        uint16_t i;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        slave_count = internals->active_slave_count;
        if (unlikely(slave_count < 1))
                return 0;

        memcpy(slave_port_ids, internals->active_slaves,
                        sizeof(slave_port_ids[0]) * slave_count);

        if (dedicated_txq)
                goto skip_tx_ring;

        /* Check for LACP control packets and send if available */
        for (i = 0; i < slave_count; i++) {
                struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
                struct rte_mbuf *ctrl_pkt = NULL;

                if (likely(rte_ring_empty(port->tx_ring)))
                        continue;

                if (rte_ring_dequeue(port->tx_ring,
                                     (void **)&ctrl_pkt) != -ENOENT) {
                        slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
                                        bd_tx_q->queue_id, &ctrl_pkt, 1);
                        /*
                         * Re-enqueue LAG control plane packets to the buffering
                         * ring if transmission fails so the packet isn't lost.
                         */
                        if (slave_tx_count != 1)
                                rte_ring_enqueue(port->tx_ring, ctrl_pkt);
                }
        }

skip_tx_ring:
        if (unlikely(nb_bufs == 0))
                return 0;

        dist_slave_count = 0;
        for (i = 0; i < slave_count; i++) {
                struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];

                if (ACTOR_STATE(port, DISTRIBUTING))
                        dist_slave_port_ids[dist_slave_count++] =
                                        slave_port_ids[i];
        }

        if (unlikely(dist_slave_count < 1))
                return 0;

        return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
                                dist_slave_count);
}

static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        return tx_burst_8023ad(queue, bufs, nb_bufs, false);
}

static uint16_t
bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}

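/*
 * Transmit burst for broadcast mode (mode 3): every packet is sent on
 * every active slave, so each mbuf's reference count is bumped first.
 * Since the caller only learns the most successful slave's count, unsent
 * references on all other slaves are freed here.
 */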
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint16_t slaves[RTE_MAX_ETHPORTS];
        uint8_t tx_failed_flag = 0;
        uint16_t num_of_slaves;

        uint16_t max_nb_of_tx_pkts = 0;

        int slave_tx_total[RTE_MAX_ETHPORTS];
        int i, most_successful_tx_slave = -1;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return 0;

        /* Increment reference count on mbufs */
        for (i = 0; i < nb_pkts; i++)
                rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

        /* Transmit burst on each active slave */
        for (i = 0; i < num_of_slaves; i++) {
                slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        bufs, nb_pkts);

                if (unlikely(slave_tx_total[i] < nb_pkts))
                        tx_failed_flag = 1;

                /* Record the count and slave index for the slave which transmits the
                 * maximum number of packets */
                if (slave_tx_total[i] > max_nb_of_tx_pkts) {
                        max_nb_of_tx_pkts = slave_tx_total[i];
                        most_successful_tx_slave = i;
                }
        }

        /* If slaves fail to transmit packets from the burst, the calling application
         * is not expected to know about multiple references to packets, so we must
         * handle failures of all packets except those of the most successful slave
         */
        if (unlikely(tx_failed_flag))
                for (i = 0; i < num_of_slaves; i++)
                        if (i != most_successful_tx_slave)
                                while (slave_tx_total[i] < nb_pkts)
                                        rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

        return max_nb_of_tx_pkts;
}

1356 static void
1357 link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
1358 {
1359         struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1360
1361         if (bond_ctx->mode == BONDING_MODE_8023AD) {
1362                 /**
1363                  * If in mode 4 then save the link properties of the first
1364                  * slave; all subsequent slaves must match these properties.
1365                  */
1366                 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1367
1368                 bond_link->link_autoneg = slave_link->link_autoneg;
1369                 bond_link->link_duplex = slave_link->link_duplex;
1370                 bond_link->link_speed = slave_link->link_speed;
1371         } else {
1372                 /**
1373                  * In any other mode the link properties are set to default
1374                  * values of AUTONEG/DUPLEX
1375                  */
1376                 ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
1377                 ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1378         }
1379 }
1380
1381 static int
1382 link_properties_valid(struct rte_eth_dev *ethdev,
1383                 struct rte_eth_link *slave_link)
1384 {
1385         struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1386
1387         if (bond_ctx->mode == BONDING_MODE_8023AD) {
1388                 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1389
1390                 if (bond_link->link_duplex != slave_link->link_duplex ||
1391                         bond_link->link_autoneg != slave_link->link_autoneg ||
1392                         bond_link->link_speed != slave_link->link_speed)
1393                         return -1;
1394         }
1395
1396         return 0;
1397 }
1398
1399 int
1400 mac_address_get(struct rte_eth_dev *eth_dev,
1401                 struct rte_ether_addr *dst_mac_addr)
1402 {
1403         struct rte_ether_addr *mac_addr;
1404
1405         if (eth_dev == NULL) {
1406                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1407                 return -1;
1408         }
1409
1410         if (dst_mac_addr == NULL) {
1411                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1412                 return -1;
1413         }
1414
1415         mac_addr = eth_dev->data->mac_addrs;
1416
1417         rte_ether_addr_copy(mac_addr, dst_mac_addr);
1418         return 0;
1419 }
1420
1421 int
1422 mac_address_set(struct rte_eth_dev *eth_dev,
1423                 struct rte_ether_addr *new_mac_addr)
1424 {
1425         struct rte_ether_addr *mac_addr;
1426
1427         if (eth_dev == NULL) {
1428                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1429                 return -1;
1430         }
1431
1432         if (new_mac_addr == NULL) {
1433                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1434                 return -1;
1435         }
1436
1437         mac_addr = eth_dev->data->mac_addrs;
1438
1439         /* If the new MAC is different from the current MAC then update */
1440         if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1441                 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1442
1443         return 0;
1444 }
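/*
 * Illustrative sketch: applications normally change the bonded MAC via
 * the public wrapper rather than calling mac_address_set() directly
 * (the address value below is an arbitrary example):
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *	if (rte_eth_bond_mac_address_set(bond_port, &addr) != 0)
 *		rte_exit(EXIT_FAILURE, "failed to set bonded MAC\n");
 */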
1445
1446 static const struct rte_ether_addr null_mac_addr;
1447
1448 /*
1449  * Add additional MAC addresses to the slave
1450  */
1451 int
1452 slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1453                 uint16_t slave_port_id)
1454 {
1455         int i, ret;
1456         struct rte_ether_addr *mac_addr;
1457
1458         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1459                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1460                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1461                         break;
1462
1463                 ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
1464                 if (ret < 0) {
1465                         /* rollback */
1466                         for (i--; i > 0; i--)
1467                                 rte_eth_dev_mac_addr_remove(slave_port_id,
1468                                         &bonded_eth_dev->data->mac_addrs[i]);
1469                         return ret;
1470                 }
1471         }
1472
1473         return 0;
1474 }
1475
1476 /*
1477  * Remove additional MAC addresses from the slave
1478  */
1479 int
1480 slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1481                 uint16_t slave_port_id)
1482 {
1483         int i, rc, ret;
1484         struct rte_ether_addr *mac_addr;
1485
1486         rc = 0;
1487         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1488                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1489                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1490                         break;
1491
1492                 ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
1493                 /* save only the first error */
1494                 if (ret < 0 && rc == 0)
1495                         rc = ret;
1496         }
1497
1498         return rc;
1499 }
1500
1501 int
1502 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1503 {
1504         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1505         bool set;
1506         int i;
1507
1508         /* Update slave devices MAC addresses */
1509         if (internals->slave_count < 1)
1510                 return -1;
1511
1512         switch (internals->mode) {
1513         case BONDING_MODE_ROUND_ROBIN:
1514         case BONDING_MODE_BALANCE:
1515         case BONDING_MODE_BROADCAST:
1516                 for (i = 0; i < internals->slave_count; i++) {
1517                         if (rte_eth_dev_default_mac_addr_set(
1518                                         internals->slaves[i].port_id,
1519                                         bonded_eth_dev->data->mac_addrs)) {
1520                                 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1521                                                 internals->slaves[i].port_id);
1522                                 return -1;
1523                         }
1524                 }
1525                 break;
1526         case BONDING_MODE_8023AD:
1527                 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1528                 break;
1529         case BONDING_MODE_ACTIVE_BACKUP:
1530         case BONDING_MODE_TLB:
1531         case BONDING_MODE_ALB:
1532         default:
1533                 set = true;
1534                 for (i = 0; i < internals->slave_count; i++) {
1535                         if (internals->slaves[i].port_id ==
1536                                         internals->current_primary_port) {
1537                                 if (rte_eth_dev_default_mac_addr_set(
1538                                                 internals->current_primary_port,
1539                                                 bonded_eth_dev->data->mac_addrs)) {
1540                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1541                                                         internals->current_primary_port);
1542                                         set = false;
1543                                 }
1544                         } else {
1545                                 if (rte_eth_dev_default_mac_addr_set(
1546                                                 internals->slaves[i].port_id,
1547                                                 &internals->slaves[i].persisted_mac_addr)) {
1548                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1549                                                         internals->slaves[i].port_id);
1550                                 }
1551                         }
1552                 }
1553                 if (!set)
1554                         return -1;
1555         }
1556
1557         return 0;
1558 }
1559
1560 int
1561 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1562 {
1563         struct bond_dev_private *internals;
1564
1565         internals = eth_dev->data->dev_private;
1566
1567         switch (mode) {
1568         case BONDING_MODE_ROUND_ROBIN:
1569                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1570                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1571                 break;
1572         case BONDING_MODE_ACTIVE_BACKUP:
1573                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1574                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1575                 break;
1576         case BONDING_MODE_BALANCE:
1577                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1578                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1579                 break;
1580         case BONDING_MODE_BROADCAST:
1581                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1582                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1583                 break;
1584         case BONDING_MODE_8023AD:
1585                 if (bond_mode_8023ad_enable(eth_dev) != 0)
1586                         return -1;
1587
1588                 if (internals->mode4.dedicated_queues.enabled == 0) {
1589                         eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1590                         eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1591                         RTE_BOND_LOG(WARNING,
1592                                 "Using mode 4, it is necessary to do TX burst "
1593                                 "and RX burst at least every 100ms.");
1594                 } else {
1595                         /* Use flow director's optimization */
1596                         eth_dev->rx_pkt_burst =
1597                                         bond_ethdev_rx_burst_8023ad_fast_queue;
1598                         eth_dev->tx_pkt_burst =
1599                                         bond_ethdev_tx_burst_8023ad_fast_queue;
1600                 }
1601                 break;
1602         case BONDING_MODE_TLB:
1603                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1604                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1605                 break;
1606         case BONDING_MODE_ALB:
1607                 if (bond_mode_alb_enable(eth_dev) != 0)
1608                         return -1;
1609
1610                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1611                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1612                 break;
1613         default:
1614                 return -1;
1615         }
1616
1617         internals->mode = mode;
1618
1619         return 0;
1620 }
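/*
 * Illustrative sketch: the rx/tx handlers selected above change when an
 * application switches mode at runtime through the public API, e.g.
 *
 *	if (rte_eth_bond_mode_set(bond_port, BONDING_MODE_8023AD) != 0)
 *		rte_exit(EXIT_FAILURE, "failed to set bonding mode\n");
 *
 * As the warning above notes, in mode 4 without dedicated queues the
 * application must call the rx/tx burst functions at least every 100 ms
 * so the LACP state machines can exchange control frames.
 */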
1621
1622
1623 static int
1624 slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
1625                 struct rte_eth_dev *slave_eth_dev)
1626 {
1627         int errval = 0;
1628         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1629         struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
1630
1631         if (port->slow_pool == NULL) {
1632                 char mem_name[256];
1633                 int slave_id = slave_eth_dev->data->port_id;
1634
1635                 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
1636                                 slave_id);
1637                 port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
1638                         250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
1639                         slave_eth_dev->data->numa_node);
1640
1641                 /* Any memory allocation failure in initialization is critical because
1642                  * resources can't be freed, so reinitialization is impossible. */
1643                 if (port->slow_pool == NULL) {
1644                         rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
1645                                 slave_id, mem_name, rte_strerror(rte_errno));
1646                 }
1647         }
1648
1649         if (internals->mode4.dedicated_queues.enabled == 1) {
1650                 /* Configure slow Rx queue */
1651
1652                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
1653                                 internals->mode4.dedicated_queues.rx_qid, 128,
1654                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1655                                 NULL, port->slow_pool);
1656                 if (errval != 0) {
1657                         RTE_BOND_LOG(ERR,
1658                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1659                                         slave_eth_dev->data->port_id,
1660                                         internals->mode4.dedicated_queues.rx_qid,
1661                                         errval);
1662                         return errval;
1663                 }
1664
1665                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
1666                                 internals->mode4.dedicated_queues.tx_qid, 512,
1667                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1668                                 NULL);
1669                 if (errval != 0) {
1670                         RTE_BOND_LOG(ERR,
1671                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1672                                 slave_eth_dev->data->port_id,
1673                                 internals->mode4.dedicated_queues.tx_qid,
1674                                 errval);
1675                         return errval;
1676                 }
1677         }
1678         return 0;
1679 }
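/*
 * Illustrative sketch: the slow (dedicated) queues configured above are
 * only set up when the application opts in before starting the device:
 *
 *	rte_eth_bond_8023ad_dedicated_queues_enable(bond_port);
 *
 * Each slave then reserves one extra rx/tx queue pair for LACPDUs,
 * steered to it by the flow rule installed from slave_configure().
 */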
1680
1681 int
1682 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1683                 struct rte_eth_dev *slave_eth_dev)
1684 {
1685         struct bond_rx_queue *bd_rx_q;
1686         struct bond_tx_queue *bd_tx_q;
1687         uint16_t nb_rx_queues;
1688         uint16_t nb_tx_queues;
1689
1690         int errval;
1691         uint16_t q_id;
1692         struct rte_flow_error flow_error;
1693
1694         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1695
1696         /* Stop slave */
1697         rte_eth_dev_stop(slave_eth_dev->data->port_id);
1698
1699         /* Enable interrupts on slave device if supported */
1700         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1701                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1702
1703         /* If RSS is enabled for bonding, try to enable it for slaves  */
1704         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1705                 if (internals->rss_key_len != 0) {
1706                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1707                                         internals->rss_key_len;
1708                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1709                                         internals->rss_key;
1710                 } else {
1711                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1712                 }
1713
1714                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1715                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1716                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1717                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1718         }
1719
1720         if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
1721                         DEV_RX_OFFLOAD_VLAN_FILTER)
1722                 slave_eth_dev->data->dev_conf.rxmode.offloads |=
1723                                 DEV_RX_OFFLOAD_VLAN_FILTER;
1724         else
1725                 slave_eth_dev->data->dev_conf.rxmode.offloads &=
1726                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
1727
1728         nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
1729         nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
1730
1731         if (internals->mode == BONDING_MODE_8023AD) {
1732                 if (internals->mode4.dedicated_queues.enabled == 1) {
1733                         nb_rx_queues++;
1734                         nb_tx_queues++;
1735                 }
1736         }
1737
1738         errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
1739                                      bonded_eth_dev->data->mtu);
1740         if (errval != 0 && errval != -ENOTSUP) {
1741                 RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
1742                                 slave_eth_dev->data->port_id, errval);
1743                 return errval;
1744         }
1745
1746         /* Configure device */
1747         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1748                         nb_rx_queues, nb_tx_queues,
1749                         &(slave_eth_dev->data->dev_conf));
1750         if (errval != 0) {
1751                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1752                                 slave_eth_dev->data->port_id, errval);
1753                 return errval;
1754         }
1755
1756         /* Setup Rx Queues */
1757         for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1758                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1759
1760                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1761                                 bd_rx_q->nb_rx_desc,
1762                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1763                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1764                 if (errval != 0) {
1765                         RTE_BOND_LOG(ERR,
1766                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1767                                         slave_eth_dev->data->port_id, q_id, errval);
1768                         return errval;
1769                 }
1770         }
1771
1772         /* Setup Tx Queues */
1773         for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1774                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1775
1776                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1777                                 bd_tx_q->nb_tx_desc,
1778                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1779                                 &bd_tx_q->tx_conf);
1780                 if (errval != 0) {
1781                         RTE_BOND_LOG(ERR,
1782                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1783                                 slave_eth_dev->data->port_id, q_id, errval);
1784                         return errval;
1785                 }
1786         }
1787
1788         if (internals->mode == BONDING_MODE_8023AD &&
1789                         internals->mode4.dedicated_queues.enabled == 1) {
1790                 errval = slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev);
1791                 if (errval != 0)
1792                         return errval;
1793
1794                 if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
1795                                 slave_eth_dev->data->port_id) != 0) {
1796                         RTE_BOND_LOG(ERR,
1797                                 "bond_ethdev_8023ad_flow_verify: port=%d, err",
1798                                 slave_eth_dev->data->port_id);
1799                         return -1;
1800                 }
1801
1802                 if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
1803                         rte_flow_destroy(slave_eth_dev->data->port_id,
1804                                         internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
1805                                         &flow_error);
1806
1807                 bond_ethdev_8023ad_flow_set(bonded_eth_dev,
1808                                 slave_eth_dev->data->port_id);
1809         }
1810
1811         /* Start device */
1812         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1813         if (errval != 0) {
1814                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1815                                 slave_eth_dev->data->port_id, errval);
1816                 return -1;
1817         }
1818
1819         /* If RSS is enabled for bonding, synchronize RETA */
1820         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1821                 int i;
1822                 struct bond_dev_private *internals;
1823
1824                 internals = bonded_eth_dev->data->dev_private;
1825
1826                 for (i = 0; i < internals->slave_count; i++) {
1827                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1828                                 errval = rte_eth_dev_rss_reta_update(
1829                                                 slave_eth_dev->data->port_id,
1830                                                 &internals->reta_conf[0],
1831                                                 internals->slaves[i].reta_size);
1832                                 if (errval != 0) {
1833                                         RTE_BOND_LOG(WARNING,
1834                                                      "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1835                                                      " RSS Configuration for bonding may be inconsistent.",
1836                                                      slave_eth_dev->data->port_id, errval);
1837                                 }
1838                                 break;
1839                         }
1840                 }
1841         }
1842
1843         /* If lsc interrupt is set, check initial slave's link status */
1844         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1845                 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1846                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1847                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
1848                         NULL);
1849         }
1850
1851         return 0;
1852 }
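/*
 * Illustrative sketch: the RSS configuration propagated to each slave
 * above originates from the application's setup of the bonded port
 * (queue counts and the hash mask are assumptions):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = { .rss_hf = ETH_RSS_IP },
 *	};
 *	rte_eth_dev_configure(bond_port, 4, 4, &conf);
 */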
1853
1854 void
1855 slave_remove(struct bond_dev_private *internals,
1856                 struct rte_eth_dev *slave_eth_dev)
1857 {
1858         uint16_t i;
1859
1860         for (i = 0; i < internals->slave_count; i++)
1861                 if (internals->slaves[i].port_id ==
1862                                 slave_eth_dev->data->port_id)
1863                         break;
1864
1865         if (i < (internals->slave_count - 1)) {
1866                 struct rte_flow *flow;
1867
1868                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1869                                 sizeof(internals->slaves[0]) *
1870                                 (internals->slave_count - i - 1));
1871                 TAILQ_FOREACH(flow, &internals->flow_list, next) {
1872                         memmove(&flow->flows[i], &flow->flows[i + 1],
1873                                 sizeof(flow->flows[0]) *
1874                                 (internals->slave_count - i - 1));
1875                         flow->flows[internals->slave_count - 1] = NULL;
1876                 }
1877         }
1878
1879         internals->slave_count--;
1880
1881         /* force reconfiguration of slave interfaces */
1882         rte_eth_dev_internal_reset(slave_eth_dev);
1883 }
1884
1885 static void
1886 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1887
1888 void
1889 slave_add(struct bond_dev_private *internals,
1890                 struct rte_eth_dev *slave_eth_dev)
1891 {
1892         struct bond_slave_details *slave_details =
1893                         &internals->slaves[internals->slave_count];
1894
1895         slave_details->port_id = slave_eth_dev->data->port_id;
1896         slave_details->last_link_status = 0;
1897
1898         /* Mark slave devices that don't support interrupts so we can
1899          * compensate when we start the bond
1900          */
1901         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1902                 slave_details->link_status_poll_enabled = 1;
1903         }
1904
1905         slave_details->link_status_wait_to_complete = 0;
1906         /* save the slave's current MAC address so it can be restored later */
1907         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1908                         sizeof(struct rte_ether_addr));
1909 }
1910
1911 void
1912 bond_ethdev_primary_set(struct bond_dev_private *internals,
1913                 uint16_t slave_port_id)
1914 {
1915         int i;
1916
1917         if (internals->active_slave_count < 1)
1918                 internals->current_primary_port = slave_port_id;
1919         else
1920                 /* Search bonded device slave ports for new proposed primary port */
1921                 for (i = 0; i < internals->active_slave_count; i++) {
1922                         if (internals->active_slaves[i] == slave_port_id)
1923                                 internals->current_primary_port = slave_port_id;
1924                 }
1925 }
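/*
 * Illustrative sketch: applications select the primary slave through the
 * public wrapper, which validates both port ids before this helper runs:
 *
 *	if (rte_eth_bond_primary_set(bond_port, slave_port) != 0)
 *		rte_exit(EXIT_FAILURE, "failed to set primary slave\n");
 */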
1926
1927 static int
1928 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1929
1930 static int
1931 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1932 {
1933         struct bond_dev_private *internals;
1934         int i;
1935
1936         /* slave eth dev will be started by bonded device */
1937         if (check_for_bonded_ethdev(eth_dev)) {
1938                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1939                                 eth_dev->data->port_id);
1940                 return -1;
1941         }
1942
1943         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1944         eth_dev->data->dev_started = 1;
1945
1946         internals = eth_dev->data->dev_private;
1947
1948         if (internals->slave_count == 0) {
1949                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1950                 goto out_err;
1951         }
1952
1953         if (internals->user_defined_mac == 0) {
1954                 struct rte_ether_addr *new_mac_addr = NULL;
1955
1956                 for (i = 0; i < internals->slave_count; i++)
1957                         if (internals->slaves[i].port_id == internals->primary_port)
1958                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1959
1960                 if (new_mac_addr == NULL)
1961                         goto out_err;
1962
1963                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1964                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1965                                         eth_dev->data->port_id);
1966                         goto out_err;
1967                 }
1968         }
1969
1970         if (internals->mode == BONDING_MODE_8023AD) {
1971                 if (internals->mode4.dedicated_queues.enabled == 1) {
1972                         internals->mode4.dedicated_queues.rx_qid =
1973                                         eth_dev->data->nb_rx_queues;
1974                         internals->mode4.dedicated_queues.tx_qid =
1975                                         eth_dev->data->nb_tx_queues;
1976                 }
1977         }
1978
1979
1980         /* Reconfigure each slave device if starting bonded device */
1981         for (i = 0; i < internals->slave_count; i++) {
1982                 struct rte_eth_dev *slave_ethdev =
1983                                 &(rte_eth_devices[internals->slaves[i].port_id]);
1984                 if (slave_configure(eth_dev, slave_ethdev) != 0) {
1985                         RTE_BOND_LOG(ERR,
1986                                 "bonded port (%d) failed to reconfigure slave device (%d)",
1987                                 eth_dev->data->port_id,
1988                                 internals->slaves[i].port_id);
1989                         goto out_err;
1990                 }
1991                 /* We will need to poll for link status if any slave doesn't
1992                  * support interrupts
1993                  */
1994                 if (internals->slaves[i].link_status_poll_enabled)
1995                         internals->link_status_polling_enabled = 1;
1996         }
1997
1998         /* start polling if needed */
1999         if (internals->link_status_polling_enabled) {
2000                 rte_eal_alarm_set(
2001                         internals->link_status_polling_interval_ms * 1000,
2002                         bond_ethdev_slave_link_status_change_monitor,
2003                         (void *)&rte_eth_devices[internals->port_id]);
2004         }
2005
2006         /* Update all slave devices' MACs */
2007         if (mac_address_slaves_update(eth_dev) != 0)
2008                 goto out_err;
2009
2010         if (internals->user_defined_primary_port)
2011                 bond_ethdev_primary_set(internals, internals->primary_port);
2012
2013         if (internals->mode == BONDING_MODE_8023AD)
2014                 bond_mode_8023ad_start(eth_dev);
2015
2016         if (internals->mode == BONDING_MODE_TLB ||
2017                         internals->mode == BONDING_MODE_ALB)
2018                 bond_tlb_enable(internals);
2019
2020         return 0;
2021
2022 out_err:
2023         eth_dev->data->dev_started = 0;
2024         return -1;
2025 }
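/*
 * Illustrative sketch of the application-side bring-up that ends in
 * bond_ethdev_start() (descriptor counts, queue counts, port ids and
 * the mbuf pool are assumptions):
 *
 *	int bond_port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 *	rte_eth_bond_slave_add(bond_port, 0);
 *	rte_eth_bond_slave_add(bond_port, 1);
 *	rte_eth_dev_configure(bond_port, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(bond_port, 0, 128, rte_socket_id(),
 *			NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(bond_port, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(bond_port);	-- reaches this function
 */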
2026
2027 static void
2028 bond_ethdev_free_queues(struct rte_eth_dev *dev)
2029 {
2030         uint16_t i;
2031
2032         if (dev->data->rx_queues != NULL) {
2033                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2034                         rte_free(dev->data->rx_queues[i]);
2035                         dev->data->rx_queues[i] = NULL;
2036                 }
2037                 dev->data->nb_rx_queues = 0;
2038         }
2039
2040         if (dev->data->tx_queues != NULL) {
2041                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2042                         rte_free(dev->data->tx_queues[i]);
2043                         dev->data->tx_queues[i] = NULL;
2044                 }
2045                 dev->data->nb_tx_queues = 0;
2046         }
2047 }
2048
2049 void
2050 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
2051 {
2052         struct bond_dev_private *internals = eth_dev->data->dev_private;
2053         uint16_t i;
2054
2055         if (internals->mode == BONDING_MODE_8023AD) {
2056                 struct port *port;
2057                 void *pkt = NULL;
2058
2059                 bond_mode_8023ad_stop(eth_dev);
2060
2061                 /* Discard all messages to/from mode 4 state machines */
2062                 for (i = 0; i < internals->active_slave_count; i++) {
2063                         port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
2064
2065                         RTE_ASSERT(port->rx_ring != NULL);
2066                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
2067                                 rte_pktmbuf_free(pkt);
2068
2069                         RTE_ASSERT(port->tx_ring != NULL);
2070                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
2071                                 rte_pktmbuf_free(pkt);
2072                 }
2073         }
2074
2075         if (internals->mode == BONDING_MODE_TLB ||
2076                         internals->mode == BONDING_MODE_ALB) {
2077                 bond_tlb_disable(internals);
2078                 for (i = 0; i < internals->active_slave_count; i++)
2079                         tlb_last_obytets[internals->active_slaves[i]] = 0;
2080         }
2081
2082         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2083         eth_dev->data->dev_started = 0;
2084
2085         internals->link_status_polling_enabled = 0;
2086         for (i = 0; i < internals->slave_count; i++) {
2087                 uint16_t slave_id = internals->slaves[i].port_id;
2088                 if (find_slave_by_id(internals->active_slaves,
2089                                 internals->active_slave_count, slave_id) !=
2090                                                 internals->active_slave_count) {
2091                         internals->slaves[i].last_link_status = 0;
2092                         rte_eth_dev_stop(slave_id);
2093                         deactivate_slave(eth_dev, slave_id);
2094                 }
2095         }
2096 }
2097
2098 int
2099 bond_ethdev_close(struct rte_eth_dev *dev)
2100 {
2101         struct bond_dev_private *internals = dev->data->dev_private;
2102         uint16_t bond_port_id = internals->port_id;
2103         int skipped = 0;
2104         struct rte_flow_error ferror;
2105
2106         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2107                 return 0;
2108
2109         RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
2110         while (internals->slave_count != skipped) {
2111                 uint16_t port_id = internals->slaves[skipped].port_id;
2112
2113                 rte_eth_dev_stop(port_id);
2114
2115                 if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
2116                         RTE_BOND_LOG(ERR,
2117                                      "Failed to remove port %d from bonded device %s",
2118                                      port_id, dev->device->name);
2119                         skipped++;
2120                 }
2121         }
2122         bond_flow_ops.flush(dev, &ferror);
2123         bond_ethdev_free_queues(dev);
2124         rte_bitmap_reset(internals->vlan_filter_bmp);
2125         rte_bitmap_free(internals->vlan_filter_bmp);
2126         rte_free(internals->vlan_filter_bmpmem);
2127
2128         /* Try to release the mempool used in mode 6. If the bonded device
2129          * is not in mode 6, the pointer is NULL and freeing it is harmless.
2130          */
2131         rte_mempool_free(internals->mode6.mempool);
2132
2133         dev->dev_ops = NULL;
2134         dev->rx_pkt_burst = NULL;
2135         dev->tx_pkt_burst = NULL;
2136
2137         return 0;
2138 }
2139
2140 /* forward declaration */
2141 static int bond_ethdev_configure(struct rte_eth_dev *dev);
2142
2143 static int
2144 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2145 {
2146         struct bond_dev_private *internals = dev->data->dev_private;
2147         struct bond_slave_details slave;
2148         int ret;
2149
2150         uint16_t max_nb_rx_queues = UINT16_MAX;
2151         uint16_t max_nb_tx_queues = UINT16_MAX;
2152         uint16_t max_rx_desc_lim = UINT16_MAX;
2153         uint16_t max_tx_desc_lim = UINT16_MAX;
2154
2155         dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
2156
2157         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2158                         internals->candidate_max_rx_pktlen :
2159                         RTE_ETHER_MAX_JUMBO_FRAME_LEN;
2160
2161         /* The maximum number of tx/rx queues that the bonded device can
2162          * support is the minimum across all bonded slaves, as every slave
2163          * must be capable of supporting the same number of tx/rx queues.
2164          */
2165         if (internals->slave_count > 0) {
2166                 struct rte_eth_dev_info slave_info;
2167                 uint16_t idx;
2168
2169                 for (idx = 0; idx < internals->slave_count; idx++) {
2170                         slave = internals->slaves[idx];
2171                         ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
2172                         if (ret != 0) {
2173                                 RTE_BOND_LOG(ERR,
2174                                         "%s: failed to get device (port %u) info: %s",
2175                                         __func__,
2176                                         slave.port_id,
2177                                         strerror(-ret));
2178
2179                                 return ret;
2180                         }
2181
2182                         if (slave_info.max_rx_queues < max_nb_rx_queues)
2183                                 max_nb_rx_queues = slave_info.max_rx_queues;
2184
2185                         if (slave_info.max_tx_queues < max_nb_tx_queues)
2186                                 max_nb_tx_queues = slave_info.max_tx_queues;
2187
2188                         if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
2189                                 max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
2190
2191                         if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
2192                                 max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
2193                 }
2194         }
2195
2196         dev_info->max_rx_queues = max_nb_rx_queues;
2197         dev_info->max_tx_queues = max_nb_tx_queues;
2198
2199         memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2200                sizeof(dev_info->default_rxconf));
2201         memcpy(&dev_info->default_txconf, &internals->default_txconf,
2202                sizeof(dev_info->default_txconf));
2203
2204         dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
2205         dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
2206
2207         /**
2208          * If dedicated hw queues enabled for link bonding device in LACP mode
2209          * then we need to reduce the maximum number of data path queues by 1.
2210          */
2211         if (internals->mode == BONDING_MODE_8023AD &&
2212                 internals->mode4.dedicated_queues.enabled == 1) {
2213                 dev_info->max_rx_queues--;
2214                 dev_info->max_tx_queues--;
2215         }
2216
2217         dev_info->min_rx_bufsize = 0;
2218
2219         dev_info->rx_offload_capa = internals->rx_offload_capa;
2220         dev_info->tx_offload_capa = internals->tx_offload_capa;
2221         dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2222         dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2223         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2224
2225         dev_info->reta_size = internals->reta_size;
2226
2227         return 0;
2228 }
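/*
 * Illustrative sketch: the queue limits computed above are what an
 * application observes; with dedicated queues enabled in mode 4 they
 * are one lower than the smallest slave's capabilities:
 *
 *	struct rte_eth_dev_info info;
 *	rte_eth_dev_info_get(bond_port, &info);
 *	printf("max rxq %u, max txq %u\n",
 *			info.max_rx_queues, info.max_tx_queues);
 */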
2229
2230 static int
2231 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2232 {
2233         int res;
2234         uint16_t i;
2235         struct bond_dev_private *internals = dev->data->dev_private;
2236
2237         /* don't do this while a slave is being added */
2238         rte_spinlock_lock(&internals->lock);
2239
2240         if (on)
2241                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2242         else
2243                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2244
2245         for (i = 0; i < internals->slave_count; i++) {
2246                 uint16_t port_id = internals->slaves[i].port_id;
2247
2248                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2249                 if (res == -ENOTSUP)
2250                         RTE_BOND_LOG(WARNING,
2251                                      "Setting VLAN filter on slave port %u not supported.",
2252                                      port_id);
2253         }
2254
2255         rte_spinlock_unlock(&internals->lock);
2256         return 0;
2257 }
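/*
 * Illustrative sketch: a VLAN filter added on the bonded port fans out
 * to every current slave via the handler above, and the bitmap lets it
 * be replayed onto slaves added later (VLAN id 100 is an assumption):
 *
 *	rte_eth_dev_vlan_filter(bond_port, 100, 1);
 */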
2258
2259 static int
2260 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
2261                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
2262                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
2263 {
2264         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
2265                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
2266                                         0, dev->data->numa_node);
2267         if (bd_rx_q == NULL)
2268                 return -1;
2269
2270         bd_rx_q->queue_id = rx_queue_id;
2271         bd_rx_q->dev_private = dev->data->dev_private;
2272
2273         bd_rx_q->nb_rx_desc = nb_rx_desc;
2274
2275         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
2276         bd_rx_q->mb_pool = mb_pool;
2277
2278         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
2279
2280         return 0;
2281 }
2282
2283 static int
2284 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
2285                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
2286                 const struct rte_eth_txconf *tx_conf)
2287 {
2288         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
2289                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
2290                                         0, dev->data->numa_node);
2291
2292         if (bd_tx_q == NULL)
2293                 return -1;
2294
2295         bd_tx_q->queue_id = tx_queue_id;
2296         bd_tx_q->dev_private = dev->data->dev_private;
2297
2298         bd_tx_q->nb_tx_desc = nb_tx_desc;
2299         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
2300
2301         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
2302
2303         return 0;
2304 }
2305
2306 static void
2307 bond_ethdev_rx_queue_release(void *queue)
2308 {
2309         if (queue == NULL)
2310                 return;
2311
2312         rte_free(queue);
2313 }
2314
2315 static void
2316 bond_ethdev_tx_queue_release(void *queue)
2317 {
2318         if (queue == NULL)
2319                 return;
2320
2321         rte_free(queue);
2322 }
2323
2324 static void
2325 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
2326 {
2327         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
2328         struct bond_dev_private *internals;
2329
2330         /* Default "polling slave found" to true: we don't want to disable
2331          * the polling thread if we cannot get the lock */
2332         int i, polling_slave_found = 1;
2333
2334         if (cb_arg == NULL)
2335                 return;
2336
2337         bonded_ethdev = cb_arg;
2338         internals = bonded_ethdev->data->dev_private;
2339
2340         if (!bonded_ethdev->data->dev_started ||
2341                 !internals->link_status_polling_enabled)
2342                 return;
2343
2344         /* If device is currently being configured then don't check slaves link
2345          * status, wait until next period */
2346         if (rte_spinlock_trylock(&internals->lock)) {
2347                 if (internals->slave_count > 0)
2348                         polling_slave_found = 0;
2349
2350                 for (i = 0; i < internals->slave_count; i++) {
2351                         if (!internals->slaves[i].link_status_poll_enabled)
2352                                 continue;
2353
2354                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
2355                         polling_slave_found = 1;
2356
2357                         /* Update slave link status */
2358                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
2359                                         internals->slaves[i].link_status_wait_to_complete);
2360
2361                         /* if link status has changed since last checked then call lsc
2362                          * event callback */
2363                         if (slave_ethdev->data->dev_link.link_status !=
2364                                         internals->slaves[i].last_link_status) {
2365                                 internals->slaves[i].last_link_status =
2366                                                 slave_ethdev->data->dev_link.link_status;
2367
2368                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
2369                                                 RTE_ETH_EVENT_INTR_LSC,
2370                                                 &bonded_ethdev->data->port_id,
2371                                                 NULL);
2372                         }
2373                 }
2374                 rte_spinlock_unlock(&internals->lock);
2375         }
2376
2377         if (polling_slave_found)
2378                 /* Set alarm to continue monitoring link status of slave ethdev's */
2379                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
2380                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
2381 }
2382
2383 static int
2384 bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2385 {
2386         int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
2387
2388         struct bond_dev_private *bond_ctx;
2389         struct rte_eth_link slave_link;
2390
2391         bool one_link_update_succeeded;
2392         uint32_t idx;
2393         int ret;
2394
2395         bond_ctx = ethdev->data->dev_private;
2396
2397         ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2398
2399         if (ethdev->data->dev_started == 0 ||
2400                         bond_ctx->active_slave_count == 0) {
2401                 ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
2402                 return 0;
2403         }
2404
2405         ethdev->data->dev_link.link_status = ETH_LINK_UP;
2406
2407         if (wait_to_complete)
2408                 link_update = rte_eth_link_get;
2409         else
2410                 link_update = rte_eth_link_get_nowait;
2411
2412         switch (bond_ctx->mode) {
2413         case BONDING_MODE_BROADCAST:
2414                 /**
2415                  * Setting link speed to UINT32_MAX to ensure we pick up the
2416                  * value of the first active slave
2417                  */
2418                 ethdev->data->dev_link.link_speed = UINT32_MAX;
2419
2420                 /**
2421                  * The link speed is the minimum of all the slaves' link
2422                  * speeds: transmitting faster than the slowest slave would
2423                  * cause packet loss on that slave.
2424                  */
2425                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2426                         ret = link_update(bond_ctx->active_slaves[idx],
2427                                           &slave_link);
2428                         if (ret < 0) {
2429                                 ethdev->data->dev_link.link_speed =
2430                                         ETH_SPEED_NUM_NONE;
2431                                 RTE_BOND_LOG(ERR,
2432                                         "Slave (port %u) link get failed: %s",
2433                                         bond_ctx->active_slaves[idx],
2434                                         rte_strerror(-ret));
2435                                 return 0;
2436                         }
2437
2438                         if (slave_link.link_speed <
2439                                         ethdev->data->dev_link.link_speed)
2440                                 ethdev->data->dev_link.link_speed =
2441                                                 slave_link.link_speed;
2442                 }
2443                 break;
2444         case BONDING_MODE_ACTIVE_BACKUP:
2445                 /* Current primary slave */
2446                 ret = link_update(bond_ctx->current_primary_port, &slave_link);
2447                 if (ret < 0) {
2448                         RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
2449                                 bond_ctx->current_primary_port,
2450                                 rte_strerror(-ret));
2451                         return 0;
2452                 }
2453
2454                 ethdev->data->dev_link.link_speed = slave_link.link_speed;
2455                 break;
2456         case BONDING_MODE_8023AD:
2457                 ethdev->data->dev_link.link_autoneg =
2458                                 bond_ctx->mode4.slave_link.link_autoneg;
2459                 ethdev->data->dev_link.link_duplex =
2460                                 bond_ctx->mode4.slave_link.link_duplex;
2461                 /* fall through */
2462                 /* to update link speed */
2463         case BONDING_MODE_ROUND_ROBIN:
2464         case BONDING_MODE_BALANCE:
2465         case BONDING_MODE_TLB:
2466         case BONDING_MODE_ALB:
2467         default:
2468                 /**
2469                  * In these modes the maximum theoretical link speed is the
2470                  * sum of all the slaves' link speeds
2471                  */
2472                 ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2473                 one_link_update_succeeded = false;
2474
2475                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2476                         ret = link_update(bond_ctx->active_slaves[idx],
2477                                         &slave_link);
2478                         if (ret < 0) {
2479                                 RTE_BOND_LOG(ERR,
2480                                         "Slave (port %u) link get failed: %s",
2481                                         bond_ctx->active_slaves[idx],
2482                                         rte_strerror(-ret));
2483                                 continue;
2484                         }
2485
2486                         one_link_update_succeeded = true;
2487                         ethdev->data->dev_link.link_speed +=
2488                                         slave_link.link_speed;
2489                 }
2490
2491                 if (!one_link_update_succeeded) {
2492                         RTE_BOND_LOG(ERR, "All slaves link get failed");
2493                         return 0;
2494                 }
2495         }
2496
2497
2498         return 0;
2499 }
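/*
 * Illustrative sketch: the aggregate link computed above is what the
 * generic API reports. For example, in mode 2 (balance) with two 10G
 * slaves up, link_speed is 20000 (Mbps):
 *
 *	struct rte_eth_link link;
 *	rte_eth_link_get_nowait(bond_port, &link);
 */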
2500
2501
2502 static int
2503 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2504 {
2505         struct bond_dev_private *internals = dev->data->dev_private;
2506         struct rte_eth_stats slave_stats;
2507         int i, j;
2508
2509         for (i = 0; i < internals->slave_count; i++) {
2510                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
2511
2512                 stats->ipackets += slave_stats.ipackets;
2513                 stats->opackets += slave_stats.opackets;
2514                 stats->ibytes += slave_stats.ibytes;
2515                 stats->obytes += slave_stats.obytes;
2516                 stats->imissed += slave_stats.imissed;
2517                 stats->ierrors += slave_stats.ierrors;
2518                 stats->oerrors += slave_stats.oerrors;
2519                 stats->rx_nombuf += slave_stats.rx_nombuf;
2520
2521                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
2522                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
2523                         stats->q_opackets[j] += slave_stats.q_opackets[j];
2524                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
2525                         stats->q_obytes[j] += slave_stats.q_obytes[j];
2526                         stats->q_errors[j] += slave_stats.q_errors[j];
2527                 }
2528
2529         }
2530
2531         return 0;
2532 }
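/*
 * Illustrative sketch: reading statistics on the bonded port returns the
 * per-slave sums accumulated above; resetting clears every slave:
 *
 *	struct rte_eth_stats stats;
 *	rte_eth_stats_get(bond_port, &stats);
 *	rte_eth_stats_reset(bond_port);
 */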
2533
2534 static int
2535 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
2536 {
2537         struct bond_dev_private *internals = dev->data->dev_private;
2538         int i;
2539         int err;
2540         int ret;
2541
2542         for (i = 0, err = 0; i < internals->slave_count; i++) {
2543                 ret = rte_eth_stats_reset(internals->slaves[i].port_id);
2544                 if (ret != 0)
2545                         err = ret;
2546         }
2547
2548         return err;
2549 }
2550
2551 static int
2552 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2553 {
2554         struct bond_dev_private *internals = eth_dev->data->dev_private;
2555         int i;
2556         int ret = 0;
2557         uint16_t port_id;
2558
2559         switch (internals->mode) {
2560         /* Promiscuous mode is propagated to all slaves */
2561         case BONDING_MODE_ROUND_ROBIN:
2562         case BONDING_MODE_BALANCE:
2563         case BONDING_MODE_BROADCAST:
2564         case BONDING_MODE_8023AD: {
2565                 unsigned int slave_ok = 0;
2566
2567                 for (i = 0; i < internals->slave_count; i++) {
2568                         port_id = internals->slaves[i].port_id;
2569
2570                         ret = rte_eth_promiscuous_enable(port_id);
2571                         if (ret != 0)
2572                                 RTE_BOND_LOG(ERR,
2573                                         "Failed to enable promiscuous mode for port %u: %s",
2574                                         port_id, rte_strerror(-ret));
2575                         else
2576                                 slave_ok++;
2577                 }
2578                 /*
2579                  * Report success if the operation succeeded on at least
2580                  * one slave. Otherwise return the last error code.
2581                  */
2582                 if (slave_ok > 0)
2583                         ret = 0;
2584                 break;
2585         }
2586         /* Promiscuous mode is propagated only to primary slave */
2587         case BONDING_MODE_ACTIVE_BACKUP:
2588         case BONDING_MODE_TLB:
2589         case BONDING_MODE_ALB:
2590         default:
2591                 /* Do not touch promisc when there cannot be primary ports */
2592                 if (internals->slave_count == 0)
2593                         break;
2594                 port_id = internals->current_primary_port;
2595                 ret = rte_eth_promiscuous_enable(port_id);
2596                 if (ret != 0)
2597                         RTE_BOND_LOG(ERR,
2598                                 "Failed to enable promiscuous mode for port %u: %s",
2599                                 port_id, rte_strerror(-ret));
2600         }
2601
2602         return ret;
2603 }
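/*
 * Illustrative sketch: enabling promiscuous mode on the bonded port lands
 * in the handler above, which fans out to all slaves (or only the primary,
 * depending on the mode):
 *
 *	rte_eth_promiscuous_enable(bond_port);
 */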
2604
2605 static int
2606 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
2607 {
2608         struct bond_dev_private *internals = dev->data->dev_private;
2609         int i;
2610         int ret = 0;
2611         uint16_t port_id;
2612
2613         switch (internals->mode) {
2614         /* Promiscuous mode is propagated to all slaves */
2615         case BONDING_MODE_ROUND_ROBIN:
2616         case BONDING_MODE_BALANCE:
2617         case BONDING_MODE_BROADCAST:
2618         case BONDING_MODE_8023AD: {
2619                 unsigned int slave_ok = 0;
2620
2621                 for (i = 0; i < internals->slave_count; i++) {
2622                         port_id = internals->slaves[i].port_id;
2623
2624                         if (internals->mode == BONDING_MODE_8023AD &&
2625                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2626                                         BOND_8023AD_FORCED_PROMISC) {
2627                                 slave_ok++;
2628                                 continue;
2629                         }
2630                         ret = rte_eth_promiscuous_disable(port_id);
2631                         if (ret != 0)
2632                                 RTE_BOND_LOG(ERR,
2633                                         "Failed to disable promiscuous mode for port %u: %s",
2634                                         port_id, rte_strerror(-ret));
2635                         else
2636                                 slave_ok++;
2637                 }
2638                 /*
2639                  * Report success if the operation succeeded on at least
2640                  * one slave. Otherwise return the last error code.
2641                  */
2642                 if (slave_ok > 0)
2643                         ret = 0;
2644                 break;
2645         }
2646         /* Promiscuous mode is propagated only to primary slave */
2647         case BONDING_MODE_ACTIVE_BACKUP:
2648         case BONDING_MODE_TLB:
2649         case BONDING_MODE_ALB:
2650         default:
2651                 /* Do not touch promisc when there cannot be primary ports */
2652                 if (internals->slave_count == 0)
2653                         break;
2654                 port_id = internals->current_primary_port;
2655                 ret = rte_eth_promiscuous_disable(port_id);
2656                 if (ret != 0)
2657                         RTE_BOND_LOG(ERR,
2658                                 "Failed to disable promiscuous mode for port %u: %s",
2659                                 port_id, rte_strerror(-ret));
2660         }
2661
2662         return ret;
2663 }
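
/*
 * Example (illustrative sketch, not part of the driver): an application
 * toggles promiscuous mode on the bonded port through the regular ethdev
 * API; the handlers above then fan the call out to every slave or only to
 * the primary slave, depending on the bonding mode. `bond_port_id` is a
 * hypothetical, already started bonded port.
 *
 *	int ret = rte_eth_promiscuous_enable(bond_port_id);
 *	if (ret != 0)
 *		printf("promisc enable failed: %s\n", rte_strerror(-ret));
 *	...
 *	ret = rte_eth_promiscuous_disable(bond_port_id);
 */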
2664
2665 static int
2666 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
2667 {
2668         struct bond_dev_private *internals = eth_dev->data->dev_private;
2669         int i;
2670         int ret = 0;
2671         uint16_t port_id;
2672
2673         switch (internals->mode) {
2674         /* allmulti mode is propagated to all slaves */
2675         case BONDING_MODE_ROUND_ROBIN:
2676         case BONDING_MODE_BALANCE:
2677         case BONDING_MODE_BROADCAST:
2678         case BONDING_MODE_8023AD: {
2679                 unsigned int slave_ok = 0;
2680
2681                 for (i = 0; i < internals->slave_count; i++) {
2682                         port_id = internals->slaves[i].port_id;
2683
2684                         ret = rte_eth_allmulticast_enable(port_id);
2685                         if (ret != 0)
2686                                 RTE_BOND_LOG(ERR,
2687                                         "Failed to enable allmulti mode for port %u: %s",
2688                                         port_id, rte_strerror(-ret));
2689                         else
2690                                 slave_ok++;
2691                 }
2692                 /*
2693                  * Report success if operation is successful on at least
2694                  * one slave. Otherwise return the last error code.
2695                  */
2696                 if (slave_ok > 0)
2697                         ret = 0;
2698                 break;
2699         }
2700         /* allmulti mode is propagated only to primary slave */
2701         case BONDING_MODE_ACTIVE_BACKUP:
2702         case BONDING_MODE_TLB:
2703         case BONDING_MODE_ALB:
2704         default:
2705                 /* Do not touch allmulti when there are no slaves, hence no primary port */
2706                 if (internals->slave_count == 0)
2707                         break;
2708                 port_id = internals->current_primary_port;
2709                 ret = rte_eth_allmulticast_enable(port_id);
2710                 if (ret != 0)
2711                         RTE_BOND_LOG(ERR,
2712                                 "Failed to enable allmulti mode for port %u: %s",
2713                                 port_id, rte_strerror(-ret));
2714         }
2715
2716         return ret;
2717 }
2718
2719 static int
2720 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
2721 {
2722         struct bond_dev_private *internals = eth_dev->data->dev_private;
2723         int i;
2724         int ret = 0;
2725         uint16_t port_id;
2726
2727         switch (internals->mode) {
2728         /* allmulti mode is propagated to all slaves */
2729         case BONDING_MODE_ROUND_ROBIN:
2730         case BONDING_MODE_BALANCE:
2731         case BONDING_MODE_BROADCAST:
2732         case BONDING_MODE_8023AD: {
2733                 unsigned int slave_ok = 0;
2734
2735                 for (i = 0; i < internals->slave_count; i++) {
2736                         uint16_t port_id = internals->slaves[i].port_id;
2737
2738                         if (internals->mode == BONDING_MODE_8023AD &&
2739                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2740                                         BOND_8023AD_FORCED_ALLMULTI)
2741                                 continue;
2742
2743                         ret = rte_eth_allmulticast_disable(port_id);
2744                         if (ret != 0)
2745                                 RTE_BOND_LOG(ERR,
2746                                         "Failed to disable allmulti mode for port %u: %s",
2747                                         port_id, rte_strerror(-ret));
2748                         else
2749                                 slave_ok++;
2750                 }
2751                 /*
2752                  * Report success if operation is successful on at least
2753                  * one slave. Otherwise return the last error code.
2754                  */
2755                 if (slave_ok > 0)
2756                         ret = 0;
2757                 break;
2758         }
2759         /* allmulti mode is propagated only to primary slave */
2760         case BONDING_MODE_ACTIVE_BACKUP:
2761         case BONDING_MODE_TLB:
2762         case BONDING_MODE_ALB:
2763         default:
2764                 /* Do not touch allmulti when there are no slaves, hence no primary port */
2765                 if (internals->slave_count == 0)
2766                         break;
2767                 port_id = internals->current_primary_port;
2768                 ret = rte_eth_allmulticast_disable(port_id);
2769                 if (ret != 0)
2770                         RTE_BOND_LOG(ERR,
2771                                 "Failed to disable allmulti mode for port %u: %s",
2772                                 port_id, rte_strerror(-ret));
2773         }
2774
2775         return ret;
2776 }
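
/*
 * Example (illustrative sketch): allmulticast follows the same pattern as
 * promiscuous mode above; note that on disable, mode 4 slaves whose Rx
 * filter was forced to allmulti by the LACP state machine are skipped.
 * `bond_port_id` is a hypothetical bonded port.
 *
 *	int ret = rte_eth_allmulticast_enable(bond_port_id);
 *	if (ret != 0)
 *		printf("allmulti enable failed: %s\n", rte_strerror(-ret));
 */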
2777
2778 static void
2779 bond_ethdev_delayed_lsc_propagation(void *arg)
2780 {
2781         if (arg == NULL)
2782                 return;
2783
2784         rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2785                         RTE_ETH_EVENT_INTR_LSC, NULL);
2786 }
2787
2788 int
2789 bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2790                 void *param, void *ret_param __rte_unused)
2791 {
2792         struct rte_eth_dev *bonded_eth_dev;
2793         struct bond_dev_private *internals;
2794         struct rte_eth_link link;
2795         int rc = -1;
2796         int ret;
2797
2798         uint8_t lsc_flag = 0;
2799         int valid_slave = 0;
2800         uint16_t active_pos;
2801         uint16_t i;
2802
2803         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2804                 return rc;
2805
2806         bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
2807
2808         if (check_for_bonded_ethdev(bonded_eth_dev))
2809                 return rc;
2810
2811         internals = bonded_eth_dev->data->dev_private;
2812
2813         /* If the device isn't started don't handle interrupts */
2814         if (!bonded_eth_dev->data->dev_started)
2815                 return rc;
2816
2817         /* verify that port_id is a valid slave of bonded port */
2818         for (i = 0; i < internals->slave_count; i++) {
2819                 if (internals->slaves[i].port_id == port_id) {
2820                         valid_slave = 1;
2821                         break;
2822                 }
2823         }
2824
2825         if (!valid_slave)
2826                 return rc;
2827
2828         /* Synchronize parallel lsc callback calls, whether triggered by a real
2829          * link event from the slave PMDs or by the bonding PMD itself.
2830          */
2831         rte_spinlock_lock(&internals->lsc_lock);
2832
2833         /* Search for port in active port list */
2834         active_pos = find_slave_by_id(internals->active_slaves,
2835                         internals->active_slave_count, port_id);
2836
2837         ret = rte_eth_link_get_nowait(port_id, &link);
2838         if (ret < 0)
2839                 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);
2840
2841         if (ret == 0 && link.link_status) {
2842                 if (active_pos < internals->active_slave_count)
2843                         goto link_update;
2844
2845                 /* check link state properties if the bonded link is up */
2846                 if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
2847                         if (link_properties_valid(bonded_eth_dev, &link) != 0)
2848                                 RTE_BOND_LOG(ERR, "Invalid link properties "
2849                                              "for slave %d in bonding mode %d",
2850                                              port_id, internals->mode);
2851                 } else {
2852                         /* inherit slave link properties */
2853                         link_properties_set(bonded_eth_dev, &link);
2854                 }
2855
2856                 /* If no active slave ports then set this port to be
2857                  * the primary port.
2858                  */
2859                 if (internals->active_slave_count < 1) {
2860                         /* If first active slave, then change link status */
2861                         bonded_eth_dev->data->dev_link.link_status =
2862                                                                 ETH_LINK_UP;
2863                         internals->current_primary_port = port_id;
2864                         lsc_flag = 1;
2865
2866                         mac_address_slaves_update(bonded_eth_dev);
2867                 }
2868
2869                 activate_slave(bonded_eth_dev, port_id);
2870
2871                 /* If the user has defined the primary port then default to
2872                  * using it.
2873                  */
2874                 if (internals->user_defined_primary_port &&
2875                                 internals->primary_port == port_id)
2876                         bond_ethdev_primary_set(internals, port_id);
2877         } else {
2878                 if (active_pos == internals->active_slave_count)
2879                         goto link_update;
2880
2881                 /* Remove from active slave list */
2882                 deactivate_slave(bonded_eth_dev, port_id);
2883
2884                 if (internals->active_slave_count < 1)
2885                         lsc_flag = 1;
2886
2887                 /* Update primary id: take the first active slave from the list,
2888                  * or fall back to the configured primary port if none is available */
2889                 if (port_id == internals->current_primary_port) {
2890                         if (internals->active_slave_count > 0)
2891                                 bond_ethdev_primary_set(internals,
2892                                                 internals->active_slaves[0]);
2893                         else
2894                                 internals->current_primary_port = internals->primary_port;
2895                         mac_address_slaves_update(bonded_eth_dev);
2896                 }
2897         }
2898
2899 link_update:
2900         /*
2901          * Update bonded device link properties after any change to the
2902          * active slaves
2903          */
2904         bond_ethdev_link_update(bonded_eth_dev, 0);
2905
2906         if (lsc_flag) {
2907                 /* Cancel any possible outstanding interrupts if delays are enabled */
2908                 if (internals->link_up_delay_ms > 0 ||
2909                         internals->link_down_delay_ms > 0)
2910                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2911                                         bonded_eth_dev);
2912
2913                 if (bonded_eth_dev->data->dev_link.link_status) {
2914                         if (internals->link_up_delay_ms > 0)
2915                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2916                                                 bond_ethdev_delayed_lsc_propagation,
2917                                                 (void *)bonded_eth_dev);
2918                         else
2919                                 rte_eth_dev_callback_process(bonded_eth_dev,
2920                                                 RTE_ETH_EVENT_INTR_LSC,
2921                                                 NULL);
2922
2923                 } else {
2924                         if (internals->link_down_delay_ms > 0)
2925                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2926                                                 bond_ethdev_delayed_lsc_propagation,
2927                                                 (void *)bonded_eth_dev);
2928                         else
2929                                 rte_eth_dev_callback_process(bonded_eth_dev,
2930                                                 RTE_ETH_EVENT_INTR_LSC,
2931                                                 NULL);
2932                 }
2933         }
2934
2935         rte_spinlock_unlock(&internals->lsc_lock);
2936
2937         return rc;
2938 }
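
/*
 * Example (illustrative sketch): an application that wants the aggregate
 * link events produced by the callback above registers on the bonded port,
 * not on the individual slaves. The event fires immediately or after the
 * configured up_delay/down_delay via the delayed propagation alarm.
 *
 *	static int
 *	app_lsc_cb(uint16_t port_id, enum rte_eth_event_type type,
 *		   void *cb_arg, void *ret_param)
 *	{
 *		struct rte_eth_link link;
 *
 *		RTE_SET_USED(type);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		rte_eth_link_get_nowait(port_id, &link);
 *		printf("bond port %u link %s\n", port_id,
 *		       link.link_status ? "up" : "down");
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(bond_port_id, RTE_ETH_EVENT_INTR_LSC,
 *			app_lsc_cb, NULL);
 */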
2939
2940 static int
2941 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2942                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2943 {
2944         unsigned i, j;
2945         int result = 0;
2946         int slave_reta_size;
2947         unsigned reta_count;
2948         struct bond_dev_private *internals = dev->data->dev_private;
2949
2950         if (reta_size != internals->reta_size)
2951                 return -EINVAL;
2952
2953         /* Copy RETA table */
2954         reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
2955                         RTE_RETA_GROUP_SIZE;
2956
2957         for (i = 0; i < reta_count; i++) {
2958                 internals->reta_conf[i].mask = reta_conf[i].mask;
2959                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2960                         if ((reta_conf[i].mask >> j) & 0x01)
2961                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2962         }
2963
2964         /* Fill rest of array */
2965         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2966                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2967                                 sizeof(internals->reta_conf[0]) * reta_count);
2968
2969         /* Propagate RETA over slaves */
2970         for (i = 0; i < internals->slave_count; i++) {
2971                 slave_reta_size = internals->slaves[i].reta_size;
2972                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2973                                 &internals->reta_conf[0], slave_reta_size);
2974                 if (result < 0)
2975                         return result;
2976         }
2977
2978         return 0;
2979 }
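
/*
 * Example (illustrative sketch): building a RETA update for the bonded
 * port. reta_size must equal the size reported by dev_infos_get, otherwise
 * the handler above returns -EINVAL. bond_spread_reta() is a hypothetical
 * helper; reta_size is assumed to be at most ETH_RSS_RETA_SIZE_512.
 *
 *	static int
 *	bond_spread_reta(uint16_t bond_port_id, uint16_t reta_size,
 *			uint16_t nb_rx_queues)
 *	{
 *		struct rte_eth_rss_reta_entry64 conf[ETH_RSS_RETA_SIZE_512 /
 *				RTE_RETA_GROUP_SIZE];
 *		uint16_t i;
 *
 *		memset(conf, 0, sizeof(conf));
 *		for (i = 0; i < reta_size; i++) {
 *			conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *				1ULL << (i % RTE_RETA_GROUP_SIZE);
 *			conf[i / RTE_RETA_GROUP_SIZE].reta[i %
 *				RTE_RETA_GROUP_SIZE] = i % nb_rx_queues;
 *		}
 *		return rte_eth_dev_rss_reta_update(bond_port_id, conf,
 *				reta_size);
 *	}
 */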
2980
2981 static int
2982 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2983                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2984 {
2985         int i, j;
2986         struct bond_dev_private *internals = dev->data->dev_private;
2987
2988         if (reta_size != internals->reta_size)
2989                 return -EINVAL;
2990
2991         /* Copy RETA table */
2992         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2993                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2994                         if ((reta_conf[i].mask >> j) & 0x01)
2995                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2996
2997         return 0;
2998 }
2999
3000 static int
3001 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
3002                 struct rte_eth_rss_conf *rss_conf)
3003 {
3004         int i, result = 0;
3005         struct bond_dev_private *internals = dev->data->dev_private;
3006         struct rte_eth_rss_conf bond_rss_conf;
3007
3008         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
3009
3010         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
3011
3012         if (bond_rss_conf.rss_hf != 0)
3013                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
3014
3015         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
3016                         sizeof(internals->rss_key)) {
3017                 if (bond_rss_conf.rss_key_len == 0)
3018                         bond_rss_conf.rss_key_len = 40; /* default key length */
3019                 internals->rss_key_len = bond_rss_conf.rss_key_len;
3020                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
3021                                 internals->rss_key_len);
3022         }
3023
3024         for (i = 0; i < internals->slave_count; i++) {
3025                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
3026                                 &bond_rss_conf);
3027                 if (result < 0)
3028                         return result;
3029         }
3030
3031         return 0;
3032 }
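
/*
 * Example (illustrative sketch): updating the RSS hash functions on the
 * bonded port. A NULL rss_key keeps the current key; the requested rss_hf
 * bits are masked against internals->flow_type_rss_offloads before being
 * propagated to the slaves.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_key_len = 0,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *	};
 *	int ret = rte_eth_dev_rss_hash_update(bond_port_id, &conf);
 */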
3033
3034 static int
3035 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
3036                 struct rte_eth_rss_conf *rss_conf)
3037 {
3038         struct bond_dev_private *internals = dev->data->dev_private;
3039
3040         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
3041         rss_conf->rss_key_len = internals->rss_key_len;
3042         if (rss_conf->rss_key)
3043                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
3044
3045         return 0;
3046 }
3047
3048 static int
3049 bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3050 {
3051         struct rte_eth_dev *slave_eth_dev;
3052         struct bond_dev_private *internals = dev->data->dev_private;
3053         int ret, i;
3054
3055         rte_spinlock_lock(&internals->lock);
3056
3057         for (i = 0; i < internals->slave_count; i++) {
3058                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3059                 if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
3060                         rte_spinlock_unlock(&internals->lock);
3061                         return -ENOTSUP;
3062                 }
3063         }
3064         for (i = 0; i < internals->slave_count; i++) {
3065                 ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
3066                 if (ret < 0) {
3067                         rte_spinlock_unlock(&internals->lock);
3068                         return ret;
3069                 }
3070         }
3071
3072         rte_spinlock_unlock(&internals->lock);
3073         return 0;
3074 }
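
/*
 * Example (illustrative sketch): an MTU change on the bonded port succeeds
 * only if every slave implements mtu_set (otherwise -ENOTSUP is returned
 * above) and every per-slave rte_eth_dev_set_mtu() call succeeds.
 *
 *	if (rte_eth_dev_set_mtu(bond_port_id, 9000) != 0)
 *		printf("could not set jumbo MTU on the bonded port\n");
 */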
3075
3076 static int
3077 bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
3078                         struct rte_ether_addr *addr)
3079 {
3080         if (mac_address_set(dev, addr)) {
3081                 RTE_BOND_LOG(ERR, "Failed to update MAC address");
3082                 return -EINVAL;
3083         }
3084
3085         return 0;
3086 }
3087
3088 static int
3089 bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
3090                  enum rte_filter_type type, enum rte_filter_op op, void *arg)
3091 {
3092         if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
3093                 *(const void **)arg = &bond_flow_ops;
3094                 return 0;
3095         }
3096         return -ENOTSUP;
3097 }
3098
3099 static int
3100 bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
3101                         struct rte_ether_addr *mac_addr,
3102                         __rte_unused uint32_t index, uint32_t vmdq)
3103 {
3104         struct rte_eth_dev *slave_eth_dev;
3105         struct bond_dev_private *internals = dev->data->dev_private;
3106         int ret, i;
3107
3108         rte_spinlock_lock(&internals->lock);
3109
3110         for (i = 0; i < internals->slave_count; i++) {
3111                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3112                 if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
3113                          *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
3114                         ret = -ENOTSUP;
3115                         goto end;
3116                 }
3117         }
3118
3119         for (i = 0; i < internals->slave_count; i++) {
3120                 ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
3121                                 mac_addr, vmdq);
3122                 if (ret < 0) {
3123                         /* rollback */
3124                         for (i--; i >= 0; i--)
3125                                 rte_eth_dev_mac_addr_remove(
3126                                         internals->slaves[i].port_id, mac_addr);
3127                         goto end;
3128                 }
3129         }
3130
3131         ret = 0;
3132 end:
3133         rte_spinlock_unlock(&internals->lock);
3134         return ret;
3135 }
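
/*
 * Example (illustrative sketch): adding a secondary, locally administered
 * MAC address to the bonded port; the handler above mirrors it to every
 * slave and rolls back on partial failure. The address value is arbitrary.
 *
 *	struct rte_ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	int ret = rte_eth_dev_mac_addr_add(bond_port_id, &extra, 0);
 */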
3136
3137 static void
3138 bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
3139 {
3140         struct rte_eth_dev *slave_eth_dev;
3141         struct bond_dev_private *internals = dev->data->dev_private;
3142         int i;
3143
3144         rte_spinlock_lock(&internals->lock);
3145
3146         for (i = 0; i < internals->slave_count; i++) {
3147                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3148                 if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
3149                         goto end;
3150         }
3151
3152         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
3153
3154         for (i = 0; i < internals->slave_count; i++)
3155                 rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
3156                                 mac_addr);
3157
3158 end:
3159         rte_spinlock_unlock(&internals->lock);
3160 }
3161
3162 const struct eth_dev_ops default_dev_ops = {
3163         .dev_start            = bond_ethdev_start,
3164         .dev_stop             = bond_ethdev_stop,
3165         .dev_close            = bond_ethdev_close,
3166         .dev_configure        = bond_ethdev_configure,
3167         .dev_infos_get        = bond_ethdev_info,
3168         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
3169         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
3170         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
3171         .rx_queue_release     = bond_ethdev_rx_queue_release,
3172         .tx_queue_release     = bond_ethdev_tx_queue_release,
3173         .link_update          = bond_ethdev_link_update,
3174         .stats_get            = bond_ethdev_stats_get,
3175         .stats_reset          = bond_ethdev_stats_reset,
3176         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
3177         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
3178         .allmulticast_enable  = bond_ethdev_allmulticast_enable,
3179         .allmulticast_disable = bond_ethdev_allmulticast_disable,
3180         .reta_update          = bond_ethdev_rss_reta_update,
3181         .reta_query           = bond_ethdev_rss_reta_query,
3182         .rss_hash_update      = bond_ethdev_rss_hash_update,
3183         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get,
3184         .mtu_set              = bond_ethdev_mtu_set,
3185         .mac_addr_set         = bond_ethdev_mac_address_set,
3186         .mac_addr_add         = bond_ethdev_mac_addr_add,
3187         .mac_addr_remove      = bond_ethdev_mac_addr_remove,
3188         .filter_ctrl          = bond_filter_ctrl
3189 };
3190
3191 static int
3192 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3193 {
3194         const char *name = rte_vdev_device_name(dev);
3195         uint8_t socket_id = dev->device.numa_node;
3196         struct bond_dev_private *internals = NULL;
3197         struct rte_eth_dev *eth_dev = NULL;
3198         uint32_t vlan_filter_bmp_size;
3199
3200         /* now do all data allocation - for the eth_dev structure
3201          * and internal (private) data
3202          */
3203
3204         /* reserve an ethdev entry */
3205         eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
3206         if (eth_dev == NULL) {
3207                 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
3208                 goto err;
3209         }
3210
3211         internals = eth_dev->data->dev_private;
3212         eth_dev->data->nb_rx_queues = (uint16_t)1;
3213         eth_dev->data->nb_tx_queues = (uint16_t)1;
3214
3215         /* Allocate memory for storing MAC addresses */
3216         eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
3217                         BOND_MAX_MAC_ADDRS, 0, socket_id);
3218         if (eth_dev->data->mac_addrs == NULL) {
3219                 RTE_BOND_LOG(ERR,
3220                              "Failed to allocate %u bytes needed to store MAC addresses",
3221                              RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
3222                 goto err;
3223         }
3224
3225         eth_dev->dev_ops = &default_dev_ops;
3226         eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
3227
3228         rte_spinlock_init(&internals->lock);
3229         rte_spinlock_init(&internals->lsc_lock);
3230
3231         internals->port_id = eth_dev->data->port_id;
3232         internals->mode = BONDING_MODE_INVALID;
3233         internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
3234         internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
3235         internals->burst_xmit_hash = burst_xmit_l2_hash;
3236         internals->user_defined_mac = 0;
3237
3238         internals->link_status_polling_enabled = 0;
3239
3240         internals->link_status_polling_interval_ms =
3241                 DEFAULT_POLLING_INTERVAL_10_MS;
3242         internals->link_down_delay_ms = 0;
3243         internals->link_up_delay_ms = 0;
3244
3245         internals->slave_count = 0;
3246         internals->active_slave_count = 0;
3247         internals->rx_offload_capa = 0;
3248         internals->tx_offload_capa = 0;
3249         internals->rx_queue_offload_capa = 0;
3250         internals->tx_queue_offload_capa = 0;
3251         internals->candidate_max_rx_pktlen = 0;
3252         internals->max_rx_pktlen = 0;
3253
3254         /* Initially allow to choose any offload type */
3255         internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
3256
3257         memset(&internals->default_rxconf, 0,
3258                sizeof(internals->default_rxconf));
3259         memset(&internals->default_txconf, 0,
3260                sizeof(internals->default_txconf));
3261
3262         memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
3263         memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
3264
3265         memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
3266         memset(internals->slaves, 0, sizeof(internals->slaves));
3267
3268         TAILQ_INIT(&internals->flow_list);
3269         internals->flow_isolated_valid = 0;
3270
3271         /* Set mode 4 default configuration */
3272         bond_mode_8023ad_setup(eth_dev, NULL);
3273         if (bond_ethdev_mode_set(eth_dev, mode)) {
3274                 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
3275                                  eth_dev->data->port_id, mode);
3276                 goto err;
3277         }
3278
3279         vlan_filter_bmp_size =
3280                 rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
3281         internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
3282                                                    RTE_CACHE_LINE_SIZE);
3283         if (internals->vlan_filter_bmpmem == NULL) {
3284                 RTE_BOND_LOG(ERR,
3285                              "Failed to allocate vlan bitmap for bonded device %u",
3286                              eth_dev->data->port_id);
3287                 goto err;
3288         }
3289
3290         internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
3291                         internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
3292         if (internals->vlan_filter_bmp == NULL) {
3293                 RTE_BOND_LOG(ERR,
3294                              "Failed to init vlan bitmap for bonded device %u",
3295                              eth_dev->data->port_id);
3296                 rte_free(internals->vlan_filter_bmpmem);
3297                 goto err;
3298         }
3299
3300         return eth_dev->data->port_id;
3301
3302 err:
3303         rte_free(internals);
3304         if (eth_dev != NULL)
3305                 eth_dev->data->dev_private = NULL;
3306         rte_eth_dev_release_port(eth_dev);
3307         return -1;
3308 }
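
/*
 * Example (illustrative sketch): the allocation path above is also reached
 * through the public API when a bonded device is created programmatically
 * instead of via --vdev. `slave_port_id` is a hypothetical existing port.
 *
 *	int bond_port_id = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 *	if (bond_port_id >= 0)
 *		rte_eth_bond_slave_add(bond_port_id, slave_port_id);
 */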
3309
3310 static int
3311 bond_probe(struct rte_vdev_device *dev)
3312 {
3313         const char *name;
3314         struct bond_dev_private *internals;
3315         struct rte_kvargs *kvlist;
3316         uint8_t bonding_mode, socket_id;
3317         int  arg_count, port_id;
3318         uint8_t agg_mode;
3319         struct rte_eth_dev *eth_dev;
3320
3321         if (!dev)
3322                 return -EINVAL;
3323
3324         name = rte_vdev_device_name(dev);
3325         RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
3326
3327         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3328                 eth_dev = rte_eth_dev_attach_secondary(name);
3329                 if (!eth_dev) {
3330                         RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3331                         return -1;
3332                 }
3333                 /* TODO: request info from primary to set up Rx and Tx */
3334                 eth_dev->dev_ops = &default_dev_ops;
3335                 eth_dev->device = &dev->device;
3336                 rte_eth_dev_probing_finish(eth_dev);
3337                 return 0;
3338         }
3339
3340         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
3341                 pmd_bond_init_valid_arguments);
3342         if (kvlist == NULL)
3343                 return -1;
3344
3345         /* Parse link bonding mode */
3346         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
3347                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
3348                                 &bond_ethdev_parse_slave_mode_kvarg,
3349                                 &bonding_mode) != 0) {
3350                         RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
3351                                         name);
3352                         goto parse_error;
3353                 }
3354         } else {
3355                 RTE_BOND_LOG(ERR, "Mode must be specified exactly once for bonded "
3356                                 "device %s", name);
3357                 goto parse_error;
3358         }
3359
3360         /* Parse socket id to create bonding device on */
3361         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
3362         if (arg_count == 1) {
3363                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
3364                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
3365                                 != 0) {
3366                         RTE_BOND_LOG(ERR, "Invalid socket id specified for "
3367                                         "bonded device %s", name);
3368                         goto parse_error;
3369                 }
3370         } else if (arg_count > 1) {
3371                 RTE_BOND_LOG(ERR, "Socket id can be specified only once for "
3372                                 "bonded device %s", name);
3373                 goto parse_error;
3374         } else {
3375                 socket_id = rte_socket_id();
3376         }
3377
3378         dev->device.numa_node = socket_id;
3379
3380         /* Create link bonding eth device */
3381         port_id = bond_alloc(dev, bonding_mode);
3382         if (port_id < 0) {
3383                 RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on "
3384                                 "socket %u.", name, bonding_mode, socket_id);
3385                 goto parse_error;
3386         }
3387         internals = rte_eth_devices[port_id].data->dev_private;
3388         internals->kvlist = kvlist;
3389
3390         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3391                 if (rte_kvargs_process(kvlist,
3392                                 PMD_BOND_AGG_MODE_KVARG,
3393                                 &bond_ethdev_parse_slave_agg_mode_kvarg,
3394                                 &agg_mode) != 0) {
3395                         RTE_BOND_LOG(ERR,
3396                                         "Failed to parse agg selection mode for bonded device %s",
3397                                         name);
3398                         goto parse_error;
3399                 }
3400
3401                 if (internals->mode == BONDING_MODE_8023AD)
3402                         internals->mode4.agg_selection = agg_mode;
3403         } else {
3404                 internals->mode4.agg_selection = AGG_STABLE;
3405         }
3406
3407         rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
3408         RTE_BOND_LOG(INFO, "Created bonded device %s on port %d in mode %u on "
3409                         "socket %u.", name, port_id, bonding_mode, socket_id);
3410         return 0;
3411
3412 parse_error:
3413         rte_kvargs_free(kvlist);
3414
3415         return -1;
3416 }
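
/*
 * Example (illustrative sketch): the probe/configure sequence above is
 * typically driven by an EAL --vdev argument such as (PCI addresses are
 * hypothetical):
 *
 *	--vdev 'net_bonding0,mode=2,slave=0000:02:00.0,\
 *		slave=0000:02:00.1,xmit_policy=l34,socket_id=0'
 */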
3417
3418 static int
3419 bond_remove(struct rte_vdev_device *dev)
3420 {
3421         struct rte_eth_dev *eth_dev;
3422         struct bond_dev_private *internals;
3423         const char *name;
3424
3425         if (!dev)
3426                 return -EINVAL;
3427
3428         name = rte_vdev_device_name(dev);
3429         RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
3430
3431         /* find an ethdev entry */
3432         eth_dev = rte_eth_dev_allocated(name);
3433         if (eth_dev == NULL)
3434                 return 0; /* port already released */
3435
3436         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3437                 return rte_eth_dev_release_port(eth_dev);
3438
3439         RTE_ASSERT(eth_dev->device == &dev->device);
3440
3441         internals = eth_dev->data->dev_private;
3442         if (internals->slave_count != 0)
3443                 return -EBUSY;
3444
3445         if (eth_dev->data->dev_started == 1) {
3446                 bond_ethdev_stop(eth_dev);
3447                 bond_ethdev_close(eth_dev);
3448         }
3449         rte_eth_dev_release_port(eth_dev);
3450
3451         return 0;
3452 }
3453
3454 /* This function resolves the slave port ids once all the other pdevs and vdevs
3455  * have been allocated */
3456 static int
3457 bond_ethdev_configure(struct rte_eth_dev *dev)
3458 {
3459         const char *name = dev->device->name;
3460         struct bond_dev_private *internals = dev->data->dev_private;
3461         struct rte_kvargs *kvlist = internals->kvlist;
3462         int arg_count;
3463         uint16_t port_id = dev - rte_eth_devices;
3464         uint8_t agg_mode;
3465
3466         static const uint8_t default_rss_key[40] = {
3467                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3468                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3469                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3470                 0xBE, 0xAC, 0x01, 0xFA
3471         };
3472
3473         unsigned i, j;
3474
3475         /*
3476          * If RSS is enabled, fill table with default values and
3477          * set the key to the value specified in the port RSS configuration.
3478          * Fall back to the default RSS key if no key is specified.
3479          */
3480         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
3481                 if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
3482                         internals->rss_key_len =
3483                                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
3484                         memcpy(internals->rss_key,
3485                                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
3486                                internals->rss_key_len);
3487                 } else {
3488                         internals->rss_key_len = sizeof(default_rss_key);
3489                         memcpy(internals->rss_key, default_rss_key,
3490                                internals->rss_key_len);
3491                 }
3492
3493                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3494                         internals->reta_conf[i].mask = ~0LL;
3495                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
3496                                 internals->reta_conf[i].reta[j] =
3497                                                 (i * RTE_RETA_GROUP_SIZE + j) %
3498                                                 dev->data->nb_rx_queues;
3499                 }
3500         }
3501
3502         /* set the max_rx_pktlen */
3503         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
3504
3505         /*
3506          * if no kvlist, it means that this bonded device has been created
3507          * through the bonding api.
3508          * through the bonding API.
3509         if (!kvlist)
3510                 return 0;
3511
3512         /* Parse MAC address for bonded device */
3513         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
3514         if (arg_count == 1) {
3515                 struct rte_ether_addr bond_mac;
3516
3517                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
3518                                        &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
3519                         RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
3520                                      name);
3521                         return -1;
3522                 }
3523
3524                 /* Set MAC address */
3525                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
3526                         RTE_BOND_LOG(ERR,
3527                                      "Failed to set mac address on bonded device %s",
3528                                      name);
3529                         return -1;
3530                 }
3531         } else if (arg_count > 1) {
3532                 RTE_BOND_LOG(ERR,
3533                              "MAC address can be specified only once for bonded device %s",
3534                              name);
3535                 return -1;
3536         }
3537
3538         /* Parse/set balance mode transmit policy */
3539         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
3540         if (arg_count == 1) {
3541                 uint8_t xmit_policy;
3542
3543                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
3544                                        &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
3545                     0) {
3546                         RTE_BOND_LOG(INFO,
3547                                      "Invalid xmit policy specified for bonded device %s",
3548                                      name);
3549                         return -1;
3550                 }
3551
3552                 /* Set balance mode transmit policy */
3553                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
3554                         RTE_BOND_LOG(ERR,
3555                                      "Failed to set balance xmit policy on bonded device %s",
3556                                      name);
3557                         return -1;
3558                 }
3559         } else if (arg_count > 1) {
3560                 RTE_BOND_LOG(ERR,
3561                              "Transmit policy can be specified only once for bonded device %s",
3562                              name);
3563                 return -1;
3564         }
3565
3566         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3567                 if (rte_kvargs_process(kvlist,
3568                                        PMD_BOND_AGG_MODE_KVARG,
3569                                        &bond_ethdev_parse_slave_agg_mode_kvarg,
3570                                        &agg_mode) != 0) {
3571                         RTE_BOND_LOG(ERR,
3572                                      "Failed to parse agg selection mode for bonded device %s",
3573                                      name);
3574                 }
3575                 if (internals->mode == BONDING_MODE_8023AD) {
3576                         int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
3577                                         agg_mode);
3578                         if (ret < 0) {
3579                                 RTE_BOND_LOG(ERR,
3580                                         "Invalid args for agg selection set for bonded device %s",
3581                                         name);
3582                                 return -1;
3583                         }
3584                 }
3585         }
3586
3587         /* Parse/add slave ports to bonded device */
3588         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
3589                 struct bond_ethdev_slave_ports slave_ports;
3590                 unsigned i;
3591
3592                 memset(&slave_ports, 0, sizeof(slave_ports));
3593
3594                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
3595                                        &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
3596                         RTE_BOND_LOG(ERR,
3597                                      "Failed to parse slave ports for bonded device %s",
3598                                      name);
3599                         return -1;
3600                 }
3601
3602                 for (i = 0; i < slave_ports.slave_count; i++) {
3603                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
3604                                 RTE_BOND_LOG(ERR,
3605                                              "Failed to add port %d as slave to bonded device %s",
3606                                              slave_ports.slaves[i], name);
3607                         }
3608                 }
3609
3610         } else {
3611                 RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
3612                 return -1;
3613         }
3614
3615         /* Parse/set primary slave port id */
3616         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
3617         if (arg_count == 1) {
3618                 uint16_t primary_slave_port_id;
3619
3620                 if (rte_kvargs_process(kvlist,
3621                                        PMD_BOND_PRIMARY_SLAVE_KVARG,
3622                                        &bond_ethdev_parse_primary_slave_port_id_kvarg,
3623                                        &primary_slave_port_id) < 0) {
3624                         RTE_BOND_LOG(INFO,
3625                                      "Invalid primary slave port id specified for bonded device %s",
3626                                      name);
3627                         return -1;
3628                 }
3629
3630                 /* Set primary slave port id */
3631                 if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
3632                     != 0) {
3633                         RTE_BOND_LOG(ERR,
3634                                      "Failed to set primary slave port %d on bonded device %s",
3635                                      primary_slave_port_id, name);
3636                         return -1;
3637                 }
3638         } else if (arg_count > 1) {
3639                 RTE_BOND_LOG(INFO,
3640                              "Primary slave can be specified only once for bonded device %s",
3641                              name);
3642                 return -1;
3643         }
3644
3645         /* Parse link status monitor polling interval */
3646         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
3647         if (arg_count == 1) {
3648                 uint32_t lsc_poll_interval_ms;
3649
3650                 if (rte_kvargs_process(kvlist,
3651                                        PMD_BOND_LSC_POLL_PERIOD_KVARG,
3652                                        &bond_ethdev_parse_time_ms_kvarg,
3653                                        &lsc_poll_interval_ms) < 0) {
3654                         RTE_BOND_LOG(INFO,
3655                                      "Invalid lsc polling interval value specified for bonded"
3656                                      " device %s", name);
3657                         return -1;
3658                 }
3659
3660                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
3661                     != 0) {
3662                         RTE_BOND_LOG(ERR,
3663                                      "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
3664                                      lsc_poll_interval_ms, name);
3665                         return -1;
3666                 }
3667         } else if (arg_count > 1) {
3668                 RTE_BOND_LOG(INFO,
3669                              "LSC polling interval can be specified only once for bonded"
3670                              " device %s", name);
3671                 return -1;
3672         }
3673
3674         /* Parse link up interrupt propagation delay */
3675         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
3676         if (arg_count == 1) {
3677                 uint32_t link_up_delay_ms;
3678
3679                 if (rte_kvargs_process(kvlist,
3680                                        PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
3681                                        &bond_ethdev_parse_time_ms_kvarg,
3682                                        &link_up_delay_ms) < 0) {
3683                         RTE_BOND_LOG(INFO,
3684                                      "Invalid link up propagation delay value specified for"
3685                                      " bonded device %s", name);
3686                         return -1;
3687                 }
3688
3689                 /* Set link up propagation delay */
3690                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
3691                     != 0) {
3692                         RTE_BOND_LOG(ERR,
3693                                      "Failed to set link up propagation delay (%u ms) on bonded"
3694                                      " device %s", link_up_delay_ms, name);
3695                         return -1;
3696                 }
3697         } else if (arg_count > 1) {
3698                 RTE_BOND_LOG(INFO,
3699                              "Link up propagation delay can be specified only once for"
3700                              " bonded device %s", name);
3701                 return -1;
3702         }
3703
3704         /* Parse link down interrupt propagation delay */
3705         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
3706         if (arg_count == 1) {
3707                 uint32_t link_down_delay_ms;
3708
3709                 if (rte_kvargs_process(kvlist,
3710                                        PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
3711                                        &bond_ethdev_parse_time_ms_kvarg,
3712                                        &link_down_delay_ms) < 0) {
3713                         RTE_BOND_LOG(INFO,
3714                                      "Invalid link down propagation delay value specified for"
3715                                      " bonded device %s", name);
3716                         return -1;
3717                 }
3718
3719                 /* Set link down propagation delay */
3720                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
3721                     != 0) {
3722                         RTE_BOND_LOG(ERR,
3723                                      "Failed to set link down propagation delay (%u ms) on bonded device %s",
3724                                      link_down_delay_ms, name);
3725                         return -1;
3726                 }
3727         } else if (arg_count > 1) {
3728                 RTE_BOND_LOG(INFO,
3729                              "Link down propagation delay can be specified only once for bonded device %s",
3730                              name);
3731                 return -1;
3732         }
3733
3734         return 0;
3735 }
3736
3737 struct rte_vdev_driver pmd_bond_drv = {
3738         .probe = bond_probe,
3739         .remove = bond_remove,
3740 };
3741
3742 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
3743 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
3744
3745 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
3746         "slave=<ifc> "
3747         "primary=<ifc> "
3748         "mode=[0-6] "
3749         "xmit_policy=[l2 | l23 | l34] "
3750         "agg_mode=[count | stable | bandwidth] "
3751         "socket_id=<int> "
3752         "mac=<mac addr> "
3753         "lsc_poll_period_ms=<int> "
3754         "up_delay=<int> "
3755         "down_delay=<int>");
3756
3757 RTE_LOG_REGISTER(bond_logtype, pmd.net.bond, NOTICE);