net/bonding: check stop call status
drivers/net/bonding/rte_eth_bond_pmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdlib.h>
#include <stdbool.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"

#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define BOND_MAX_MAC_ADDRS 16

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];

static inline size_t
get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
        size_t vlan_offset = 0;

        if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
                rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
                struct rte_vlan_hdr *vlan_hdr =
                        (struct rte_vlan_hdr *)(eth_hdr + 1);

                vlan_offset = sizeof(struct rte_vlan_hdr);
                *proto = vlan_hdr->eth_proto;

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
                        vlan_hdr = vlan_hdr + 1;
                        *proto = vlan_hdr->eth_proto;
                        vlan_offset += sizeof(struct rte_vlan_hdr);
                }
        }
        return vlan_offset;
}
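
/*
 * Illustrative use of get_vlan_offset() (a sketch, not code called anywhere
 * in this file as-is): the returned offset skips up to two VLAN tags so the
 * caller can locate the L3 header behind the Ethernet header, while *proto
 * ends up holding the inner ethertype.
 *
 *      uint16_t proto = eth_hdr->ether_type;
 *      size_t off = get_vlan_offset(eth_hdr, &proto);
 *
 *      if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 *              struct rte_ipv4_hdr *ip4 = (struct rte_ipv4_hdr *)
 *                              ((char *)(eth_hdr + 1) + off);
 *              // ip4 now points past any single or QinQ VLAN tags
 *      }
 */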

static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        uint16_t num_rx_total = 0;
        uint16_t slave_count;
        uint16_t active_slave;
        int i;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        internals = bd_rx_q->dev_private;
        slave_count = internals->active_slave_count;
        active_slave = bd_rx_q->active_slave;

        for (i = 0; i < slave_count && nb_pkts; i++) {
                uint16_t num_rx_slave;

                /* Offset of pointer to *bufs increases as packets are received
                 * from other slaves */
                num_rx_slave =
                        rte_eth_rx_burst(internals->active_slaves[active_slave],
                                         bd_rx_q->queue_id,
                                         bufs + num_rx_total, nb_pkts);
                num_rx_total += num_rx_slave;
                nb_pkts -= num_rx_slave;
                if (++active_slave == slave_count)
                        active_slave = 0;
        }

        if (++bd_rx_q->active_slave >= slave_count)
                bd_rx_q->active_slave = 0;
        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;

        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

        internals = bd_rx_q->dev_private;

        return rte_eth_rx_burst(internals->current_primary_port,
                        bd_rx_q->queue_id, bufs, nb_pkts);
}

static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
        const uint16_t ether_type_slow_be =
                rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);

        return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
                (ethertype == ether_type_slow_be &&
                (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
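
/*
 * Background (informational): IEEE 802.3 "slow protocols" share ethertype
 * 0x8809 (RTE_ETHER_TYPE_SLOW); the first payload byte is the subtype,
 * distinguishing LACPDUs (SLOW_SUBTYPE_LACP) from marker PDUs
 * (SLOW_SUBTYPE_MARKER). A VLAN-tagged frame (nonzero TCI) is never
 * treated as LACP here, since slow-protocol frames are sent untagged.
 */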

/*****************************************************************************
 * Flow director's setup for mode 4 optimization
 */

static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
        .dst.addr_bytes = { 0 },
        .src.addr_bytes = { 0 },
        .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};

static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
        .dst.addr_bytes = { 0 },
        .src.addr_bytes = { 0 },
        .type = 0xFFFF,
};

static struct rte_flow_item flow_item_8023ad[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &flow_item_eth_type_8023ad,
                .last = NULL,
                .mask = &flow_item_eth_mask_type_8023ad,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_END,
                .spec = NULL,
                .last = NULL,
                .mask = NULL,
        }
};

const struct rte_flow_attr flow_attr_8023ad = {
        .group = 0,
        .priority = 0,
        .ingress = 1,
        .egress = 0,
        .reserved = 0,
};

int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
                uint16_t slave_port) {
        struct rte_eth_dev_info slave_info;
        struct rte_flow_error error;
        struct bond_dev_private *internals = bond_dev->data->dev_private;

        const struct rte_flow_action_queue lacp_queue_conf = {
                .index = 0,
        };

        const struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &lacp_queue_conf
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                }
        };

        int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
                        flow_item_8023ad, actions, &error);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
                                __func__, error.message, slave_port,
                                internals->mode4.dedicated_queues.rx_qid);
                return -1;
        }

        ret = rte_eth_dev_info_get(slave_port, &slave_info);
        if (ret != 0) {
                RTE_BOND_LOG(ERR,
                        "%s: Error getting device (port %u) info: %s\n",
                        __func__, slave_port, strerror(-ret));

                return ret;
        }

        if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
                        slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
                RTE_BOND_LOG(ERR,
                        "%s: Slave %d capabilities don't allow allocating additional queues",
                        __func__, slave_port);
                return -1;
        }

        return 0;
}

int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
        struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
        struct bond_dev_private *internals = bond_dev->data->dev_private;
        struct rte_eth_dev_info bond_info;
        uint16_t idx;
        int ret;

        /* Verify that all slaves in the bonding support flow director */
        if (internals->slave_count > 0) {
                ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
                if (ret != 0) {
                        RTE_BOND_LOG(ERR,
                                "%s: Error getting device (port %u) info: %s\n",
                                __func__, bond_dev->data->port_id,
                                strerror(-ret));

                        return ret;
                }

                internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
                internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;

                for (idx = 0; idx < internals->slave_count; idx++) {
                        if (bond_ethdev_8023ad_flow_verify(bond_dev,
                                        internals->slaves[idx].port_id) != 0)
                                return -1;
                }
        }

        return 0;
}

int
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {

        struct rte_flow_error error;
        struct bond_dev_private *internals = bond_dev->data->dev_private;
        struct rte_flow_action_queue lacp_queue_conf = {
                .index = internals->mode4.dedicated_queues.rx_qid,
        };

        const struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &lacp_queue_conf
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                }
        };

        internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
                        &flow_attr_8023ad, flow_item_8023ad, actions, &error);
        if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
                RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
                                "(slave_port=%d queue_id=%d)",
                                error.message, slave_port,
                                internals->mode4.dedicated_queues.rx_qid);
                return -1;
        }

        return 0;
}
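
/*
 * For reference (a sketch of the public API usage, not invoked from this
 * file): an application opts in to this rte_flow based optimization before
 * configuring the bonded device, e.g.:
 *
 *      // steer LACP frames to a dedicated RX/TX queue pair on each slave
 *      if (rte_eth_bond_8023ad_dedicated_queues_enable(bond_port_id) != 0)
 *              // fall back to software filtering of slow packets
 *
 * With dedicated queues enabled, the burst handlers below switch to the
 * *_fast_queue variants and skip per-packet LACP inspection.
 */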

static inline uint16_t
rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
                bool dedicated_rxq)
{
        /* Cast to structure containing the bonded device's port id and queue id */
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct rte_eth_dev *bonded_eth_dev =
                                        &rte_eth_devices[internals->port_id];
        struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
        struct rte_ether_hdr *hdr;

        const uint16_t ether_type_slow_be =
                rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
        uint16_t num_rx_total = 0;      /* Total number of received packets */
        uint16_t slaves[RTE_MAX_ETHPORTS];
        uint16_t slave_count, idx;

        uint8_t collecting;  /* current slave collecting status */
        const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
        const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
        uint8_t subtype;
        uint16_t i;
        uint16_t j;
        uint16_t k;

        /* Copy slave list to protect against slave up/down changes during rx
         * bursting */
        slave_count = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * slave_count);

        idx = bd_rx_q->active_slave;
        if (idx >= slave_count) {
                bd_rx_q->active_slave = 0;
                idx = 0;
        }
        for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
                j = num_rx_total;
                collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
                                         COLLECTING);

                /* Read packets from this slave */
                num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
                                &bufs[num_rx_total], nb_pkts - num_rx_total);

                for (k = j; k < 2 && k < num_rx_total; k++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

                /* Handle slow protocol packets. */
                while (j < num_rx_total) {
                        if (j + 3 < num_rx_total)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

                        hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
                        subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

                        /* Remove the packet from the array if:
                         * - it is a slow packet but no dedicated rxq is present,
                         * - the slave is not in collecting state,
                         * - the bonding interface is not in promiscuous mode and:
                         *   - the packet is unicast with an address that does not match,
                         *   - the packet is multicast and the bonding interface
                         *     is not in allmulti.
                         */
                        if (unlikely(
                                (!dedicated_rxq &&
                                 is_lacp_packets(hdr->ether_type, subtype,
                                                 bufs[j])) ||
                                !collecting ||
                                (!promisc &&
                                 ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
                                   !rte_is_same_ether_addr(bond_mac,
                                                       &hdr->d_addr)) ||
                                  (!allmulti &&
                                   rte_is_multicast_ether_addr(&hdr->d_addr)))))) {

                                if (hdr->ether_type == ether_type_slow_be) {
                                        bond_mode_8023ad_handle_slow_pkt(
                                            internals, slaves[idx], bufs[j]);
                                } else
                                        rte_pktmbuf_free(bufs[j]);

                                /* Packet is managed by mode 4 or dropped, shift the array */
                                num_rx_total--;
                                if (j < num_rx_total) {
                                        memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
                                                (num_rx_total - j));
                                }
                        } else
                                j++;
                }
                if (unlikely(++idx == slave_count))
                        idx = 0;
        }

        if (++bd_rx_q->active_slave >= slave_count)
                bd_rx_q->active_slave = 0;

        return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        return rx_burst_8023ad(queue, bufs, nb_pkts, false);
}

static uint16_t
bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        return rx_burst_8023ad(queue, bufs, nb_pkts, true);
}

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
        switch (arp_op) {
        case RTE_ARP_OP_REQUEST:
                strlcpy(buf, "ARP Request", buf_len);
                return;
        case RTE_ARP_OP_REPLY:
                strlcpy(buf, "ARP Reply", buf_len);
                return;
        case RTE_ARP_OP_REVREQUEST:
                strlcpy(buf, "Reverse ARP Request", buf_len);
                return;
        case RTE_ARP_OP_REVREPLY:
                strlcpy(buf, "Reverse ARP Reply", buf_len);
                return;
        case RTE_ARP_OP_INVREQUEST:
                strlcpy(buf, "Peer Identify Request", buf_len);
                return;
        case RTE_ARP_OP_INVREPLY:
                strlcpy(buf, "Peer Identify Reply", buf_len);
                return;
        default:
                break;
        }
        strlcpy(buf, "Unknown", buf_len);
        return;
}
#endif
#define MaxIPv4String   16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
        uint32_t ipv4_addr;

        ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
        snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
                (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
                ipv4_addr & 0xFF);
}
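
/*
 * Worked example (illustrative): given 10.0.0.1 in network byte order,
 * rte_be_to_cpu_32() converts it to host order, so (ipv4_addr >> 24) & 0xFF
 * is 10 and ipv4_addr & 0xFF is 1, printing "10.0.0.1". MaxIPv4String (16)
 * covers the worst case, "255.255.255.255" plus the terminating NUL.
 */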

#define MAX_CLIENTS_NUMBER      128
uint8_t active_clients;
struct client_stats_t {
        uint16_t port;
        uint32_t ipv4_addr;
        uint32_t ipv4_rx_packets;
        uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
{
        int i = 0;

        for (; i < MAX_CLIENTS_NUMBER; i++) {
                if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
                        /* Just update the RX or TX packet count for this client */
                        if (TXorRXindicator == &burstnumberRX)
                                client_stats[i].ipv4_rx_packets++;
                        else
                                client_stats[i].ipv4_tx_packets++;
                        return;
                }
        }
        /* We have a new client. Insert it into the table and increment its stats */
        if (TXorRXindicator == &burstnumberRX)
                client_stats[active_clients].ipv4_rx_packets++;
        else
                client_stats[active_clients].ipv4_tx_packets++;
        client_stats[active_clients].ipv4_addr = addr;
        client_stats[active_clients].port = port;
        active_clients++;
}

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
        rte_log(RTE_LOG_DEBUG, bond_logtype,                            \
                "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
                "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
                info,                                                   \
                port,                                                   \
                eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
                eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
                eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
                src_ip,                                                 \
                eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
                eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
                eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
                dst_ip,                                                 \
                arp_op, ++burstnumber)
#endif

static void
mode6_debug(const char __rte_unused *info,
        struct rte_ether_hdr *eth_h, uint16_t port,
        uint32_t __rte_unused *burstnumber)
{
        struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        struct rte_arp_hdr *arp_h;
        char dst_ip[16];
        char ArpOp[24];
        char buf[16];
#endif
        char src_ip[16];

        uint16_t ether_type = eth_h->ether_type;
        uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        strlcpy(buf, info, 16);
#endif

        if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
                ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
                ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
                update_client_stats(ipv4_h->src_addr, port, burstnumber);
        }
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
        else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
                arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
                ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
                ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
                arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
                                ArpOp, sizeof(ArpOp));
                MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
        }
#endif
}
#endif

static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
        struct bond_dev_private *internals = bd_rx_q->dev_private;
        struct rte_ether_hdr *eth_h;
        uint16_t ether_type, offset;
        uint16_t nb_recv_pkts;
        int i;

        nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

        for (i = 0; i < nb_recv_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
                        bond_mode_alb_arp_recv(eth_h, offset, internals);
                }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
                        mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
        }

        return nb_recv_pkts;
}

static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
        uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

        uint16_t num_of_slaves;
        uint16_t slaves[RTE_MAX_ETHPORTS];

        uint16_t num_tx_total = 0, num_tx_slave;

        static int slave_idx = 0;
        int i, cslave_idx = 0, tx_fail_total = 0;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return num_tx_total;

        /* Populate each slave's mbuf array with the packets to be sent on it */
        for (i = 0; i < nb_pkts; i++) {
                cslave_idx = (slave_idx + i) % num_of_slaves;
                slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
        }

        /* Increment the current slave index so the next call to tx burst
         * starts on the next slave */
        slave_idx = ++cslave_idx;

        /* Send packet burst on each slave device */
        for (i = 0; i < num_of_slaves; i++) {
                if (slave_nb_pkts[i] > 0) {
                        num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        slave_bufs[i], slave_nb_pkts[i]);

                        /* If tx burst fails move packets to end of bufs */
                        if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
                                int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

                                tx_fail_total += tx_fail_slave;

                                memcpy(&bufs[nb_pkts - tx_fail_total],
                                       &slave_bufs[i][num_tx_slave],
                                       tx_fail_slave * sizeof(bufs[0]));
                        }
                        num_tx_total += num_tx_slave;
                }
        }

        return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
                struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        if (internals->active_slave_count < 1)
                return 0;

        return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
                        bufs, nb_pkts);
}

static inline uint16_t
ether_hash(struct rte_ether_hdr *eth_hdr)
{
        unaligned_uint16_t *word_src_addr =
                (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
        unaligned_uint16_t *word_dst_addr =
                (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
        return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
        unaligned_uint32_t *word_src_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
        unaligned_uint32_t *word_dst_addr =
                (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

        return (word_src_addr[0] ^ word_dst_addr[0]) ^
                        (word_src_addr[1] ^ word_dst_addr[1]) ^
                        (word_src_addr[2] ^ word_dst_addr[2]) ^
                        (word_src_addr[3] ^ word_dst_addr[3]);
}
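
/*
 * These helpers XOR 16- or 32-bit words of the relevant header fields; the
 * burst_xmit_*_hash() functions below fold the result (hash ^= hash >> 16,
 * hash ^= hash >> 8) before taking it modulo the slave count. Illustrative
 * consequence: all packets of one flow always map to the same slave, while
 * two flows differing only in the low byte of an address can fold to
 * different values and land on different slaves.
 */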


void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        struct rte_ether_hdr *eth_hdr;
        uint32_t hash;
        int i;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);

                hash = ether_hash(eth_hdr);

                slaves[i] = (hash ^= hash >> 8) % slave_count;
        }
}

void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        uint16_t i;
        struct rte_ether_hdr *eth_hdr;
        uint16_t proto;
        size_t vlan_offset;
        uint32_t hash, l3hash;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
                l3hash = 0;

                proto = eth_hdr->ether_type;
                hash = ether_hash(eth_hdr);

                vlan_offset = get_vlan_offset(eth_hdr, &proto);

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
                        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv4_hash(ipv4_hdr);

                } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
                        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv6_hash(ipv6_hdr);
                }

                hash = hash ^ l3hash;
                hash ^= hash >> 16;
                hash ^= hash >> 8;

                slaves[i] = hash % slave_count;
        }
}

void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
                uint16_t slave_count, uint16_t *slaves)
{
        struct rte_ether_hdr *eth_hdr;
        uint16_t proto;
        size_t vlan_offset;
        int i;

        struct rte_udp_hdr *udp_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        uint32_t hash, l3hash, l4hash;

        for (i = 0; i < nb_pkts; i++) {
                eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
                size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
                proto = eth_hdr->ether_type;
                vlan_offset = get_vlan_offset(eth_hdr, &proto);
                l3hash = 0;
                l4hash = 0;

                if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
                        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        size_t ip_hdr_offset;

                        l3hash = ipv4_hash(ipv4_hdr);

                        /* there is no L4 header in a fragmented packet */
                        if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
                                                                == 0)) {
                                ip_hdr_offset = (ipv4_hdr->version_ihl
                                        & RTE_IPV4_HDR_IHL_MASK) *
                                        RTE_IPV4_IHL_MULTIPLIER;

                                if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
                                        tcp_hdr = (struct rte_tcp_hdr *)
                                                ((char *)ipv4_hdr +
                                                        ip_hdr_offset);
                                        if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
                                                        < pkt_end)
                                                l4hash = HASH_L4_PORTS(tcp_hdr);
                                } else if (ipv4_hdr->next_proto_id ==
                                                                IPPROTO_UDP) {
                                        udp_hdr = (struct rte_udp_hdr *)
                                                ((char *)ipv4_hdr +
                                                        ip_hdr_offset);
                                        if ((size_t)udp_hdr + sizeof(*udp_hdr)
                                                        < pkt_end)
                                                l4hash = HASH_L4_PORTS(udp_hdr);
                                }
                        }
                } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
                        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
                                        ((char *)(eth_hdr + 1) + vlan_offset);
                        l3hash = ipv6_hash(ipv6_hdr);

                        if (ipv6_hdr->proto == IPPROTO_TCP) {
                                tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
                                l4hash = HASH_L4_PORTS(tcp_hdr);
                        } else if (ipv6_hdr->proto == IPPROTO_UDP) {
                                udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
                                l4hash = HASH_L4_PORTS(udp_hdr);
                        }
                }

                hash = l3hash ^ l4hash;
                hash ^= hash >> 16;
                hash ^= hash >> 8;

                slaves[i] = hash % slave_count;
        }
}
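
/*
 * Sketch of how an application selects one of the hash callbacks above
 * (illustrative use of the public bonding API, not anything defined in this
 * file):
 *
 *      int bond_port = rte_eth_bond_create("net_bonding0",
 *                      BONDING_MODE_BALANCE, rte_socket_id());
 *      rte_eth_bond_xmit_policy_set(bond_port, BALANCE_XMIT_POLICY_LAYER34);
 *
 * BALANCE_XMIT_POLICY_LAYER2 maps to burst_xmit_l2_hash(), LAYER23 to
 * burst_xmit_l23_hash() and LAYER34 to burst_xmit_l34_hash().
 */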

struct bwg_slave {
        uint64_t bwg_left_int;
        uint64_t bwg_left_remainder;
        uint16_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
        int i;

        for (i = 0; i < internals->active_slave_count; i++) {
                tlb_last_obytets[internals->active_slaves[i]] = 0;
        }
}

static int
bandwidth_cmp(const void *a, const void *b)
{
        const struct bwg_slave *bwg_a = a;
        const struct bwg_slave *bwg_b = b;
        int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
        int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
                        (int64_t)bwg_a->bwg_left_remainder;
        if (diff > 0)
                return 1;
        else if (diff < 0)
                return -1;
        else if (diff2 > 0)
                return 1;
        else if (diff2 < 0)
                return -1;
        else
                return 0;
}

static void
bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
                struct bwg_slave *bwg_slave)
{
        struct rte_eth_link link_status;
        int ret;

        ret = rte_eth_link_get_nowait(port_id, &link_status);
        if (ret < 0) {
                RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
                             port_id, rte_strerror(-ret));
                return;
        }
        uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
        if (link_bwg == 0)
                return;
        link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
        bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
        bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
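
/*
 * Worked example for bandwidth_left() (illustrative numbers): a 10G slave
 * reports link_speed = 10000 (Mbps), so link_bwg = 10000 * 1000000 / 8 =
 * 1.25e9 bytes/s, scaled by (update_idx + 1) * REORDER_PERIOD_MS. With
 * "load" bytes sent since the last update, (link_bwg - 1000 * load) is the
 * remaining share, split into an integer quotient and a remainder so that
 * bandwidth_cmp() can order the slaves without floating point.
 */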

static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
        struct bond_dev_private *internals = arg;
        struct rte_eth_stats slave_stats;
        struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
        uint16_t slave_count;
        uint64_t tx_bytes;

        uint8_t update_stats = 0;
        uint16_t slave_id;
        uint16_t i;

        internals->slave_update_idx++;

        if (internals->slave_update_idx >= REORDER_PERIOD_MS)
                update_stats = 1;

        for (i = 0; i < internals->active_slave_count; i++) {
                slave_id = internals->active_slaves[i];
                rte_eth_stats_get(slave_id, &slave_stats);
                tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
                bandwidth_left(slave_id, tx_bytes,
                                internals->slave_update_idx, &bwg_array[i]);
                bwg_array[i].slave = slave_id;

                if (update_stats) {
                        tlb_last_obytets[slave_id] = slave_stats.obytes;
                }
        }

        if (update_stats == 1)
                internals->slave_update_idx = 0;

        slave_count = i;
        qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
        for (i = 0; i < slave_count; i++)
                internals->tlb_slaves_order[i] = bwg_array[i].slave;

        rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
                        (struct bond_dev_private *)internals);
}

static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_eth_dev *primary_port =
                        &rte_eth_devices[internals->primary_port];
        uint16_t num_tx_total = 0;
        uint16_t i, j;

        uint16_t num_of_slaves = internals->active_slave_count;
        uint16_t slaves[RTE_MAX_ETHPORTS];

        struct rte_ether_hdr *ether_hdr;
        struct rte_ether_addr primary_slave_addr;
        struct rte_ether_addr active_slave_addr;

        if (num_of_slaves < 1)
                return num_tx_total;

        memcpy(slaves, internals->tlb_slaves_order,
                                sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

        rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

        if (nb_pkts > 3) {
                for (i = 0; i < 3; i++)
                        rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
        }

        for (i = 0; i < num_of_slaves; i++) {
                rte_eth_macaddr_get(slaves[i], &active_slave_addr);
                for (j = num_tx_total; j < nb_pkts; j++) {
                        if (j + 3 < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

                        ether_hdr = rte_pktmbuf_mtod(bufs[j],
                                                struct rte_ether_hdr *);
                        if (rte_is_same_ether_addr(&ether_hdr->s_addr,
                                                        &primary_slave_addr))
                                rte_ether_addr_copy(&active_slave_addr,
                                                &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
                }

                num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                bufs + num_tx_total, nb_pkts - num_tx_total);

                if (num_tx_total == nb_pkts)
                        break;
        }

        return num_tx_total;
}

void
bond_tlb_disable(struct bond_dev_private *internals)
{
        rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
        bond_ethdev_update_tlb_slave_cb(internals);
}

static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        struct rte_ether_hdr *eth_h;
        uint16_t ether_type, offset;

        struct client_data *client_info;

        /*
         * We create transmit buffers for every slave and one additional to send
         * through tlb. In the worst case every packet will be sent on one port.
         */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
        uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

        /*
         * We create separate transmit buffers for update packets as they won't
         * be counted in num_tx_total.
         */
        struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
        uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

        struct rte_mbuf *upd_pkt;
        size_t pkt_size;

        uint16_t num_send, num_not_send = 0;
        uint16_t num_tx_total = 0;
        uint16_t slave_idx;

        int i, j;

        /* Search the tx buffer for ARP packets and forward them to alb */
        for (i = 0; i < nb_pkts; i++) {
                eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
                ether_type = eth_h->ether_type;
                offset = get_vlan_offset(eth_h, &ether_type);

                if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
                        slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

                        /* Change the src mac in the eth header */
                        rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

                        /* Add the packet to the slave tx buffer */
                        slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
                        slave_bufs_pkts[slave_idx]++;
                } else {
                        /* If the packet is not ARP, send it with the TLB policy */
                        slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
                                        bufs[i];
                        slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
                }
        }

        /* Update connected clients' ARP tables */
        if (internals->mode6.ntt) {
                for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
                        client_info = &internals->mode6.client_table[i];

                        if (client_info->in_use) {
                                /* Allocate a new packet to send the ARP update on the current slave */
                                upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
                                if (upd_pkt == NULL) {
                                        RTE_BOND_LOG(ERR,
                                                     "Failed to allocate ARP packet from pool");
                                        continue;
                                }
                                pkt_size = sizeof(struct rte_ether_hdr) +
                                        sizeof(struct rte_arp_hdr) +
                                        client_info->vlan_count *
                                        sizeof(struct rte_vlan_hdr);
                                upd_pkt->data_len = pkt_size;
                                upd_pkt->pkt_len = pkt_size;

                                slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
                                                internals);

                                /* Add the packet to the update tx buffer */
                                update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
                                update_bufs_pkts[slave_idx]++;
                        }
                }
                internals->mode6.ntt = 0;
        }

        /* Send ARP packets on the proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (slave_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
                                        slave_bufs[i], slave_bufs_pkts[i]);
                        for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
                                bufs[nb_pkts - 1 - num_not_send - j] =
                                                slave_bufs[i][nb_pkts - 1 - j];
                        }

                        num_tx_total += num_send;
                        num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        /* Print TX stats including update packets */
                        for (j = 0; j < slave_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
                                                        struct rte_ether_hdr *);
                                mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send update packets on the proper slaves */
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (update_bufs_pkts[i] > 0) {
                        num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
                                        update_bufs_pkts[i]);
                        for (j = num_send; j < update_bufs_pkts[i]; j++) {
                                rte_pktmbuf_free(update_bufs[i][j]);
                        }
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
                        for (j = 0; j < update_bufs_pkts[i]; j++) {
                                eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
                                                        struct rte_ether_hdr *);
                                mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
                        }
#endif
                }
        }

        /* Send non-ARP packets using the tlb policy */
        if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
                num_send = bond_ethdev_tx_burst_tlb(queue,
                                slave_bufs[RTE_MAX_ETHPORTS],
                                slave_bufs_pkts[RTE_MAX_ETHPORTS]);

                for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
                        bufs[nb_pkts - 1 - num_not_send - j] =
                                        slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
                }

                num_tx_total += num_send;
        }

        return num_tx_total;
}

static inline uint16_t
tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
                 uint16_t *slave_port_ids, uint16_t slave_count)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        /* Array to sort mbufs for transmission on each slave into */
        struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
        /* Number of mbufs for transmission on each slave */
        uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
        /* Mapping array generated by hash function to map mbufs to slaves */
        uint16_t bufs_slave_port_idxs[nb_bufs];

        uint16_t slave_tx_count;
        uint16_t total_tx_count = 0, total_tx_fail_count = 0;

        uint16_t i;

        /*
         * Populate each slave's mbuf array with the packets to be sent on it,
         * selecting the output slave using a hash based on the xmit policy
         */
        internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
                        bufs_slave_port_idxs);

        for (i = 0; i < nb_bufs; i++) {
                /* Populate slave mbuf arrays with mbufs for that slave. */
                uint16_t slave_idx = bufs_slave_port_idxs[i];

                slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
        }

        /* Send packet burst on each slave device */
        for (i = 0; i < slave_count; i++) {
                if (slave_nb_bufs[i] == 0)
                        continue;

                slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
                                bd_tx_q->queue_id, slave_bufs[i],
                                slave_nb_bufs[i]);

                total_tx_count += slave_tx_count;

                /* If tx burst fails move packets to end of bufs */
                if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
                        int slave_tx_fail_count = slave_nb_bufs[i] -
                                        slave_tx_count;
                        total_tx_fail_count += slave_tx_fail_count;
                        memcpy(&bufs[nb_bufs - total_tx_fail_count],
                               &slave_bufs[i][slave_tx_count],
                               slave_tx_fail_count * sizeof(bufs[0]));
                }
        }

        return total_tx_count;
}
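
/*
 * Note on the return convention (matching rte_eth_tx_burst()): packets a
 * slave could not send are moved to the tail of bufs[], so a caller may
 * retry just the unsent remainder. Illustrative caller, e.g. via the
 * bond_ethdev_tx_burst_balance() wrapper below:
 *
 *      uint16_t sent = rte_eth_tx_burst(bond_port, queue_id, bufs, nb_bufs);
 *      // bufs[sent .. nb_bufs - 1] now hold the packets still owned by
 *      // the caller
 */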

static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t slave_count;

        if (unlikely(nb_bufs == 0))
                return 0;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting
         */
        slave_count = internals->active_slave_count;
        if (unlikely(slave_count < 1))
                return 0;

        memcpy(slave_port_ids, internals->active_slaves,
                        sizeof(slave_port_ids[0]) * slave_count);
        return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
                                slave_count);
}

static inline uint16_t
tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
                bool dedicated_txq)
{
        struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
        struct bond_dev_private *internals = bd_tx_q->dev_private;

        uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t slave_count;

        uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
        uint16_t dist_slave_count;

        uint16_t slave_tx_count;

        uint16_t i;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        slave_count = internals->active_slave_count;
        if (unlikely(slave_count < 1))
                return 0;

        memcpy(slave_port_ids, internals->active_slaves,
                        sizeof(slave_port_ids[0]) * slave_count);

        if (dedicated_txq)
                goto skip_tx_ring;

        /* Check for LACP control packets and send if available */
        for (i = 0; i < slave_count; i++) {
                struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
                struct rte_mbuf *ctrl_pkt = NULL;

                if (likely(rte_ring_empty(port->tx_ring)))
                        continue;

                if (rte_ring_dequeue(port->tx_ring,
                                     (void **)&ctrl_pkt) != -ENOENT) {
                        slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
                                        bd_tx_q->queue_id, &ctrl_pkt, 1);
                        /*
                         * re-enqueue LAG control plane packets to buffering
                         * ring if transmission fails so the packet isn't lost.
                         */
                        if (slave_tx_count != 1)
                                rte_ring_enqueue(port->tx_ring, ctrl_pkt);
                }
        }

skip_tx_ring:
        if (unlikely(nb_bufs == 0))
                return 0;

        dist_slave_count = 0;
        for (i = 0; i < slave_count; i++) {
                struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];

                if (ACTOR_STATE(port, DISTRIBUTING))
                        dist_slave_port_ids[dist_slave_count++] =
                                        slave_port_ids[i];
        }

        if (unlikely(dist_slave_count < 1))
                return 0;

        return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
                                dist_slave_count);
}

static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        return tx_burst_8023ad(queue, bufs, nb_bufs, false);
}

static uint16_t
bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_bufs)
{
        return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}

static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct bond_dev_private *internals;
        struct bond_tx_queue *bd_tx_q;

        uint16_t slaves[RTE_MAX_ETHPORTS];
        uint8_t tx_failed_flag = 0;
        uint16_t num_of_slaves;

        uint16_t max_nb_of_tx_pkts = 0;

        int slave_tx_total[RTE_MAX_ETHPORTS];
        int i, most_successful_tx_slave = -1;

        bd_tx_q = (struct bond_tx_queue *)queue;
        internals = bd_tx_q->dev_private;

        /* Copy slave list to protect against slave up/down changes during tx
         * bursting */
        num_of_slaves = internals->active_slave_count;
        memcpy(slaves, internals->active_slaves,
                        sizeof(internals->active_slaves[0]) * num_of_slaves);

        if (num_of_slaves < 1)
                return 0;

        /* Increment reference count on mbufs */
        for (i = 0; i < nb_pkts; i++)
                rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

        /* Transmit burst on each active slave */
        for (i = 0; i < num_of_slaves; i++) {
                slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
                                        bufs, nb_pkts);

                if (unlikely(slave_tx_total[i] < nb_pkts))
                        tx_failed_flag = 1;

                /* Record the value and slave index for the slave which transmits the
                 * maximum number of packets */
                if (slave_tx_total[i] > max_nb_of_tx_pkts) {
                        max_nb_of_tx_pkts = slave_tx_total[i];
                        most_successful_tx_slave = i;
                }
        }

        /* If slaves fail to transmit packets from the burst, the calling
         * application is not expected to know about multiple references to
         * packets, so we must handle failures of all packets except those of
         * the most successful slave
         */
        if (unlikely(tx_failed_flag))
                for (i = 0; i < num_of_slaves; i++)
                        if (i != most_successful_tx_slave)
                                while (slave_tx_total[i] < nb_pkts)
                                        rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

        return max_nb_of_tx_pkts;
}
1355
1356 static void
1357 link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
1358 {
1359         struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1360
1361         if (bond_ctx->mode == BONDING_MODE_8023AD) {
1362                 /**
1363                  * In mode 4, save the link properties of the first slave;
1364                  * all subsequent slaves must match these properties.
1365                  */
1366                 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1367
1368                 bond_link->link_autoneg = slave_link->link_autoneg;
1369                 bond_link->link_duplex = slave_link->link_duplex;
1370                 bond_link->link_speed = slave_link->link_speed;
1371         } else {
1372                 /**
1373                  * In any other mode the link properties are set to default
1374                  * values of AUTONEG/DUPLEX
1375                  */
1376                 ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
1377                 ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1378         }
1379 }
1380
1381 static int
1382 link_properties_valid(struct rte_eth_dev *ethdev,
1383                 struct rte_eth_link *slave_link)
1384 {
1385         struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1386
1387         if (bond_ctx->mode == BONDING_MODE_8023AD) {
1388                 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1389
1390                 if (bond_link->link_duplex != slave_link->link_duplex ||
1391                         bond_link->link_autoneg != slave_link->link_autoneg ||
1392                         bond_link->link_speed != slave_link->link_speed)
1393                         return -1;
1394         }
1395
1396         return 0;
1397 }
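/*
 * Usage sketch (an assumed caller, not code from this driver): when a
 * slave's link comes up in mode 4, its properties must match those
 * recorded from the first slave by link_properties_set(). A caller
 * might verify this as follows:
 *
 *     struct rte_eth_link link;
 *
 *     if (rte_eth_link_get_nowait(slave_port_id, &link) == 0 &&
 *                     link_properties_valid(bonded_eth_dev, &link) != 0)
 *             RTE_BOND_LOG(ERR, "slave %u link properties mismatch",
 *                             slave_port_id);
 *
 * Speed, duplex and autoneg must all agree; every other bonding mode
 * accepts heterogeneous slaves.
 */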
1398
1399 int
1400 mac_address_get(struct rte_eth_dev *eth_dev,
1401                 struct rte_ether_addr *dst_mac_addr)
1402 {
1403         struct rte_ether_addr *mac_addr;
1404
1405         if (eth_dev == NULL) {
1406                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1407                 return -1;
1408         }
1409
1410         if (dst_mac_addr == NULL) {
1411                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1412                 return -1;
1413         }
1414
1415         mac_addr = eth_dev->data->mac_addrs;
1416
1417         rte_ether_addr_copy(mac_addr, dst_mac_addr);
1418         return 0;
1419 }
1420
1421 int
1422 mac_address_set(struct rte_eth_dev *eth_dev,
1423                 struct rte_ether_addr *new_mac_addr)
1424 {
1425         struct rte_ether_addr *mac_addr;
1426
1427         if (eth_dev == NULL) {
1428                 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1429                 return -1;
1430         }
1431
1432         if (new_mac_addr == NULL) {
1433                 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1434                 return -1;
1435         }
1436
1437         mac_addr = eth_dev->data->mac_addrs;
1438
1439         /* If the new MAC differs from the current MAC then update it */
1440         if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1441                 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1442
1443         return 0;
1444 }
1445
1446 static const struct rte_ether_addr null_mac_addr;
1447
1448 /*
1449  * Add additional MAC addresses to the slave
1450  */
1451 int
1452 slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1453                 uint16_t slave_port_id)
1454 {
1455         int i, ret;
1456         struct rte_ether_addr *mac_addr;
1457
1458         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1459                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1460                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1461                         break;
1462
1463                 ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
1464                 if (ret < 0) {
1465                         /* rollback slots 1..i-1; slot 0 is the primary MAC */
1466                         for (i--; i > 0; i--)
1467                                 rte_eth_dev_mac_addr_remove(slave_port_id,
1468                                         &bonded_eth_dev->data->mac_addrs[i]);
1469                         return ret;
1470                 }
1471         }
1472
1473         return 0;
1474 }
1475
1476 /*
1477  * Remove additional MAC addresses from the slave
1478  */
1479 int
1480 slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1481                 uint16_t slave_port_id)
1482 {
1483         int i, rc, ret;
1484         struct rte_ether_addr *mac_addr;
1485
1486         rc = 0;
1487         for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1488                 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1489                 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1490                         break;
1491
1492                 ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
1493                 /* save only the first error */
1494                 if (ret < 0 && rc == 0)
1495                         rc = ret;
1496         }
1497
1498         return rc;
1499 }
1500
1501 int
1502 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1503 {
1504         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1505         bool set;
1506         int i;
1507
1508         /* Update slave devices MAC addresses */
1509         if (internals->slave_count < 1)
1510                 return -1;
1511
1512         switch (internals->mode) {
1513         case BONDING_MODE_ROUND_ROBIN:
1514         case BONDING_MODE_BALANCE:
1515         case BONDING_MODE_BROADCAST:
1516                 for (i = 0; i < internals->slave_count; i++) {
1517                         if (rte_eth_dev_default_mac_addr_set(
1518                                         internals->slaves[i].port_id,
1519                                         bonded_eth_dev->data->mac_addrs)) {
1520                                 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1521                                                 internals->slaves[i].port_id);
1522                                 return -1;
1523                         }
1524                 }
1525                 break;
1526         case BONDING_MODE_8023AD:
1527                 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1528                 break;
1529         case BONDING_MODE_ACTIVE_BACKUP:
1530         case BONDING_MODE_TLB:
1531         case BONDING_MODE_ALB:
1532         default:
1533                 set = true;
1534                 for (i = 0; i < internals->slave_count; i++) {
1535                         if (internals->slaves[i].port_id ==
1536                                         internals->current_primary_port) {
1537                                 if (rte_eth_dev_default_mac_addr_set(
1538                                                 internals->current_primary_port,
1539                                                 bonded_eth_dev->data->mac_addrs)) {
1540                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1541                                                         internals->current_primary_port);
1542                                         set = false;
1543                                 }
1544                         } else {
1545                                 if (rte_eth_dev_default_mac_addr_set(
1546                                                 internals->slaves[i].port_id,
1547                                                 &internals->slaves[i].persisted_mac_addr)) {
1548                                         RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1549                                                         internals->slaves[i].port_id);
1550                                 }
1551                         }
1552                 }
1553                 if (!set)
1554                         return -1;
1555         }
1556
1557         return 0;
1558 }
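/*
 * Summary of the per-mode MAC policy implemented above:
 *
 *   round robin / balance / broadcast: every slave is programmed with
 *       the bonded device's MAC address.
 *   802.3ad: delegated to bond_mode_8023ad_mac_address_update().
 *   active backup / TLB / ALB: only the current primary carries the
 *       bonded MAC; all other slaves are restored to their persisted
 *       (original) MAC addresses.
 */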
1559
1560 int
1561 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1562 {
1563         struct bond_dev_private *internals;
1564
1565         internals = eth_dev->data->dev_private;
1566
1567         switch (mode) {
1568         case BONDING_MODE_ROUND_ROBIN:
1569                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1570                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1571                 break;
1572         case BONDING_MODE_ACTIVE_BACKUP:
1573                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1574                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1575                 break;
1576         case BONDING_MODE_BALANCE:
1577                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1578                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1579                 break;
1580         case BONDING_MODE_BROADCAST:
1581                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1582                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1583                 break;
1584         case BONDING_MODE_8023AD:
1585                 if (bond_mode_8023ad_enable(eth_dev) != 0)
1586                         return -1;
1587
1588                 if (internals->mode4.dedicated_queues.enabled == 0) {
1589                         eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1590                         eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1591                         RTE_BOND_LOG(WARNING,
1592                                 "Using mode 4, it is necessary to do TX burst "
1593                                 "and RX burst at least every 100ms.");
1594                 } else {
1595                         /* Use flow director's optimization */
1596                         eth_dev->rx_pkt_burst =
1597                                         bond_ethdev_rx_burst_8023ad_fast_queue;
1598                         eth_dev->tx_pkt_burst =
1599                                         bond_ethdev_tx_burst_8023ad_fast_queue;
1600                 }
1601                 break;
1602         case BONDING_MODE_TLB:
1603                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1604                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1605                 break;
1606         case BONDING_MODE_ALB:
1607                 if (bond_mode_alb_enable(eth_dev) != 0)
1608                         return -1;
1609
1610                 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1611                 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1612                 break;
1613         default:
1614                 return -1;
1615         }
1616
1617         internals->mode = mode;
1618
1619         return 0;
1620 }
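/*
 * Illustrative application loop for the mode 4 warning above (a sketch
 * assuming bond_port_id is a started bond without dedicated queues; it
 * is not part of the PMD). Without dedicated queues the LACP state
 * machines are serviced from the data path, so the bursts must be
 * called regularly even when there is no traffic:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t nb;
 *
 *     for (;;) {
 *             nb = rte_eth_rx_burst(bond_port_id, 0, pkts, 32);
 *             rte_pktmbuf_free_bulk(pkts, nb);  (or process them)
 *             rte_eth_tx_burst(bond_port_id, 0, pkts, 0);
 *             ... iterate at least once every 100 ms ...
 *     }
 *
 * With dedicated queues enabled, control traffic moves to its own
 * queue pair and the data path loses this constraint.
 */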
1621
1622
1623 static int
1624 slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
1625                 struct rte_eth_dev *slave_eth_dev)
1626 {
1627         int errval = 0;
1628         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1629         struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
1630
1631         if (port->slow_pool == NULL) {
1632                 char mem_name[256];
1633                 int slave_id = slave_eth_dev->data->port_id;
1634
1635                 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
1636                                 slave_id);
1637                 port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
1638                         250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
1639                         slave_eth_dev->data->numa_node);
1640
1641                 /* Any memory allocation failure in initialization is critical because
1642                  * resources cannot be freed, so reinitialization is impossible. */
1643                 if (port->slow_pool == NULL) {
1644                         rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
1645                                 slave_id, mem_name, rte_strerror(rte_errno));
1646                 }
1647         }
1648
1649         if (internals->mode4.dedicated_queues.enabled == 1) {
1650                 /* Configure slow Rx queue */
1651
1652                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
1653                                 internals->mode4.dedicated_queues.rx_qid, 128,
1654                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1655                                 NULL, port->slow_pool);
1656                 if (errval != 0) {
1657                         RTE_BOND_LOG(ERR,
1658                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1659                                         slave_eth_dev->data->port_id,
1660                                         internals->mode4.dedicated_queues.rx_qid,
1661                                         errval);
1662                         return errval;
1663                 }
1664
1665                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
1666                                 internals->mode4.dedicated_queues.tx_qid, 512,
1667                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1668                                 NULL);
1669                 if (errval != 0) {
1670                         RTE_BOND_LOG(ERR,
1671                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1672                                 slave_eth_dev->data->port_id,
1673                                 internals->mode4.dedicated_queues.tx_qid,
1674                                 errval);
1675                         return errval;
1676                 }
1677         }
1678         return 0;
1679 }
1680
1681 int
1682 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1683                 struct rte_eth_dev *slave_eth_dev)
1684 {
1685         struct bond_rx_queue *bd_rx_q;
1686         struct bond_tx_queue *bd_tx_q;
1687         uint16_t nb_rx_queues;
1688         uint16_t nb_tx_queues;
1689
1690         int errval;
1691         uint16_t q_id;
1692         struct rte_flow_error flow_error;
1693
1694         struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1695
1696         /* Stop slave */
1697         errval = rte_eth_dev_stop(slave_eth_dev->data->port_id);
1698         if (errval != 0)
1699                 RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)",
1700                              slave_eth_dev->data->port_id, errval);
1701
1702         /* Enable interrupts on slave device if supported */
1703         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1704                 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1705
1706         /* If RSS is enabled for bonding, try to enable it for slaves  */
1707         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1708                 if (internals->rss_key_len != 0) {
1709                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1710                                         internals->rss_key_len;
1711                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1712                                         internals->rss_key;
1713                 } else {
1714                         slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1715                 }
1716
1717                 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1718                                 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1719                 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1720                                 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1721         }
1722
1723         if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
1724                         DEV_RX_OFFLOAD_VLAN_FILTER)
1725                 slave_eth_dev->data->dev_conf.rxmode.offloads |=
1726                                 DEV_RX_OFFLOAD_VLAN_FILTER;
1727         else
1728                 slave_eth_dev->data->dev_conf.rxmode.offloads &=
1729                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
1730
1731         nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
1732         nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
1733
1734         if (internals->mode == BONDING_MODE_8023AD) {
1735                 if (internals->mode4.dedicated_queues.enabled == 1) {
1736                         nb_rx_queues++;
1737                         nb_tx_queues++;
1738                 }
1739         }
1740
1741         errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
1742                                      bonded_eth_dev->data->mtu);
1743         if (errval != 0 && errval != -ENOTSUP) {
1744                 RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
1745                                 slave_eth_dev->data->port_id, errval);
1746                 return errval;
1747         }
1748
1749         /* Configure device */
1750         errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1751                         nb_rx_queues, nb_tx_queues,
1752                         &(slave_eth_dev->data->dev_conf));
1753         if (errval != 0) {
1754                 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1755                                 slave_eth_dev->data->port_id, errval);
1756                 return errval;
1757         }
1758
1759         /* Setup Rx Queues */
1760         for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1761                 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1762
1763                 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1764                                 bd_rx_q->nb_rx_desc,
1765                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1766                                 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1767                 if (errval != 0) {
1768                         RTE_BOND_LOG(ERR,
1769                                         "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1770                                         slave_eth_dev->data->port_id, q_id, errval);
1771                         return errval;
1772                 }
1773         }
1774
1775         /* Setup Tx Queues */
1776         for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1777                 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1778
1779                 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1780                                 bd_tx_q->nb_tx_desc,
1781                                 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1782                                 &bd_tx_q->tx_conf);
1783                 if (errval != 0) {
1784                         RTE_BOND_LOG(ERR,
1785                                 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1786                                 slave_eth_dev->data->port_id, q_id, errval);
1787                         return errval;
1788                 }
1789         }
1790
1791         if (internals->mode == BONDING_MODE_8023AD &&
1792                         internals->mode4.dedicated_queues.enabled == 1) {
1793                 errval = slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev);
1794                 if (errval != 0)
1795                         return errval;
1796
1797                 errval = bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
1798                                 slave_eth_dev->data->port_id);
1799                 if (errval != 0) {
1800                         RTE_BOND_LOG(ERR, "8023ad flow verify: port=%d, err (%d)",
1801                                 slave_eth_dev->data->port_id, errval);
1802                         return errval;
1803                 }
1804
1805                 if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
1806                         rte_flow_destroy(slave_eth_dev->data->port_id,
1807                                         internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
1808                                         &flow_error);
1809
1810                 bond_ethdev_8023ad_flow_set(bonded_eth_dev,
1811                                 slave_eth_dev->data->port_id);
1812         }
1813
1814         /* Start device */
1815         errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1816         if (errval != 0) {
1817                 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1818                                 slave_eth_dev->data->port_id, errval);
1819                 return -1;
1820         }
1821
1822         /* If RSS is enabled for bonding, synchronize RETA */
1823         if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1824                 int i;
1825                 struct bond_dev_private *internals;
1826
1827                 internals = bonded_eth_dev->data->dev_private;
1828
1829                 for (i = 0; i < internals->slave_count; i++) {
1830                         if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1831                                 errval = rte_eth_dev_rss_reta_update(
1832                                                 slave_eth_dev->data->port_id,
1833                                                 &internals->reta_conf[0],
1834                                                 internals->slaves[i].reta_size);
1835                                 if (errval != 0) {
1836                                         RTE_BOND_LOG(WARNING,
1837                                                      "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1838                                                      " RSS Configuration for bonding may be inconsistent.",
1839                                                      slave_eth_dev->data->port_id, errval);
1840                                 }
1841                                 break;
1842                         }
1843                 }
1844         }
1845
1846         /* If lsc interrupt is set, check initial slave's link status */
1847         if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1848                 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1849                 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1850                         RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
1851                         NULL);
1852         }
1853
1854         return 0;
1855 }
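/*
 * Recap of the reconfiguration sequence above: stop the slave,
 * propagate LSC/RSS/VLAN-filter/MTU settings from the bonded device,
 * rte_eth_dev_configure() with the bonded queue counts (plus one extra
 * rx/tx queue when mode 4 dedicated queues are enabled), set up the
 * rx/tx queues, install the slow-queue flow rule if required, start
 * the slave, resync the RSS RETA, and finally seed the initial link
 * status of LSC-capable slaves.
 */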
1856
1857 void
1858 slave_remove(struct bond_dev_private *internals,
1859                 struct rte_eth_dev *slave_eth_dev)
1860 {
1861         uint16_t i;
1862
1863         for (i = 0; i < internals->slave_count; i++)
1864                 if (internals->slaves[i].port_id ==
1865                                 slave_eth_dev->data->port_id)
1866                         break;
1867
1868         if (i < (internals->slave_count - 1)) {
1869                 struct rte_flow *flow;
1870
1871                 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1872                                 sizeof(internals->slaves[0]) *
1873                                 (internals->slave_count - i - 1));
1874                 TAILQ_FOREACH(flow, &internals->flow_list, next) {
1875                         memmove(&flow->flows[i], &flow->flows[i + 1],
1876                                 sizeof(flow->flows[0]) *
1877                                 (internals->slave_count - i - 1));
1878                         flow->flows[internals->slave_count - 1] = NULL;
1879                 }
1880         }
1881
1882         internals->slave_count--;
1883
1884         /* force reconfiguration of slave interfaces */
1885         rte_eth_dev_internal_reset(slave_eth_dev);
1886 }
1887
1888 static void
1889 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1890
1891 void
1892 slave_add(struct bond_dev_private *internals,
1893                 struct rte_eth_dev *slave_eth_dev)
1894 {
1895         struct bond_slave_details *slave_details =
1896                         &internals->slaves[internals->slave_count];
1897
1898         slave_details->port_id = slave_eth_dev->data->port_id;
1899         slave_details->last_link_status = 0;
1900
1901         /* Mark slave devices that don't support interrupts so we can
1902          * compensate when we start the bond
1903          */
1904         if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1905                 slave_details->link_status_poll_enabled = 1;
1906         }
1907
1908         slave_details->link_status_wait_to_complete = 0;
1909         /* save the slave's original MAC so it can be restored on removal */
1910         memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1911                         sizeof(struct rte_ether_addr));
1912 }
1913
1914 void
1915 bond_ethdev_primary_set(struct bond_dev_private *internals,
1916                 uint16_t slave_port_id)
1917 {
1918         int i;
1919
1920         if (internals->active_slave_count < 1)
1921                 internals->current_primary_port = slave_port_id;
1922         else
1923                 /* Search bonded device slave ports for new proposed primary port */
1924                 for (i = 0; i < internals->active_slave_count; i++) {
1925                         if (internals->active_slaves[i] == slave_port_id)
1926                                 internals->current_primary_port = slave_port_id;
1927                 }
1928 }
1929
1930 static int
1931 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1932
1933 static int
1934 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1935 {
1936         struct bond_dev_private *internals;
1937         int i;
1938
1939         /* slave eth dev will be started by bonded device */
1940         if (check_for_bonded_ethdev(eth_dev)) {
1941                 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1942                                 eth_dev->data->port_id);
1943                 return -1;
1944         }
1945
1946         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1947         eth_dev->data->dev_started = 1;
1948
1949         internals = eth_dev->data->dev_private;
1950
1951         if (internals->slave_count == 0) {
1952                 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1953                 goto out_err;
1954         }
1955
1956         if (internals->user_defined_mac == 0) {
1957                 struct rte_ether_addr *new_mac_addr = NULL;
1958
1959                 for (i = 0; i < internals->slave_count; i++)
1960                         if (internals->slaves[i].port_id == internals->primary_port)
1961                                 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1962
1963                 if (new_mac_addr == NULL)
1964                         goto out_err;
1965
1966                 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1967                         RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1968                                         eth_dev->data->port_id);
1969                         goto out_err;
1970                 }
1971         }
1972
1973         if (internals->mode == BONDING_MODE_8023AD) {
1974                 if (internals->mode4.dedicated_queues.enabled == 1) {
1975                         internals->mode4.dedicated_queues.rx_qid =
1976                                         eth_dev->data->nb_rx_queues;
1977                         internals->mode4.dedicated_queues.tx_qid =
1978                                         eth_dev->data->nb_tx_queues;
1979                 }
1980         }
1981
1982
1983         /* Reconfigure each slave device if starting bonded device */
1984         for (i = 0; i < internals->slave_count; i++) {
1985                 struct rte_eth_dev *slave_ethdev =
1986                                 &(rte_eth_devices[internals->slaves[i].port_id]);
1987                 if (slave_configure(eth_dev, slave_ethdev) != 0) {
1988                         RTE_BOND_LOG(ERR,
1989                                 "bonded port (%d) failed to reconfigure slave device (%d)",
1990                                 eth_dev->data->port_id,
1991                                 internals->slaves[i].port_id);
1992                         goto out_err;
1993                 }
1994                 /* We will need to poll for link status if any slave doesn't
1995                  * support interrupts
1996                  */
1997                 if (internals->slaves[i].link_status_poll_enabled)
1998                         internals->link_status_polling_enabled = 1;
1999         }
2000
2001         /* start polling if needed */
2002         if (internals->link_status_polling_enabled) {
2003                 rte_eal_alarm_set(
2004                         internals->link_status_polling_interval_ms * 1000,
2005                         bond_ethdev_slave_link_status_change_monitor,
2006                         (void *)&rte_eth_devices[internals->port_id]);
2007         }
2008
2009         /* Update all slave devices' MACs */
2010         if (mac_address_slaves_update(eth_dev) != 0)
2011                 goto out_err;
2012
2013         if (internals->user_defined_primary_port)
2014                 bond_ethdev_primary_set(internals, internals->primary_port);
2015
2016         if (internals->mode == BONDING_MODE_8023AD)
2017                 bond_mode_8023ad_start(eth_dev);
2018
2019         if (internals->mode == BONDING_MODE_TLB ||
2020                         internals->mode == BONDING_MODE_ALB)
2021                 bond_tlb_enable(internals);
2022
2023         return 0;
2024
2025 out_err:
2026         eth_dev->data->dev_started = 0;
2027         return -1;
2028 }
2029
2030 static void
2031 bond_ethdev_free_queues(struct rte_eth_dev *dev)
2032 {
2033         uint16_t i;
2034
2035         if (dev->data->rx_queues != NULL) {
2036                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2037                         rte_free(dev->data->rx_queues[i]);
2038                         dev->data->rx_queues[i] = NULL;
2039                 }
2040                 dev->data->nb_rx_queues = 0;
2041         }
2042
2043         if (dev->data->tx_queues != NULL) {
2044                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2045                         rte_free(dev->data->tx_queues[i]);
2046                         dev->data->tx_queues[i] = NULL;
2047                 }
2048                 dev->data->nb_tx_queues = 0;
2049         }
2050 }
2051
2052 void
2053 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
2054 {
2055         struct bond_dev_private *internals = eth_dev->data->dev_private;
2056         uint16_t i;
2057
2058         if (internals->mode == BONDING_MODE_8023AD) {
2059                 struct port *port;
2060                 void *pkt = NULL;
2061
2062                 bond_mode_8023ad_stop(eth_dev);
2063
2064                 /* Discard all messages to/from mode 4 state machines */
2065                 for (i = 0; i < internals->active_slave_count; i++) {
2066                         port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
2067
2068                         RTE_ASSERT(port->rx_ring != NULL);
2069                         while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
2070                                 rte_pktmbuf_free(pkt);
2071
2072                         RTE_ASSERT(port->tx_ring != NULL);
2073                         while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
2074                                 rte_pktmbuf_free(pkt);
2075                 }
2076         }
2077
2078         if (internals->mode == BONDING_MODE_TLB ||
2079                         internals->mode == BONDING_MODE_ALB) {
2080                 bond_tlb_disable(internals);
2081                 for (i = 0; i < internals->active_slave_count; i++)
2082                         tlb_last_obytets[internals->active_slaves[i]] = 0;
2083         }
2084
2085         eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2086         eth_dev->data->dev_started = 0;
2087
2088         internals->link_status_polling_enabled = 0;
2089         for (i = 0; i < internals->slave_count; i++) {
2090                 uint16_t slave_id = internals->slaves[i].port_id;
2091                 if (find_slave_by_id(internals->active_slaves,
2092                                 internals->active_slave_count, slave_id) !=
2093                                                 internals->active_slave_count) {
2094                         internals->slaves[i].last_link_status = 0;
2095                         if (rte_eth_dev_stop(slave_id) != 0)
2096                                 RTE_BOND_LOG(ERR, "Failed to stop device on port %u", slave_id);
2097                         deactivate_slave(eth_dev, slave_id);
2097                 }
2098         }
2099 }
2100
2101 int
2102 bond_ethdev_close(struct rte_eth_dev *dev)
2103 {
2104         struct bond_dev_private *internals = dev->data->dev_private;
2105         uint16_t bond_port_id = internals->port_id;
2106         int skipped = 0;
2107         struct rte_flow_error ferror;
2108
2109         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2110                 return 0;
2111
2112         RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
2113         while (internals->slave_count != skipped) {
2114                 uint16_t port_id = internals->slaves[skipped].port_id;
2115
2116                 if (rte_eth_dev_stop(port_id) != 0) {
2117                         RTE_BOND_LOG(ERR, "Failed to stop device on port %u", port_id);
2118                         skipped++;
2119                         continue; /* else a failed remove would advance skipped twice */
2120                 }
2121
2122                 if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
2123                         RTE_BOND_LOG(ERR,
2124                                      "Failed to remove port %d from bonded device %s",
2125                                      port_id, dev->device->name);
2126                         skipped++;
2127                 }
2128         }
2129         bond_flow_ops.flush(dev, &ferror);
2130         bond_ethdev_free_queues(dev);
2131         rte_bitmap_reset(internals->vlan_filter_bmp);
2132         rte_bitmap_free(internals->vlan_filter_bmp);
2133         rte_free(internals->vlan_filter_bmpmem);
2134
2135         /* Try to release the mempool used in mode 6. If the bonded
2136          * device is not in mode 6, freeing the NULL pointer is harmless.
2137          */
2138         rte_mempool_free(internals->mode6.mempool);
2139
2140         return 0;
2141 }
2142
2143 /* forward declaration */
2144 static int bond_ethdev_configure(struct rte_eth_dev *dev);
2145
2146 static int
2147 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2148 {
2149         struct bond_dev_private *internals = dev->data->dev_private;
2150         struct bond_slave_details slave;
2151         int ret;
2152
2153         uint16_t max_nb_rx_queues = UINT16_MAX;
2154         uint16_t max_nb_tx_queues = UINT16_MAX;
2155         uint16_t max_rx_desc_lim = UINT16_MAX;
2156         uint16_t max_tx_desc_lim = UINT16_MAX;
2157
2158         dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
2159
2160         dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2161                         internals->candidate_max_rx_pktlen :
2162                         RTE_ETHER_MAX_JUMBO_FRAME_LEN;
2163
2164         /* The max number of tx/rx queues that the bonded device can support
2165          * is the minimum across all bonded slaves, as every slave must be
2166          * capable of supporting the same number of tx/rx queues.
2167          */
2168         if (internals->slave_count > 0) {
2169                 struct rte_eth_dev_info slave_info;
2170                 uint16_t idx;
2171
2172                 for (idx = 0; idx < internals->slave_count; idx++) {
2173                         slave = internals->slaves[idx];
2174                         ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
2175                         if (ret != 0) {
2176                                 RTE_BOND_LOG(ERR,
2177                                         "%s: Error during getting device (port %u) info: %s\n",
2178                                         __func__,
2179                                         slave.port_id,
2180                                         strerror(-ret));
2181
2182                                 return ret;
2183                         }
2184
2185                         if (slave_info.max_rx_queues < max_nb_rx_queues)
2186                                 max_nb_rx_queues = slave_info.max_rx_queues;
2187
2188                         if (slave_info.max_tx_queues < max_nb_tx_queues)
2189                                 max_nb_tx_queues = slave_info.max_tx_queues;
2190
2191                         if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
2192                                 max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
2193
2194                         if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
2195                                 max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
2196                 }
2197         }
2198
2199         dev_info->max_rx_queues = max_nb_rx_queues;
2200         dev_info->max_tx_queues = max_nb_tx_queues;
2201
2202         memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2203                sizeof(dev_info->default_rxconf));
2204         memcpy(&dev_info->default_txconf, &internals->default_txconf,
2205                sizeof(dev_info->default_txconf));
2206
2207         dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
2208         dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
2209
2210         /**
2211          * If dedicated hw queues are enabled for the bonded device in LACP
2212          * mode, the maximum number of data path queues must be reduced by 1.
2213          */
2214         if (internals->mode == BONDING_MODE_8023AD &&
2215                 internals->mode4.dedicated_queues.enabled == 1) {
2216                 dev_info->max_rx_queues--;
2217                 dev_info->max_tx_queues--;
2218         }
2219
2220         dev_info->min_rx_bufsize = 0;
2221
2222         dev_info->rx_offload_capa = internals->rx_offload_capa;
2223         dev_info->tx_offload_capa = internals->tx_offload_capa;
2224         dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2225         dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2226         dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2227
2228         dev_info->reta_size = internals->reta_size;
2229
2230         return 0;
2231 }
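/*
 * Worked example (hypothetical capabilities): with two slaves
 * reporting max rx/tx queues of 16/16 and 8/12, the bonded device
 * advertises 8 rx and 12 tx queues, since each slave must mirror the
 * bonded queue layout. If mode 4 dedicated queues are enabled, one
 * queue in each direction is reserved for LACP control traffic and
 * the advertised maxima drop to 7/11.
 */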
2232
2233 static int
2234 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2235 {
2236         int res;
2237         uint16_t i;
2238         struct bond_dev_private *internals = dev->data->dev_private;
2239
2240         /* don't do this while a slave is being added */
2241         rte_spinlock_lock(&internals->lock);
2242
2243         if (on)
2244                 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2245         else
2246                 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2247
2248         for (i = 0; i < internals->slave_count; i++) {
2249                 uint16_t port_id = internals->slaves[i].port_id;
2250
2251                 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2252                 if (res == -ENOTSUP)
2253                         RTE_BOND_LOG(WARNING,
2254                                      "Setting VLAN filter on slave port %u not supported.",
2255                                      port_id);
2256         }
2257
2258         rte_spinlock_unlock(&internals->lock);
2259         return 0;
2260 }
2261
2262 static int
2263 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
2264                 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
2265                 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
2266 {
2267         struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
2268                         rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
2269                                         0, dev->data->numa_node);
2270         if (bd_rx_q == NULL)
2271                 return -1;
2272
2273         bd_rx_q->queue_id = rx_queue_id;
2274         bd_rx_q->dev_private = dev->data->dev_private;
2275
2276         bd_rx_q->nb_rx_desc = nb_rx_desc;
2277
2278         memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
2279         bd_rx_q->mb_pool = mb_pool;
2280
2281         dev->data->rx_queues[rx_queue_id] = bd_rx_q;
2282
2283         return 0;
2284 }
2285
2286 static int
2287 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
2288                 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
2289                 const struct rte_eth_txconf *tx_conf)
2290 {
2291         struct bond_tx_queue *bd_tx_q  = (struct bond_tx_queue *)
2292                         rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
2293                                         0, dev->data->numa_node);
2294
2295         if (bd_tx_q == NULL)
2296                 return -1;
2297
2298         bd_tx_q->queue_id = tx_queue_id;
2299         bd_tx_q->dev_private = dev->data->dev_private;
2300
2301         bd_tx_q->nb_tx_desc = nb_tx_desc;
2302         memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
2303
2304         dev->data->tx_queues[tx_queue_id] = bd_tx_q;
2305
2306         return 0;
2307 }
2308
2309 static void
2310 bond_ethdev_rx_queue_release(void *queue)
2311 {
2312         if (queue == NULL)
2313                 return;
2314
2315         rte_free(queue);
2316 }
2317
2318 static void
2319 bond_ethdev_tx_queue_release(void *queue)
2320 {
2321         if (queue == NULL)
2322                 return;
2323
2324         rte_free(queue);
2325 }
2326
2327 static void
2328 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
2329 {
2330         struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
2331         struct bond_dev_private *internals;
2332
2333         /* Default value for polling slave found is true as we don't want to
2334          * disable the polling thread if we cannot get the lock */
2335         int i, polling_slave_found = 1;
2336
2337         if (cb_arg == NULL)
2338                 return;
2339
2340         bonded_ethdev = cb_arg;
2341         internals = bonded_ethdev->data->dev_private;
2342
2343         if (!bonded_ethdev->data->dev_started ||
2344                 !internals->link_status_polling_enabled)
2345                 return;
2346
2347         /* If the device is currently being configured, don't check slave link
2348          * status; wait until the next period */
2349         if (rte_spinlock_trylock(&internals->lock)) {
2350                 if (internals->slave_count > 0)
2351                         polling_slave_found = 0;
2352
2353                 for (i = 0; i < internals->slave_count; i++) {
2354                         if (!internals->slaves[i].link_status_poll_enabled)
2355                                 continue;
2356
2357                         slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
2358                         polling_slave_found = 1;
2359
2360                         /* Update slave link status */
2361                         (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
2362                                         internals->slaves[i].link_status_wait_to_complete);
2363
2364                         /* if link status has changed since last checked then call lsc
2365                          * event callback */
2366                         if (slave_ethdev->data->dev_link.link_status !=
2367                                         internals->slaves[i].last_link_status) {
2368                                 internals->slaves[i].last_link_status =
2369                                                 slave_ethdev->data->dev_link.link_status;
2370
2371                                 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
2372                                                 RTE_ETH_EVENT_INTR_LSC,
2373                                                 &bonded_ethdev->data->port_id,
2374                                                 NULL);
2375                         }
2376                 }
2377                 rte_spinlock_unlock(&internals->lock);
2378         }
2379
2380         if (polling_slave_found)
2381                 /* Set alarm to continue monitoring link status of slave ethdevs */
2382                 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
2383                                 bond_ethdev_slave_link_status_change_monitor, cb_arg);
2384 }
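/*
 * Timing sketch (assuming the default polling interval of
 * DEFAULT_POLLING_INTERVAL_10_MS): the alarm re-arms every
 * 10 * 1000 us, so a slave without LSC interrupts has a link state
 * change observed at most roughly 10 ms late.
 */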
2385
2386 static int
2387 bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2388 {
2389         int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
2390
2391         struct bond_dev_private *bond_ctx;
2392         struct rte_eth_link slave_link;
2393
2394         bool one_link_update_succeeded;
2395         uint32_t idx;
2396         int ret;
2397
2398         bond_ctx = ethdev->data->dev_private;
2399
2400         ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2401
2402         if (ethdev->data->dev_started == 0 ||
2403                         bond_ctx->active_slave_count == 0) {
2404                 ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
2405                 return 0;
2406         }
2407
2408         ethdev->data->dev_link.link_status = ETH_LINK_UP;
2409
2410         if (wait_to_complete)
2411                 link_update = rte_eth_link_get;
2412         else
2413                 link_update = rte_eth_link_get_nowait;
2414
2415         switch (bond_ctx->mode) {
2416         case BONDING_MODE_BROADCAST:
2417                 /**
2418                  * Setting link speed to UINT32_MAX to ensure we pick up the
2419                  * value of the first active slave
2420                  */
2421                 ethdev->data->dev_link.link_speed = UINT32_MAX;
2422
2423                 /**
2424                  * Link speed is the minimum of all the slaves' link speeds, as
2425                  * packet loss will occur on the slowest slave if transmission
2426                  * at a higher rate is attempted
2427                  */
2428                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2429                         ret = link_update(bond_ctx->active_slaves[idx],
2430                                           &slave_link);
2431                         if (ret < 0) {
2432                                 ethdev->data->dev_link.link_speed =
2433                                         ETH_SPEED_NUM_NONE;
2434                                 RTE_BOND_LOG(ERR,
2435                                         "Slave (port %u) link get failed: %s",
2436                                         bond_ctx->active_slaves[idx],
2437                                         rte_strerror(-ret));
2438                                 return 0;
2439                         }
2440
2441                         if (slave_link.link_speed <
2442                                         ethdev->data->dev_link.link_speed)
2443                                 ethdev->data->dev_link.link_speed =
2444                                                 slave_link.link_speed;
2445                 }
2446                 break;
2447         case BONDING_MODE_ACTIVE_BACKUP:
2448                 /* Current primary slave */
2449                 ret = link_update(bond_ctx->current_primary_port, &slave_link);
2450                 if (ret < 0) {
2451                         RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
2452                                 bond_ctx->current_primary_port,
2453                                 rte_strerror(-ret));
2454                         return 0;
2455                 }
2456
2457                 ethdev->data->dev_link.link_speed = slave_link.link_speed;
2458                 break;
2459         case BONDING_MODE_8023AD:
2460                 ethdev->data->dev_link.link_autoneg =
2461                                 bond_ctx->mode4.slave_link.link_autoneg;
2462                 ethdev->data->dev_link.link_duplex =
2463                                 bond_ctx->mode4.slave_link.link_duplex;
2464                 /* fall through */
2465                 /* to update link speed */
2466         case BONDING_MODE_ROUND_ROBIN:
2467         case BONDING_MODE_BALANCE:
2468         case BONDING_MODE_TLB:
2469         case BONDING_MODE_ALB:
2470         default:
2471                 /**
2472                  * In these modes the maximum theoretical link speed is the sum
2473                  * of all the slaves' link speeds
2474                  */
2475                 ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2476                 one_link_update_succeeded = false;
2477
2478                 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2479                         ret = link_update(bond_ctx->active_slaves[idx],
2480                                         &slave_link);
2481                         if (ret < 0) {
2482                                 RTE_BOND_LOG(ERR,
2483                                         "Slave (port %u) link get failed: %s",
2484                                         bond_ctx->active_slaves[idx],
2485                                         rte_strerror(-ret));
2486                                 continue;
2487                         }
2488
2489                         one_link_update_succeeded = true;
2490                         ethdev->data->dev_link.link_speed +=
2491                                         slave_link.link_speed;
2492                 }
2493
2494                 if (!one_link_update_succeeded) {
2495                         RTE_BOND_LOG(ERR, "All slaves link get failed");
2496                         return 0;
2497                 }
2498         }
2499
2500
2501         return 0;
2502 }
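/*
 * Worked example (hypothetical speeds): with active slaves at 10 Gb/s
 * and 1 Gb/s, broadcast mode reports min(10000, 1000) = 1000 Mb/s,
 * since every packet must also cross the slowest slave; active backup
 * reports the primary's speed; the remaining modes report the sum
 * 10000 + 1000 = 11000 Mb/s, the theoretical aggregate when traffic
 * is spread across all slaves.
 */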
2503
2504
2505 static int
2506 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2507 {
2508         struct bond_dev_private *internals = dev->data->dev_private;
2509         struct rte_eth_stats slave_stats;
2510         int i, j;
2511
2512         for (i = 0; i < internals->slave_count; i++) {
2513                 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
2514
2515                 stats->ipackets += slave_stats.ipackets;
2516                 stats->opackets += slave_stats.opackets;
2517                 stats->ibytes += slave_stats.ibytes;
2518                 stats->obytes += slave_stats.obytes;
2519                 stats->imissed += slave_stats.imissed;
2520                 stats->ierrors += slave_stats.ierrors;
2521                 stats->oerrors += slave_stats.oerrors;
2522                 stats->rx_nombuf += slave_stats.rx_nombuf;
2523
2524                 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
2525                         stats->q_ipackets[j] += slave_stats.q_ipackets[j];
2526                         stats->q_opackets[j] += slave_stats.q_opackets[j];
2527                         stats->q_ibytes[j] += slave_stats.q_ibytes[j];
2528                         stats->q_obytes[j] += slave_stats.q_obytes[j];
2529                         stats->q_errors[j] += slave_stats.q_errors[j];
2530                 }
2531
2532         }
2533
2534         return 0;
2535 }
2536
2537 static int
2538 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
2539 {
2540         struct bond_dev_private *internals = dev->data->dev_private;
2541         int i;
2542         int err;
2543         int ret;
2544
2545         for (i = 0, err = 0; i < internals->slave_count; i++) {
2546                 ret = rte_eth_stats_reset(internals->slaves[i].port_id);
2547                 if (ret != 0)
2548                         err = ret;
2549         }
2550
2551         return err;
2552 }
2553
2554 static int
2555 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2556 {
2557         struct bond_dev_private *internals = eth_dev->data->dev_private;
2558         int i;
2559         int ret = 0;
2560         uint16_t port_id;
2561
2562         switch (internals->mode) {
2563         /* Promiscuous mode is propagated to all slaves */
2564         case BONDING_MODE_ROUND_ROBIN:
2565         case BONDING_MODE_BALANCE:
2566         case BONDING_MODE_BROADCAST:
2567         case BONDING_MODE_8023AD: {
2568                 unsigned int slave_ok = 0;
2569
2570                 for (i = 0; i < internals->slave_count; i++) {
2571                         port_id = internals->slaves[i].port_id;
2572
2573                         ret = rte_eth_promiscuous_enable(port_id);
2574                         if (ret != 0)
2575                                 RTE_BOND_LOG(ERR,
2576                                         "Failed to enable promiscuous mode for port %u: %s",
2577                                         port_id, rte_strerror(-ret));
2578                         else
2579                                 slave_ok++;
2580                 }
2581                 /*
2582                  * Report success if the operation succeeded on at least one
2583                  * slave. Otherwise return the last error code.
2584                  */
2585                 if (slave_ok > 0)
2586                         ret = 0;
2587                 break;
2588         }
2589         /* Promiscuous mode is propagated only to primary slave */
2590         case BONDING_MODE_ACTIVE_BACKUP:
2591         case BONDING_MODE_TLB:
2592         case BONDING_MODE_ALB:
2593         default:
2594                 /* Do not touch promisc when there cannot be primary ports */
2595                 if (internals->slave_count == 0)
2596                         break;
2597                 port_id = internals->current_primary_port;
2598                 ret = rte_eth_promiscuous_enable(port_id);
2599                 if (ret != 0)
2600                         RTE_BOND_LOG(ERR,
2601                                 "Failed to enable promiscuous mode for port %u: %s",
2602                                 port_id, rte_strerror(-ret));
2603         }
2604
2605         return ret;
2606 }
2607
2608 static int
2609 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
2610 {
2611         struct bond_dev_private *internals = dev->data->dev_private;
2612         int i;
2613         int ret = 0;
2614         uint16_t port_id;
2615
2616         switch (internals->mode) {
2617         /* Promiscuous mode is propagated to all slaves */
2618         case BONDING_MODE_ROUND_ROBIN:
2619         case BONDING_MODE_BALANCE:
2620         case BONDING_MODE_BROADCAST:
2621         case BONDING_MODE_8023AD: {
2622                 unsigned int slave_ok = 0;
2623
2624                 for (i = 0; i < internals->slave_count; i++) {
2625                         port_id = internals->slaves[i].port_id;
2626
2627                         if (internals->mode == BONDING_MODE_8023AD &&
2628                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2629                                         BOND_8023AD_FORCED_PROMISC) {
2630                                 slave_ok++;
2631                                 continue;
2632                         }
2633                         ret = rte_eth_promiscuous_disable(port_id);
2634                         if (ret != 0)
2635                                 RTE_BOND_LOG(ERR,
2636                                         "Failed to disable promiscuous mode for port %u: %s",
2637                                         port_id, rte_strerror(-ret));
2638                         else
2639                                 slave_ok++;
2640                 }
2641                 /*
2642                  * Report success if the operation succeeded on at least one
2643                  * slave. Otherwise return the last error code.
2644                  */
2645                 if (slave_ok > 0)
2646                         ret = 0;
2647                 break;
2648         }
2649         /* Promiscuous mode is propagated only to primary slave */
2650         case BONDING_MODE_ACTIVE_BACKUP:
2651         case BONDING_MODE_TLB:
2652         case BONDING_MODE_ALB:
2653         default:
2654                 /* Do not touch promisc when there cannot be primary ports */
2655                 if (internals->slave_count == 0)
2656                         break;
2657                 port_id = internals->current_primary_port;
2658                 ret = rte_eth_promiscuous_disable(port_id);
2659                 if (ret != 0)
2660                         RTE_BOND_LOG(ERR,
2661                                 "Failed to disable promiscuous mode for port %u: %s",
2662                                 port_id, rte_strerror(-ret));
2663         }
2664
2665         return ret;
2666 }
2667
2668 static int
2669 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
2670 {
2671         struct bond_dev_private *internals = eth_dev->data->dev_private;
2672         int i;
2673         int ret = 0;
2674         uint16_t port_id;
2675
2676         switch (internals->mode) {
2677         /* allmulti mode is propagated to all slaves */
2678         case BONDING_MODE_ROUND_ROBIN:
2679         case BONDING_MODE_BALANCE:
2680         case BONDING_MODE_BROADCAST:
2681         case BONDING_MODE_8023AD: {
2682                 unsigned int slave_ok = 0;
2683
2684                 for (i = 0; i < internals->slave_count; i++) {
2685                         port_id = internals->slaves[i].port_id;
2686
2687                         ret = rte_eth_allmulticast_enable(port_id);
2688                         if (ret != 0)
2689                                 RTE_BOND_LOG(ERR,
2690                                         "Failed to enable allmulti mode for port %u: %s",
2691                                         port_id, rte_strerror(-ret));
2692                         else
2693                                 slave_ok++;
2694                 }
2695                 /*
2696                  * Report success if the operation succeeded on at least
2697                  * one slave. Otherwise return the last error code.
2698                  */
2699                 if (slave_ok > 0)
2700                         ret = 0;
2701                 break;
2702         }
2703         /* allmulti mode is propagated only to primary slave */
2704         case BONDING_MODE_ACTIVE_BACKUP:
2705         case BONDING_MODE_TLB:
2706         case BONDING_MODE_ALB:
2707         default:
2708                 /* Do not touch allmulti when there are no slaves and thus no primary */
2709                 if (internals->slave_count == 0)
2710                         break;
2711                 port_id = internals->current_primary_port;
2712                 ret = rte_eth_allmulticast_enable(port_id);
2713                 if (ret != 0)
2714                         RTE_BOND_LOG(ERR,
2715                                 "Failed to enable allmulti mode for port %u: %s",
2716                                 port_id, rte_strerror(-ret));
2717         }
2718
2719         return ret;
2720 }
2721
2722 static int
2723 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
2724 {
2725         struct bond_dev_private *internals = eth_dev->data->dev_private;
2726         int i;
2727         int ret = 0;
2728         uint16_t port_id;
2729
2730         switch (internals->mode) {
2731         /* allmulti mode is propagated to all slaves */
2732         case BONDING_MODE_ROUND_ROBIN:
2733         case BONDING_MODE_BALANCE:
2734         case BONDING_MODE_BROADCAST:
2735         case BONDING_MODE_8023AD: {
2736                 unsigned int slave_ok = 0;
2737
2738                 for (i = 0; i < internals->slave_count; i++) {
2739                         port_id = internals->slaves[i].port_id;
2740
2741                         if (internals->mode == BONDING_MODE_8023AD &&
2742                             bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2743                                         BOND_8023AD_FORCED_ALLMULTI)
2744                                 continue;
2745
2746                         ret = rte_eth_allmulticast_disable(port_id);
2747                         if (ret != 0)
2748                                 RTE_BOND_LOG(ERR,
2749                                         "Failed to disable allmulti mode for port %u: %s",
2750                                         port_id, rte_strerror(-ret));
2751                         else
2752                                 slave_ok++;
2753                 }
2754                 /*
2755                  * Report success if the operation succeeded on at least
2756                  * one slave. Otherwise return the last error code.
2757                  */
2758                 if (slave_ok > 0)
2759                         ret = 0;
2760                 break;
2761         }
2762         /* allmulti mode is propagated only to primary slave */
2763         case BONDING_MODE_ACTIVE_BACKUP:
2764         case BONDING_MODE_TLB:
2765         case BONDING_MODE_ALB:
2766         default:
2767                 /* Do not touch allmulti when there are no slaves and thus no primary */
2768                 if (internals->slave_count == 0)
2769                         break;
2770                 port_id = internals->current_primary_port;
2771                 ret = rte_eth_allmulticast_disable(port_id);
2772                 if (ret != 0)
2773                         RTE_BOND_LOG(ERR,
2774                                 "Failed to disable allmulti mode for port %u: %s",
2775                                 port_id, rte_strerror(-ret));
2776         }
2777
2778         return ret;
2779 }
2780
2781 static void
2782 bond_ethdev_delayed_lsc_propagation(void *arg)
2783 {
2784         if (arg == NULL)
2785                 return;
2786
2787         rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2788                         RTE_ETH_EVENT_INTR_LSC, NULL);
2789 }
2790
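/*
 * Link status change callback registered on every slave port. It validates
 * the event source, refreshes the slave link state, activates or
 * deactivates the slave accordingly, and then propagates the resulting
 * bonded link state, honouring any configured up/down propagation delays.
 */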
2791 int
2792 bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2793                 void *param, void *ret_param __rte_unused)
2794 {
2795         struct rte_eth_dev *bonded_eth_dev;
2796         struct bond_dev_private *internals;
2797         struct rte_eth_link link;
2798         int rc = -1;
2799         int ret;
2800
2801         uint8_t lsc_flag = 0;
2802         int valid_slave = 0;
2803         uint16_t active_pos;
2804         uint16_t i;
2805
2806         if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2807                 return rc;
2808
2809         bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
2810
2811         if (check_for_bonded_ethdev(bonded_eth_dev))
2812                 return rc;
2813
2814         internals = bonded_eth_dev->data->dev_private;
2815
2816         /* If the device isn't started don't handle interrupts */
2817         if (!bonded_eth_dev->data->dev_started)
2818                 return rc;
2819
2820         /* verify that port_id is a valid slave of bonded port */
2821         for (i = 0; i < internals->slave_count; i++) {
2822                 if (internals->slaves[i].port_id == port_id) {
2823                         valid_slave = 1;
2824                         break;
2825                 }
2826         }
2827
2828         if (!valid_slave)
2829                 return rc;
2830
2831         /* Synchronize lsc callback parallel calls either by real link event
2832          * from the slave PMDs or by the bonding PMD itself.
2833          */
2834         rte_spinlock_lock(&internals->lsc_lock);
2835
2836         /* Search for port in active port list */
2837         active_pos = find_slave_by_id(internals->active_slaves,
2838                         internals->active_slave_count, port_id);
2839
2840         ret = rte_eth_link_get_nowait(port_id, &link);
2841         if (ret < 0)
2842                 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);
2843
2844         if (ret == 0 && link.link_status) {
2845                 if (active_pos < internals->active_slave_count)
2846                         goto link_update;
2847
2848                 /* check link state properties if bonded link is up */
2849                 if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
2850                         if (link_properties_valid(bonded_eth_dev, &link) != 0)
2851                                 RTE_BOND_LOG(ERR, "Invalid link properties "
2852                                              "for slave %d in bonding mode %d",
2853                                              port_id, internals->mode);
2854                 } else {
2855                         /* inherit slave link properties */
2856                         link_properties_set(bonded_eth_dev, &link);
2857                 }
2858
2859                 /* If no active slave ports then set this port to be
2860                  * the primary port.
2861                  */
2862                 if (internals->active_slave_count < 1) {
2863                         /* If first active slave, then change link status */
2864                         bonded_eth_dev->data->dev_link.link_status =
2865                                                                 ETH_LINK_UP;
2866                         internals->current_primary_port = port_id;
2867                         lsc_flag = 1;
2868
2869                         mac_address_slaves_update(bonded_eth_dev);
2870                 }
2871
2872                 activate_slave(bonded_eth_dev, port_id);
2873
2874                 /* If the user has defined the primary port then default to
2875                  * using it.
2876                  */
2877                 if (internals->user_defined_primary_port &&
2878                                 internals->primary_port == port_id)
2879                         bond_ethdev_primary_set(internals, port_id);
2880         } else {
2881                 if (active_pos == internals->active_slave_count)
2882                         goto link_update;
2883
2884                 /* Remove from active slave list */
2885                 deactivate_slave(bonded_eth_dev, port_id);
2886
2887                 if (internals->active_slave_count < 1)
2888                         lsc_flag = 1;
2889
2890                 /* Update primary id: take the first active slave from the list,
2891                  * or fall back to the configured primary port if none is active */
2892                 if (port_id == internals->current_primary_port) {
2893                         if (internals->active_slave_count > 0)
2894                                 bond_ethdev_primary_set(internals,
2895                                                 internals->active_slaves[0]);
2896                         else
2897                                 internals->current_primary_port = internals->primary_port;
2898                         mac_address_slaves_update(bonded_eth_dev);
2899                 }
2900         }
2901
2902 link_update:
2903         /**
2904          * Update bonded device link properties after any change to active
2905          * slaves
2906          */
2907         bond_ethdev_link_update(bonded_eth_dev, 0);
2908
2909         if (lsc_flag) {
2910                 /* Cancel any possible outstanding interrupts if delays are enabled */
2911                 if (internals->link_up_delay_ms > 0 ||
2912                         internals->link_down_delay_ms > 0)
2913                         rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2914                                         bonded_eth_dev);
2915
2916                 if (bonded_eth_dev->data->dev_link.link_status) {
2917                         if (internals->link_up_delay_ms > 0)
2918                                 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2919                                                 bond_ethdev_delayed_lsc_propagation,
2920                                                 (void *)bonded_eth_dev);
2921                         else
2922                                 rte_eth_dev_callback_process(bonded_eth_dev,
2923                                                 RTE_ETH_EVENT_INTR_LSC,
2924                                                 NULL);
2925
2926                 } else {
2927                         if (internals->link_down_delay_ms > 0)
2928                                 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2929                                                 bond_ethdev_delayed_lsc_propagation,
2930                                                 (void *)bonded_eth_dev);
2931                         else
2932                                 rte_eth_dev_callback_process(bonded_eth_dev,
2933                                                 RTE_ETH_EVENT_INTR_LSC,
2934                                                 NULL);
2935                 }
2936         }
2937
2938         rte_spinlock_unlock(&internals->lsc_lock);
2939
2940         return rc;
2941 }
2942
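/*
 * Update the bonded device RETA table and propagate it to every slave.
 * The table is replicated across the full bonded array so that slaves
 * with differing RETA sizes receive a consistent entry pattern.
 */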
2943 static int
2944 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2945                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2946 {
2947         unsigned i, j;
2948         int result = 0;
2949         int slave_reta_size;
2950         unsigned reta_count;
2951         struct bond_dev_private *internals = dev->data->dev_private;
2952
2953         if (reta_size != internals->reta_size)
2954                 return -EINVAL;
2955
2956         /* Copy RETA table */
2957         reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
2958                         RTE_RETA_GROUP_SIZE;
2959
2960         for (i = 0; i < reta_count; i++) {
2961                 internals->reta_conf[i].mask = reta_conf[i].mask;
2962                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2963                         if ((reta_conf[i].mask >> j) & 0x01)
2964                                 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2965         }
2966
2967         /* Fill rest of array */
2968         for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2969                 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2970                                 sizeof(internals->reta_conf[0]) * reta_count);
2971
2972         /* Propagate RETA over slaves */
2973         for (i = 0; i < internals->slave_count; i++) {
2974                 slave_reta_size = internals->slaves[i].reta_size;
2975                 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2976                                 &internals->reta_conf[0], slave_reta_size);
2977                 if (result < 0)
2978                         return result;
2979         }
2980
2981         return 0;
2982 }
2983
2984 static int
2985 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2986                 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2987 {
2988         int i, j;
2989         struct bond_dev_private *internals = dev->data->dev_private;
2990
2991         if (reta_size != internals->reta_size)
2992                 return -EINVAL;
2993
2994         /* Copy RETA table */
2995         for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2996                 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2997                         if ((reta_conf[i].mask >> j) & 0x01)
2998                                 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2999
3000         return 0;
3001 }
3002
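/*
 * Apply a new RSS hash configuration: mask the requested hash functions
 * against what all slaves support, cache the hash key, and push the
 * resulting configuration to every slave.
 */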
3003 static int
3004 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
3005                 struct rte_eth_rss_conf *rss_conf)
3006 {
3007         int i, result = 0;
3008         struct bond_dev_private *internals = dev->data->dev_private;
3009         struct rte_eth_rss_conf bond_rss_conf;
3010
3011         memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
3012
3013         bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
3014
3015         if (bond_rss_conf.rss_hf != 0)
3016                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
3017
3018         if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
3019                         sizeof(internals->rss_key)) {
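                /* a zero key length selects the default 40-byte key */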
3020                 if (bond_rss_conf.rss_key_len == 0)
3021                         bond_rss_conf.rss_key_len = 40;
3022                 internals->rss_key_len = bond_rss_conf.rss_key_len;
3023                 memcpy(internals->rss_key, bond_rss_conf.rss_key,
3024                                 internals->rss_key_len);
3025         }
3026
3027         for (i = 0; i < internals->slave_count; i++) {
3028                 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
3029                                 &bond_rss_conf);
3030                 if (result < 0)
3031                         return result;
3032         }
3033
3034         return 0;
3035 }
3036
3037 static int
3038 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
3039                 struct rte_eth_rss_conf *rss_conf)
3040 {
3041         struct bond_dev_private *internals = dev->data->dev_private;
3042
3043         rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
3044         rss_conf->rss_key_len = internals->rss_key_len;
3045         if (rss_conf->rss_key)
3046                 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
3047
3048         return 0;
3049 }
3050
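/* Set the MTU on every slave; fail up front if any slave lacks mtu_set. */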
3051 static int
3052 bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3053 {
3054         struct rte_eth_dev *slave_eth_dev;
3055         struct bond_dev_private *internals = dev->data->dev_private;
3056         int ret, i;
3057
3058         rte_spinlock_lock(&internals->lock);
3059
3060         for (i = 0; i < internals->slave_count; i++) {
3061                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3062                 if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
3063                         rte_spinlock_unlock(&internals->lock);
3064                         return -ENOTSUP;
3065                 }
3066         }
3067         for (i = 0; i < internals->slave_count; i++) {
3068                 ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
3069                 if (ret < 0) {
3070                         rte_spinlock_unlock(&internals->lock);
3071                         return ret;
3072                 }
3073         }
3074
3075         rte_spinlock_unlock(&internals->lock);
3076         return 0;
3077 }
3078
3079 static int
3080 bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
3081                         struct rte_ether_addr *addr)
3082 {
3083         if (mac_address_set(dev, addr)) {
3084                 RTE_BOND_LOG(ERR, "Failed to update MAC address");
3085                 return -EINVAL;
3086         }
3087
3088         return 0;
3089 }
3090
3091 static int
3092 bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
3093                  enum rte_filter_type type, enum rte_filter_op op, void *arg)
3094 {
3095         if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
3096                 *(const void **)arg = &bond_flow_ops;
3097                 return 0;
3098         }
3099         return -ENOTSUP;
3100 }
3101
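/*
 * Add a MAC address on every slave; if any slave fails, roll back the
 * address from the slaves already programmed so the set stays consistent.
 */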
3102 static int
3103 bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
3104                         struct rte_ether_addr *mac_addr,
3105                         __rte_unused uint32_t index, uint32_t vmdq)
3106 {
3107         struct rte_eth_dev *slave_eth_dev;
3108         struct bond_dev_private *internals = dev->data->dev_private;
3109         int ret, i;
3110
3111         rte_spinlock_lock(&internals->lock);
3112
3113         for (i = 0; i < internals->slave_count; i++) {
3114                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3115                 if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
3116                          *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
3117                         ret = -ENOTSUP;
3118                         goto end;
3119                 }
3120         }
3121
3122         for (i = 0; i < internals->slave_count; i++) {
3123                 ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
3124                                 mac_addr, vmdq);
3125                 if (ret < 0) {
3126                         /* rollback */
3127                         for (i--; i >= 0; i--)
3128                                 rte_eth_dev_mac_addr_remove(
3129                                         internals->slaves[i].port_id, mac_addr);
3130                         goto end;
3131                 }
3132         }
3133
3134         ret = 0;
3135 end:
3136         rte_spinlock_unlock(&internals->lock);
3137         return ret;
3138 }
3139
3140 static void
3141 bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
3142 {
3143         struct rte_eth_dev *slave_eth_dev;
3144         struct bond_dev_private *internals = dev->data->dev_private;
3145         int i;
3146
3147         rte_spinlock_lock(&internals->lock);
3148
3149         for (i = 0; i < internals->slave_count; i++) {
3150                 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3151                 if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
3152                         goto end;
3153         }
3154
3155         struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
3156
3157         for (i = 0; i < internals->slave_count; i++)
3158                 rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
3159                                 mac_addr);
3160
3161 end:
3162         rte_spinlock_unlock(&internals->lock);
3163 }
3164
3165 const struct eth_dev_ops default_dev_ops = {
3166         .dev_start            = bond_ethdev_start,
3167         .dev_stop             = bond_ethdev_stop,
3168         .dev_close            = bond_ethdev_close,
3169         .dev_configure        = bond_ethdev_configure,
3170         .dev_infos_get        = bond_ethdev_info,
3171         .vlan_filter_set      = bond_ethdev_vlan_filter_set,
3172         .rx_queue_setup       = bond_ethdev_rx_queue_setup,
3173         .tx_queue_setup       = bond_ethdev_tx_queue_setup,
3174         .rx_queue_release     = bond_ethdev_rx_queue_release,
3175         .tx_queue_release     = bond_ethdev_tx_queue_release,
3176         .link_update          = bond_ethdev_link_update,
3177         .stats_get            = bond_ethdev_stats_get,
3178         .stats_reset          = bond_ethdev_stats_reset,
3179         .promiscuous_enable   = bond_ethdev_promiscuous_enable,
3180         .promiscuous_disable  = bond_ethdev_promiscuous_disable,
3181         .allmulticast_enable  = bond_ethdev_allmulticast_enable,
3182         .allmulticast_disable = bond_ethdev_allmulticast_disable,
3183         .reta_update          = bond_ethdev_rss_reta_update,
3184         .reta_query           = bond_ethdev_rss_reta_query,
3185         .rss_hash_update      = bond_ethdev_rss_hash_update,
3186         .rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get,
3187         .mtu_set              = bond_ethdev_mtu_set,
3188         .mac_addr_set         = bond_ethdev_mac_address_set,
3189         .mac_addr_add         = bond_ethdev_mac_addr_add,
3190         .mac_addr_remove      = bond_ethdev_mac_addr_remove,
3191         .filter_ctrl          = bond_filter_ctrl
3192 };
3193
3194 static int
3195 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3196 {
3197         const char *name = rte_vdev_device_name(dev);
3198         uint8_t socket_id = dev->device.numa_node;
3199         struct bond_dev_private *internals = NULL;
3200         struct rte_eth_dev *eth_dev = NULL;
3201         uint32_t vlan_filter_bmp_size;
3202
3203         /* now do all data allocation - for the eth_dev structure and the
3204          * internal (private) data
3205          */
3206
3207         /* reserve an ethdev entry */
3208         eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
3209         if (eth_dev == NULL) {
3210                 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
3211                 goto err;
3212         }
3213
3214         internals = eth_dev->data->dev_private;
3215         eth_dev->data->nb_rx_queues = (uint16_t)1;
3216         eth_dev->data->nb_tx_queues = (uint16_t)1;
3217
3218         /* Allocate memory for storing MAC addresses */
3219         eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
3220                         BOND_MAX_MAC_ADDRS, 0, socket_id);
3221         if (eth_dev->data->mac_addrs == NULL) {
3222                 RTE_BOND_LOG(ERR,
3223                              "Failed to allocate %u bytes needed to store MAC addresses",
3224                              RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
3225                 goto err;
3226         }
3227
3228         eth_dev->dev_ops = &default_dev_ops;
3229         eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
3230
3231         rte_spinlock_init(&internals->lock);
3232         rte_spinlock_init(&internals->lsc_lock);
3233
3234         internals->port_id = eth_dev->data->port_id;
3235         internals->mode = BONDING_MODE_INVALID;
3236         internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
3237         internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
3238         internals->burst_xmit_hash = burst_xmit_l2_hash;
3239         internals->user_defined_mac = 0;
3240
3241         internals->link_status_polling_enabled = 0;
3242
3243         internals->link_status_polling_interval_ms =
3244                 DEFAULT_POLLING_INTERVAL_10_MS;
3245         internals->link_down_delay_ms = 0;
3246         internals->link_up_delay_ms = 0;
3247
3248         internals->slave_count = 0;
3249         internals->active_slave_count = 0;
3250         internals->rx_offload_capa = 0;
3251         internals->tx_offload_capa = 0;
3252         internals->rx_queue_offload_capa = 0;
3253         internals->tx_queue_offload_capa = 0;
3254         internals->candidate_max_rx_pktlen = 0;
3255         internals->max_rx_pktlen = 0;
3256
3257         /* Initially allow to choose any offload type */
3258         internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
3259
3260         memset(&internals->default_rxconf, 0,
3261                sizeof(internals->default_rxconf));
3262         memset(&internals->default_txconf, 0,
3263                sizeof(internals->default_txconf));
3264
3265         memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
3266         memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
3267
3268         memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
3269         memset(internals->slaves, 0, sizeof(internals->slaves));
3270
3271         TAILQ_INIT(&internals->flow_list);
3272         internals->flow_isolated_valid = 0;
3273
3274         /* Set mode 4 default configuration */
3275         bond_mode_8023ad_setup(eth_dev, NULL);
3276         if (bond_ethdev_mode_set(eth_dev, mode)) {
3277                 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
3278                                  eth_dev->data->port_id, mode);
3279                 goto err;
3280         }
3281
3282         vlan_filter_bmp_size =
3283                 rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
3284         internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
3285                                                    RTE_CACHE_LINE_SIZE);
3286         if (internals->vlan_filter_bmpmem == NULL) {
3287                 RTE_BOND_LOG(ERR,
3288                              "Failed to allocate vlan bitmap for bonded device %u",
3289                              eth_dev->data->port_id);
3290                 goto err;
3291         }
3292
3293         internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
3294                         internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
3295         if (internals->vlan_filter_bmp == NULL) {
3296                 RTE_BOND_LOG(ERR,
3297                              "Failed to init vlan bitmap for bonded device %u",
3298                              eth_dev->data->port_id);
3299                 rte_free(internals->vlan_filter_bmpmem);
3300                 goto err;
3301         }
3302
3303         return eth_dev->data->port_id;
3304
3305 err:
3306         rte_free(internals);
3307         if (eth_dev != NULL)
3308                 eth_dev->data->dev_private = NULL;
3309         rte_eth_dev_release_port(eth_dev);
3310         return -1;
3311 }
3312
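/*
 * vdev probe entry point: parse the device kvargs (mode, socket_id, ...),
 * allocate the bonded ethdev and keep the remaining arguments around for
 * bond_ethdev_configure() to apply at configure time.
 */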
3313 static int
3314 bond_probe(struct rte_vdev_device *dev)
3315 {
3316         const char *name;
3317         struct bond_dev_private *internals;
3318         struct rte_kvargs *kvlist;
3319         uint8_t bonding_mode, socket_id;
3320         int arg_count, port_id;
3321         uint8_t agg_mode;
3322         struct rte_eth_dev *eth_dev;
3323
3324         if (!dev)
3325                 return -EINVAL;
3326
3327         name = rte_vdev_device_name(dev);
3328         RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
3329
3330         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3331                 eth_dev = rte_eth_dev_attach_secondary(name);
3332                 if (!eth_dev) {
3333                         RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3334                         return -1;
3335                 }
3336                 /* TODO: request info from primary to set up Rx and Tx */
3337                 eth_dev->dev_ops = &default_dev_ops;
3338                 eth_dev->device = &dev->device;
3339                 rte_eth_dev_probing_finish(eth_dev);
3340                 return 0;
3341         }
3342
3343         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
3344                 pmd_bond_init_valid_arguments);
3345         if (kvlist == NULL)
3346                 return -1;
3347
3348         /* Parse link bonding mode */
3349         if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
3350                 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
3351                                 &bond_ethdev_parse_slave_mode_kvarg,
3352                                 &bonding_mode) != 0) {
3353                         RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
3354                                         name);
3355                         goto parse_error;
3356                 }
3357         } else {
3358                 RTE_BOND_LOG(ERR, "Mode must be specified exactly once for bonded "
3359                                 "device %s", name);
3360                 goto parse_error;
3361         }
3362
3363         /* Parse socket id to create bonding device on */
3364         arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
3365         if (arg_count == 1) {
3366                 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
3367                                 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
3368                                 != 0) {
3369                         RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
3370                                         "bonded device %s", name);
3371                         goto parse_error;
3372                 }
3373         } else if (arg_count > 1) {
3374                 RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
3375                                 "bonded device %s", name);
3376                 goto parse_error;
3377         } else {
3378                 socket_id = rte_socket_id();
3379         }
3380
3381         dev->device.numa_node = socket_id;
3382
3383         /* Create link bonding eth device */
3384         port_id = bond_alloc(dev, bonding_mode);
3385         if (port_id < 0) {
3386                 RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on "
3387                                 "socket %u.", name, bonding_mode, socket_id);
3388                 goto parse_error;
3389         }
3390         internals = rte_eth_devices[port_id].data->dev_private;
3391         internals->kvlist = kvlist;
3392
3393         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3394                 if (rte_kvargs_process(kvlist,
3395                                 PMD_BOND_AGG_MODE_KVARG,
3396                                 &bond_ethdev_parse_slave_agg_mode_kvarg,
3397                                 &agg_mode) != 0) {
3398                         RTE_BOND_LOG(ERR,
3399                                         "Failed to parse agg selection mode for bonded device %s",
3400                                         name);
3401                         goto parse_error;
3402                 }
3403
3404                 if (internals->mode == BONDING_MODE_8023AD)
3405                         internals->mode4.agg_selection = agg_mode;
3406         } else {
3407                 internals->mode4.agg_selection = AGG_STABLE;
3408         }
3409
3410         rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
3411         RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
3412                         "socket %u.",   name, port_id, bonding_mode, socket_id);
3413         return 0;
3414
3415 parse_error:
3416         rte_kvargs_free(kvlist);
3417
3418         return -1;
3419 }
3420
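/*
 * vdev remove entry point: refuse removal while slaves are still attached;
 * otherwise stop and close a started device before releasing the port.
 */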
3421 static int
3422 bond_remove(struct rte_vdev_device *dev)
3423 {
3424         struct rte_eth_dev *eth_dev;
3425         struct bond_dev_private *internals;
3426         const char *name;
             int ret = 0;
3427
3428         if (!dev)
3429                 return -EINVAL;
3430
3431         name = rte_vdev_device_name(dev);
3432         RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
3433
3434         /* find an ethdev entry */
3435         eth_dev = rte_eth_dev_allocated(name);
3436         if (eth_dev == NULL)
3437                 return 0; /* port already released */
3438
3439         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3440                 return rte_eth_dev_release_port(eth_dev);
3441
3442         RTE_ASSERT(eth_dev->device == &dev->device);
3443
3444         internals = eth_dev->data->dev_private;
3445         if (internals->slave_count != 0)
3446                 return -EBUSY;
3447
3448         if (eth_dev->data->dev_started == 1) {
3449                 ret = bond_ethdev_stop(eth_dev);
3450                 bond_ethdev_close(eth_dev);
3451         }
3452         rte_eth_dev_release_port(eth_dev);
3453
3454         return ret;
3455 }
3456
3457 /* This function resolves the slave port ids after all the other pdevs and
3458  * vdevs have been allocated */
3459 static int
3460 bond_ethdev_configure(struct rte_eth_dev *dev)
3461 {
3462         const char *name = dev->device->name;
3463         struct bond_dev_private *internals = dev->data->dev_private;
3464         struct rte_kvargs *kvlist = internals->kvlist;
3465         int arg_count;
3466         uint16_t port_id = dev - rte_eth_devices;
3467         uint8_t agg_mode;
3468
3469         static const uint8_t default_rss_key[40] = {
3470                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3471                 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3472                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3473                 0xBE, 0xAC, 0x01, 0xFA
3474         };
3475
3476         unsigned i, j;
3477
3478         /*
3479          * If RSS is enabled, fill table with default values and
3480          * set key to the value specified in port RSS configuration.
3481          * Fall back to default RSS key if the key is not specified
3482          */
3483         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
3484                 if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
3485                         internals->rss_key_len =
3486                                 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
3487                         memcpy(internals->rss_key,
3488                                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
3489                                internals->rss_key_len);
3490                 } else {
3491                         internals->rss_key_len = sizeof(default_rss_key);
3492                         memcpy(internals->rss_key, default_rss_key,
3493                                internals->rss_key_len);
3494                 }
3495
3496                 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3497                         internals->reta_conf[i].mask = ~0LL;
3498                         for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
3499                                 internals->reta_conf[i].reta[j] =
3500                                                 (i * RTE_RETA_GROUP_SIZE + j) %
3501                                                 dev->data->nb_rx_queues;
3502                 }
3503         }
3504
3505         /* set the max_rx_pktlen */
3506         internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
3507
3508         /*
3509          * if no kvlist, it means that this bonded device has been created
3510          * through the bonding api.
3511          */
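        /*
         * Illustrative API equivalent (names from rte_eth_bond.h):
         *   port = rte_eth_bond_create("net_bonding0", mode, socket_id);
         *   rte_eth_bond_slave_add(port, slave_port_id);
         */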
3512         if (!kvlist)
3513                 return 0;
3514
3515         /* Parse MAC address for bonded device */
3516         arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
3517         if (arg_count == 1) {
3518                 struct rte_ether_addr bond_mac;
3519
3520                 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
3521                                        &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
3522                         RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
3523                                      name);
3524                         return -1;
3525                 }
3526
3527                 /* Set MAC address */
3528                 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
3529                         RTE_BOND_LOG(ERR,
3530                                      "Failed to set mac address on bonded device %s",
3531                                      name);
3532                         return -1;
3533                 }
3534         } else if (arg_count > 1) {
3535                 RTE_BOND_LOG(ERR,
3536                              "MAC address can be specified only once for bonded device %s",
3537                              name);
3538                 return -1;
3539         }
3540
3541         /* Parse/set balance mode transmit policy */
3542         arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
3543         if (arg_count == 1) {
3544                 uint8_t xmit_policy;
3545
3546                 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
3547                                        &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
3548                     0) {
3549                         RTE_BOND_LOG(INFO,
3550                                      "Invalid xmit policy specified for bonded device %s",
3551                                      name);
3552                         return -1;
3553                 }
3554
3555                 /* Set balance mode transmit policy */
3556                 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
3557                         RTE_BOND_LOG(ERR,
3558                                      "Failed to set balance xmit policy on bonded device %s",
3559                                      name);
3560                         return -1;
3561                 }
3562         } else if (arg_count > 1) {
3563                 RTE_BOND_LOG(ERR,
3564                              "Transmit policy can be specified only once for bonded device %s",
3565                              name);
3566                 return -1;
3567         }
3568
3569         if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3570                 if (rte_kvargs_process(kvlist,
3571                                        PMD_BOND_AGG_MODE_KVARG,
3572                                        &bond_ethdev_parse_slave_agg_mode_kvarg,
3573                                        &agg_mode) != 0) {
3574                         RTE_BOND_LOG(ERR,
3575                                      "Failed to parse agg selection mode for bonded device %s",
3576                                      name);
                             return -1;
3577                 }
3578                 if (internals->mode == BONDING_MODE_8023AD) {
3579                         int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
3580                                         agg_mode);
3581                         if (ret < 0) {
3582                                 RTE_BOND_LOG(ERR,
3583                                         "Invalid args for agg selection set for bonded device %s",
3584                                         name);
3585                                 return -1;
3586                         }
3587                 }
3588         }
3589
3590         /* Parse/add slave ports to bonded device */
3591         if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
3592                 struct bond_ethdev_slave_ports slave_ports;
3593                 unsigned i;
3594
3595                 memset(&slave_ports, 0, sizeof(slave_ports));
3596
3597                 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
3598                                        &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
3599                         RTE_BOND_LOG(ERR,
3600                                      "Failed to parse slave ports for bonded device %s",
3601                                      name);
3602                         return -1;
3603                 }
3604
3605                 for (i = 0; i < slave_ports.slave_count; i++) {
3606                         if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
3607                                 RTE_BOND_LOG(ERR,
3608                                              "Failed to add port %d as slave to bonded device %s",
3609                                              slave_ports.slaves[i], name);
3610                         }
3611                 }
3612
3613         } else {
3614                 RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
3615                 return -1;
3616         }
3617
3618         /* Parse/set primary slave port id */
3619         arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
3620         if (arg_count == 1) {
3621                 uint16_t primary_slave_port_id;
3622
3623                 if (rte_kvargs_process(kvlist,
3624                                        PMD_BOND_PRIMARY_SLAVE_KVARG,
3625                                        &bond_ethdev_parse_primary_slave_port_id_kvarg,
3626                                        &primary_slave_port_id) < 0) {
3627                         RTE_BOND_LOG(INFO,
3628                                      "Invalid primary slave port id specified for bonded device %s",
3629                                      name);
3630                         return -1;
3631                 }
3632
3633                 /* Set the primary slave port id */
3634                 if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
3635                     != 0) {
3636                         RTE_BOND_LOG(ERR,
3637                                      "Failed to set primary slave port %d on bonded device %s",
3638                                      primary_slave_port_id, name);
3639                         return -1;
3640                 }
3641         } else if (arg_count > 1) {
3642                 RTE_BOND_LOG(INFO,
3643                              "Primary slave can be specified only once for bonded device %s",
3644                              name);
3645                 return -1;
3646         }
3647
3648         /* Parse link status monitor polling interval */
3649         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
3650         if (arg_count == 1) {
3651                 uint32_t lsc_poll_interval_ms;
3652
3653                 if (rte_kvargs_process(kvlist,
3654                                        PMD_BOND_LSC_POLL_PERIOD_KVARG,
3655                                        &bond_ethdev_parse_time_ms_kvarg,
3656                                        &lsc_poll_interval_ms) < 0) {
3657                         RTE_BOND_LOG(INFO,
3658                                      "Invalid lsc polling interval value specified for bonded"
3659                                      " device %s", name);
3660                         return -1;
3661                 }
3662
3663                 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
3664                     != 0) {
3665                         RTE_BOND_LOG(ERR,
3666                                      "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
3667                                      lsc_poll_interval_ms, name);
3668                         return -1;
3669                 }
3670         } else if (arg_count > 1) {
3671                 RTE_BOND_LOG(INFO,
3672                              "LSC polling interval can be specified only once for bonded"
3673                              " device %s", name);
3674                 return -1;
3675         }
3676
3677         /* Parse link up interrupt propagation delay */
3678         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
3679         if (arg_count == 1) {
3680                 uint32_t link_up_delay_ms;
3681
3682                 if (rte_kvargs_process(kvlist,
3683                                        PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
3684                                        &bond_ethdev_parse_time_ms_kvarg,
3685                                        &link_up_delay_ms) < 0) {
3686                         RTE_BOND_LOG(INFO,
3687                                      "Invalid link up propagation delay value specified for"
3688                                      " bonded device %s", name);
3689                         return -1;
3690                 }
3691
3692                 /* Set link up propagation delay */
3693                 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
3694                     != 0) {
3695                         RTE_BOND_LOG(ERR,
3696                                      "Failed to set link up propagation delay (%u ms) on bonded"
3697                                      " device %s", link_up_delay_ms, name);
3698                         return -1;
3699                 }
3700         } else if (arg_count > 1) {
3701                 RTE_BOND_LOG(INFO,
3702                              "Link up propagation delay can be specified only once for"
3703                              " bonded device %s", name);
3704                 return -1;
3705         }
3706
3707         /* Parse link down interrupt propagation delay */
3708         arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
3709         if (arg_count == 1) {
3710                 uint32_t link_down_delay_ms;
3711
3712                 if (rte_kvargs_process(kvlist,
3713                                        PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
3714                                        &bond_ethdev_parse_time_ms_kvarg,
3715                                        &link_down_delay_ms) < 0) {
3716                         RTE_BOND_LOG(INFO,
3717                                      "Invalid link down propagation delay value specified for"
3718                                      " bonded device %s", name);
3719                         return -1;
3720                 }
3721
3722                 /* Set link down propagation delay */
3723                 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
3724                     != 0) {
3725                         RTE_BOND_LOG(ERR,
3726                                      "Failed to set link down propagation delay (%u ms) on bonded device %s",
3727                                      link_down_delay_ms, name);
3728                         return -1;
3729                 }
3730         } else if (arg_count > 1) {
3731                 RTE_BOND_LOG(INFO,
3732                              "Link down propagation delay can be specified only once for bonded device %s",
3733                              name);
3734                 return -1;
3735         }
3736
3737         return 0;
3738 }
3739
3740 struct rte_vdev_driver pmd_bond_drv = {
3741         .probe = bond_probe,
3742         .remove = bond_remove,
3743 };
3744
3745 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
3746 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
3747
3748 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
3749         "slave=<ifc> "
3750         "primary=<ifc> "
3751         "mode=[0-6] "
3752         "xmit_policy=[l2 | l23 | l34] "
3753         "agg_mode=[count | stable | bandwidth] "
3754         "socket_id=<int> "
3755         "mac=<mac addr> "
3756         "lsc_poll_period_ms=<int> "
3757         "up_delay=<int> "
3758         "down_delay=<int>");
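/*
 * Example (illustrative, assumed PCI addresses): create a bonded device
 * from the EAL command line:
 *   --vdev 'net_bonding0,mode=1,slave=0000:00:08.0,slave=0000:00:09.0'
 */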
3759
3760 RTE_LOG_REGISTER(bond_logtype, pmd.net.bond, NOTICE);