/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <string.h>

#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_cycles.h>

#include "failsafe_private.h"

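/*
 * Helpers keeping sub-devices in sync with the fail-safe port:
 * configuration replay, statistics accumulation, sub-device removal
 * and ethdev event callbacks.
 */
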
/** Print a message out of a flow error. */
static int
fs_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	ERROR("Caught error type %d (%s): %s%s\n",
	      error->type, errstr,
	      error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
				       error->cause), buf) : "",
	      error->message ? error->message : "(no stated reason)");
	return -err;
}

static int
eth_dev_flow_isolate_set(struct rte_eth_dev *dev,
			 struct sub_device *sdev)
{
	struct rte_flow_error ferror;
	int ret;

	if (!PRIV(dev)->flow_isolated) {
		DEBUG("Flow isolation already disabled");
	} else {
		DEBUG("Enabling flow isolation");
		ret = rte_flow_isolate(PORT_ID(sdev),
				       PRIV(dev)->flow_isolated,
				       &ferror);
		if (ret) {
			fs_flow_complain(&ferror);
			return ret;
		}
	}
	return 0;
}

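/*
 * Replay the fail-safe port configuration on a freshly probed sub-device:
 * Rx/Tx queues, link state, promiscuous and all-multicast modes, MTU,
 * MAC addresses, VLAN filter and flow rules.
 */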
static int
fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
		struct sub_device *sdev)
{
	struct rte_eth_dev *edev;
	struct rte_vlan_filter_conf *vfc1;
	struct rte_vlan_filter_conf *vfc2;
	struct rte_flow *flow;
	struct rte_flow_error ferror;
	uint32_t i;
	int ret;

	edev = ETH(sdev);
	/* RX queue setup */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct rxq *rxq;

		rxq = dev->data->rx_queues[i];
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev), i,
				rxq->info.nb_desc, rxq->socket_id,
				&rxq->info.conf, rxq->info.mp);
		if (ret) {
			ERROR("rx_queue_setup failed");
			return ret;
		}
	}
	/* TX queue setup */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct txq *txq;

		txq = dev->data->tx_queues[i];
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev), i,
				txq->info.nb_desc, txq->socket_id,
				&txq->info.conf);
		if (ret) {
			ERROR("tx_queue_setup failed");
			return ret;
		}
	}
	/* dev_link.link_status */
	if (dev->data->dev_link.link_status !=
	    edev->data->dev_link.link_status) {
		DEBUG("Configuring link_status");
		if (dev->data->dev_link.link_status)
			ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		else
			ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if (ret) {
			ERROR("Failed to apply link_status");
			return ret;
		}
	} else {
		DEBUG("link_status already set");
	}
	/* promiscuous */
	if (dev->data->promiscuous != edev->data->promiscuous) {
		DEBUG("Configuring promiscuous");
		if (dev->data->promiscuous)
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		else
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		if (ret) {
			ERROR("Failed to apply promiscuous mode");
			return ret;
		}
	} else {
		DEBUG("promiscuous already set");
	}
	/* all_multicast */
	if (dev->data->all_multicast != edev->data->all_multicast) {
		DEBUG("Configuring all_multicast");
		if (dev->data->all_multicast)
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		else
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		if (ret) {
			ERROR("Failed to apply allmulticast mode");
			return ret;
		}
	} else {
		DEBUG("all_multicast already set");
	}
	/* MTU */
	if (dev->data->mtu != edev->data->mtu) {
		DEBUG("Configuring MTU");
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), dev->data->mtu);
		if (ret) {
			ERROR("Failed to apply MTU");
			return ret;
		}
	} else {
		DEBUG("MTU already set");
	}
165 DEBUG("Configuring default MAC address");
166 ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
167 &dev->data->mac_addrs[0]);
169 ERROR("Setting default MAC address failed");
	/* additional MAC addresses */
	if (PRIV(dev)->nb_mac_addr > 1)
		DEBUG("Configure additional MAC address%s",
			(PRIV(dev)->nb_mac_addr > 2 ? "es" : ""));
	for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) {
		struct rte_ether_addr *ea;

		ea = &dev->data->mac_addrs[i];
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
				PRIV(dev)->mac_addr_pool[i]);
		if (ret) {
			char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];

			rte_ether_format_addr(ea_fmt,
					RTE_ETHER_ADDR_FMT_SIZE, ea);
			ERROR("Adding MAC address %s failed", ea_fmt);
			return ret;
		}
	}
	/*
	 * Propagate multicast MAC addresses to sub-devices,
	 * if a non-zero number of addresses is set.
	 * The condition is required to avoid breaking failsafe
	 * on sub-devices that do not support the operation
	 * when the feature is not actually used.
	 */
	if (PRIV(dev)->nb_mcast_addr > 0) {
		DEBUG("Configuring multicast MAC addresses");
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   PRIV(dev)->mcast_addrs,
						   PRIV(dev)->nb_mcast_addr);
		if (ret) {
			ERROR("Failed to apply multicast MAC addresses");
			return ret;
		}
	}
209 vfc1 = &dev->data->vlan_filter_conf;
210 vfc2 = &edev->data->vlan_filter_conf;
211 if (memcmp(vfc1, vfc2, sizeof(struct rte_vlan_filter_conf))) {
217 DEBUG("Configuring VLAN filter");
218 for (i = 0; i < RTE_DIM(vfc1->ids); i++) {
219 if (vfc1->ids[i] == 0)
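				/*
				 * Example with ids = 0x28 (bits 3 and 5 set):
				 * vbit = ~ids & (ids - 1) = 0x07, the mask of
				 * the trailing zeroes below bit 3;
				 * ids ^= (ids ^ (ids - 1)) ^ vbit clears bit 3,
				 * leaving ids = 0x20 for the next iteration;
				 * shifting vbit right until it is zero adds 3
				 * to vlan_id, giving vlan_id = 64 * i + 3.
				 */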
				/* count trailing zeroes */
				vbit = ~ids & (ids - 1);
				/* clear least significant bit set */
				ids ^= (ids ^ (ids - 1)) ^ vbit;
				for (; vbit; vlan_id++)
					vbit >>= 1;
				ret = rte_eth_dev_vlan_filter(
					PORT_ID(sdev), vlan_id, 1);
				if (ret) {
					ERROR("Failed to apply VLAN filter %hu",
						vlan_id);
					return ret;
				}
			}
		}
	} else {
		DEBUG("VLAN filter already set");
	}
	/* rte_flow */
	if (TAILQ_EMPTY(&PRIV(dev)->flow_list)) {
		DEBUG("rte_flow already set");
	} else {
		DEBUG("Resetting rte_flow configuration");
		ret = rte_flow_flush(PORT_ID(sdev), &ferror);
		if (ret) {
			fs_flow_complain(&ferror);
			return ret;
		}
		i = 0;
		rte_errno = 0;
		DEBUG("Configuring rte_flow");
		TAILQ_FOREACH(flow, &PRIV(dev)->flow_list, next) {
			DEBUG("Creating flow #%" PRIu32, i++);
			flow->flows[SUB_ID(sdev)] =
				rte_flow_create(PORT_ID(sdev),
						flow->rule.attr,
						flow->rule.pattern,
						flow->rule.actions,
						&ferror);
			ret = rte_errno;
			if (ret)
				break;
		}
		if (ret) {
			fs_flow_complain(&ferror);
			return ret;
		}
	}
	return 0;
}

static void
fs_dev_remove(struct sub_device *sdev)
{
	int ret;

	if (sdev == NULL)
		return;
	switch (sdev->state) {
	case DEV_STARTED:
		failsafe_rx_intr_uninstall_subdevice(sdev);
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE;
		/* fallthrough */
	case DEV_ACTIVE:
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_PROBED;
		/* fallthrough */
	case DEV_PROBED:
		ret = rte_dev_remove(sdev->dev);
		if (ret < 0) {
			ERROR("Bus detach failed for sub_device %u",
			      SUB_ID(sdev));
		} else {
			rte_eth_dev_release_port(ETH(sdev));
		}
		sdev->state = DEV_PARSED;
		/* fallthrough */
	case DEV_PARSED:
	case DEV_UNDEFINED:
		sdev->state = DEV_UNDEFINED;
		sdev->sdev_port_id = RTE_MAX_ETHPORTS;
	default:
		break;
	}
	sdev->remove = 0;
	failsafe_hotplug_alarm_install(fs_dev(sdev));
}

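/*
 * Fold a sub-device's statistics into the fail-safe accumulator before
 * the sub-device goes away. If the current counters cannot be read,
 * fall back to the last stored snapshot.
 */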
static void
fs_dev_stats_save(struct sub_device *sdev)
{
	struct rte_eth_stats stats;
	int err;

	/* Attempt to read current stats. */
	err = rte_eth_stats_get(PORT_ID(sdev), &stats);
	if (err) {
		uint64_t timestamp = sdev->stats_snapshot.timestamp;

		WARN("Could not access latest statistics from sub-device %d.\n",
		     SUB_ID(sdev));
		if (timestamp != 0)
			WARN("Using latest snapshot taken before %"PRIu64" seconds.\n",
			     (rte_rdtsc() - timestamp) / rte_get_tsc_hz());
	}
	failsafe_stats_increment
		(&PRIV(fs_dev(sdev))->stats_accumulator,
		err ? &sdev->stats_snapshot.stats : &stats);
	memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot));
}

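/*
 * Return a non-zero value only when no Rx or Tx burst appears to be in
 * flight on any of the sub-device queues, i.e. its datapath is quiescent
 * and the sub-device can be torn down safely.
 */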
static inline int
fs_rxtx_clean(struct sub_device *sdev)
{
	uint16_t i;

	for (i = 0; i < ETH(sdev)->data->nb_rx_queues; i++)
		if (FS_ATOMIC_RX(sdev, i))
			return 0;
	for (i = 0; i < ETH(sdev)->data->nb_tx_queues; i++)
		if (FS_ATOMIC_TX(sdev, i))
			return 0;
	return 1;
}

void
failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
{
	int ret;

	if (sdev == NULL)
		return;
	if (sdev->rmv_callback) {
		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
		if (ret)
			WARN("Failed to unregister RMV callback for sub_device"
			     " %d", SUB_ID(sdev));
		sdev->rmv_callback = 0;
	}
	if (sdev->lsc_callback) {
		/* The callback argument must match the one used at registration. */
		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					fs_dev(sdev));
		if (ret)
			WARN("Failed to unregister LSC callback for sub_device"
			     " %d", SUB_ID(sdev));
		sdev->lsc_callback = 0;
	}
}

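/*
 * Remove every sub-device flagged for removal, once its datapath is
 * quiescent and the fail-safe device lock can be taken.
 */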
void
failsafe_dev_remove(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		if (sdev->remove && fs_rxtx_clean(sdev)) {
			if (fs_lock(dev, 1) != 0)
				return;
			fs_dev_stats_save(sdev);
			fs_dev_remove(sdev);
			fs_unlock(dev, 1);
		}
}

static int
failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	int ret;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->info.conf.rx_deferred_start &&
		    dev->data->rx_queue_state[i] ==
						RTE_ETH_QUEUE_STATE_STARTED) {
			/*
			 * The subdevice Rx queue does not launch on device
			 * start if deferred start flag is set. It needs to be
			 * started manually in case an appropriate failsafe Rx
			 * queue has been started earlier.
			 */
			ret = dev->dev_ops->rx_queue_start(dev, i);
			if (ret) {
				ERROR("Could not synchronize Rx queue %d", i);
				return ret;
			}
		} else if (dev->data->rx_queue_state[i] ==
						RTE_ETH_QUEUE_STATE_STOPPED) {
			/*
			 * The subdevice Rx queue needs to be stopped manually
			 * in case an appropriate failsafe Rx queue has been
			 * stopped earlier.
			 */
			ret = dev->dev_ops->rx_queue_stop(dev, i);
			if (ret) {
				ERROR("Could not synchronize Rx queue %d", i);
				return ret;
			}
		}
	}
	return 0;
}

static int
failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
{
	struct txq *txq;
	int ret;
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq->info.conf.tx_deferred_start &&
		    dev->data->tx_queue_state[i] ==
						RTE_ETH_QUEUE_STATE_STARTED) {
			/*
			 * The subdevice Tx queue does not launch on device
			 * start if deferred start flag is set. It needs to be
			 * started manually in case an appropriate failsafe Tx
			 * queue has been started earlier.
			 */
			ret = dev->dev_ops->tx_queue_start(dev, i);
			if (ret) {
				ERROR("Could not synchronize Tx queue %d", i);
				return ret;
			}
		} else if (dev->data->tx_queue_state[i] ==
						RTE_ETH_QUEUE_STATE_STOPPED) {
			/*
			 * The subdevice Tx queue needs to be stopped manually
			 * in case an appropriate failsafe Tx queue has been
			 * stopped earlier.
			 */
			ret = dev->dev_ops->tx_queue_stop(dev, i);
			if (ret) {
				ERROR("Could not synchronize Tx queue %d", i);
				return ret;
			}
		}
	}
	return 0;
}

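/*
 * Bring every sub-device up to the state of the fail-safe port, stage by
 * stage: argument parsing (DEV_PARSED), probing (DEV_PROBED), configuration
 * (DEV_ACTIVE), then start and deferred-start queue synchronization
 * (DEV_STARTED). On failure, flag out-of-sync sub-devices for removal.
 */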
int
failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint32_t inactive;
	int ret;
	uint8_t i;

	if (PRIV(dev)->state < DEV_PARSED)
		return 0;

	ret = failsafe_args_parse_subs(dev);
	if (ret)
		goto err_remove;

	if (PRIV(dev)->state < DEV_PROBED)
		return 0;
	ret = failsafe_eal_init(dev);
	if (ret)
		goto err_remove;
	if (PRIV(dev)->state < DEV_ACTIVE)
		return 0;
	inactive = 0;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state == DEV_PROBED) {
			inactive |= UINT32_C(1) << i;
			ret = eth_dev_flow_isolate_set(dev, sdev);
			if (ret) {
				ERROR("Could not apply configuration to sub_device %d",
				      i);
				goto err_remove;
			}
		}
	}
	ret = dev->dev_ops->dev_configure(dev);
	if (ret)
		goto err_remove;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (inactive & (UINT32_C(1) << i)) {
			ret = fs_eth_dev_conf_apply(dev, sdev);
			if (ret) {
				ERROR("Could not apply configuration to sub_device %d",
				      i);
				goto err_remove;
			}
		}
	}
	/*
	 * If new devices have been configured, check if
	 * the link state has changed.
	 */
	if (inactive)
		dev->dev_ops->link_update(dev, 1);
	if (PRIV(dev)->state < DEV_STARTED)
		return 0;
	ret = dev->dev_ops->dev_start(dev);
	if (ret)
		goto err_remove;
	ret = failsafe_eth_dev_rx_queues_sync(dev);
	if (ret)
		goto err_remove;
	ret = failsafe_eth_dev_tx_queues_sync(dev);
	if (ret)
		goto err_remove;
	return 0;
err_remove:
	FOREACH_SUBDEV(sdev, i, dev)
		if (sdev->state != PRIV(dev)->state)
			sdev->remove = 1;
	return ret;
}

void
failsafe_stats_increment(struct rte_eth_stats *to, struct rte_eth_stats *from)
{
	uint32_t i;

	RTE_ASSERT(to != NULL && from != NULL);
	to->ipackets += from->ipackets;
	to->opackets += from->opackets;
	to->ibytes += from->ibytes;
	to->obytes += from->obytes;
	to->imissed += from->imissed;
	to->ierrors += from->ierrors;
	to->oerrors += from->oerrors;
	to->rx_nombuf += from->rx_nombuf;
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		to->q_ipackets[i] += from->q_ipackets[i];
		to->q_opackets[i] += from->q_opackets[i];
		to->q_ibytes[i] += from->q_ibytes[i];
		to->q_obytes[i] += from->q_obytes[i];
		to->q_errors[i] += from->q_errors[i];
	}
}

int
failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
				enum rte_eth_event_type event __rte_unused,
				void *cb_arg, void *out __rte_unused)
{
	struct sub_device *sdev = cb_arg;

	fs_lock(fs_dev(sdev), 0);
	/* Switch the Tx sub-device as soon as possible. */
	fs_switch_dev(fs_dev(sdev), sdev);
	/* Use safe bursts in any case. */
	failsafe_set_burst_fn(fs_dev(sdev), 1);
	/*
	 * Async removal, the sub-PMD will try to unregister
	 * the callback at the source of the current thread context.
	 */
	sdev->remove = 1;
	fs_unlock(fs_dev(sdev), 0);
	return 0;
}

int
failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
				enum rte_eth_event_type event __rte_unused,
				void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;
	int ret;

	ret = dev->dev_ops->link_update(dev, 0);
	/* We must pass on the LSC event. */
	if (ret)
		return _rte_eth_dev_callback_process(dev,
						     RTE_ETH_EVENT_INTR_LSC,
						     NULL);
	else
		return 0;
}

/* Take sub-device ownership before it becomes exposed to the application. */
int
failsafe_eth_new_event_callback(uint16_t port_id,
				enum rte_eth_event_type event __rte_unused,
				void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *fs_dev = cb_arg;
	struct sub_device *sdev;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) {
		if (sdev->state >= DEV_PROBED)
			continue;
		if (dev->device == NULL) {
			WARN("Trying to probe malformed device %s.\n",
			     sdev->devargs.name);
			continue;
		}
		if (strcmp(sdev->devargs.name, dev->device->name) != 0)
			continue;
		rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner);
		/* The actual owner will be checked after the port probing. */
		break;
	}
	return 0;
}