/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_flow_driver.h>
#include <rte_kvargs.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
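
/*
 * VF representor for the Cisco VIC PF. The representor has no queues of its
 * own: its Tx/Rx path borrows WQ/RQ/CQ slots reserved on the PF, and VF
 * settings are reached through "proxy" devcmd (see the dev_ops comment near
 * the end of this file). As an aside (an assumption, not stated in this
 * file), representor ports are normally created through the standard ethdev
 * devargs syntax on the PF device, e.g. "-a <pf-bdf>,representor=[0-2]".
 */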

static uint16_t enic_vf_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}

static uint16_t enic_vf_xmit_pkts(void *tx_queue,
				  struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}
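
/*
 * The representor reuses the PF's burst functions as-is: the queue pointers
 * registered in the rx/tx_queue_setup handlers below are PF-owned vnic_rq/
 * vnic_wq slots, so enic_recv_pkts() and enic_xmit_pkts() work on them
 * without modification.
 */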

static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	struct enic_vf_representor *vf;
	struct vnic_wq *wq;
	struct enic *pf;
	int err;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	wq = &pf->wq[vf->pf_wq_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[0] = (void *)wq;
	/* Pass vf not pf because of cq index calculation. See enic_alloc_wq */
	err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
	if (err) {
		ENICPMD_LOG(ERR, "error in allocating wq\n");
		return err;
	}
	return 0;
}

static void enic_vf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_wq(txq);
}

static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	struct enic_vf_representor *vf;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only 1 queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&pf->rq[vf->pf_rq_sop_idx];
	ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		ENICPMD_LOG(ERR, "error in allocating rq\n");
		return ret;
	}
	return 0;
}

static void enic_vf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_rq(rxq);
}

static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	return 0;
}

static int
setup_rep_vf_fwd(struct enic_vf_representor *vf)
{
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Representor -> VF rule
	 * Egress packets from this representor are on the representor's WQ.
	 * So, loop back that WQ to VF.
	 */
	ret = enic_fm_add_rep2vf_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create representor->VF flow");
		return ret;
	}
	/* VF -> representor rule
	 * Packets from VF loop back to the representor, unless they match
	 * user-added flows.
	 */
	ret = enic_fm_add_vf2rep_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create VF->representor flow");
		return ret;
	}
	return 0;
}
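
/*
 * These two flowman rules form the representor's implicit forwarding path:
 * representor Tx loops back to the VF, and VF Tx loops back to the
 * representor unless a user-added rte_flow rule (installed via
 * enic_vf_flow_ops below) matches the packet first.
 */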

static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *data_rq;
	int index, cq_idx;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/* Get representor flowman for flow API and representor path */
	ret = enic_fm_init(&vf->enic);
	if (ret)
		return ret;
	/* Set up implicit flow rules to forward between representor and VF */
	ret = setup_rep_vf_fwd(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot set up representor-VF flows");
		return ret;
	}
	/* Remove all packet filters so no ingress packets go to VF.
	 * When PF enables switchdev, it will ensure packet filters
	 * are removed. So, this is not technically needed.
	 */
	ENICPMD_LOG(DEBUG, "Clear packet filters");
	ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot clear packet filters");
		return ret;
	}

	/* Start WQ: see enic_init_vnic_resources */
	index = vf->pf_wq_idx;
	cq_idx = vf->pf_wq_cq_idx;
	vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     0 /* cq_entry_enable */,
		     1 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     (uint64_t)pf->wq[index].cqmsg_rz->iova);
	/* enic_start_wq */
	vnic_wq_enable(&pf->wq[index]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

	/* Start RQ: see enic_init_vnic_resources */
	index = vf->pf_rq_sop_idx;
	cq_idx = enic_cq_rq(vf->pf, index);
	vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
	data_rq = &pf->rq[vf->pf_rq_data_idx];
	if (data_rq->in_use)
		vnic_rq_init(data_rq, cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);
	/* enic_start_rq */
	ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
	if (ret) {
		ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs\n");
		return ret;
	}
	ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
	if (ret) {
		/* Release the allocated mbufs for the sop rq */
		enic_rxmbuf_queue_release(pf, &pf->rq[index]);
		ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs\n");
		return ret;
	}
	enic_start_rq(pf, vf->pf_rq_sop_idx);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
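
/*
 * The start sequence above mirrors what the PF does for itself in
 * enic_init_vnic_resources()/enic_start_wq()/enic_start_rq(), but touches
 * only the PF queue slots reserved for this representor (pf_wq_idx,
 * pf_rq_sop_idx, pf_rq_data_idx).
 */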

static int enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *rq;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Undo dev_start. Disable/clean WQ */
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
	vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
	vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
	/* Disable/clean RQ */
	rq = &pf->rq[vf->pf_rq_sop_idx];
	vnic_rq_disable(rq);
	vnic_rq_clean(rq, enic_free_rq_buf);
	rq = &pf->rq[vf->pf_rq_data_idx];
	if (rq->in_use) {
		vnic_rq_disable(rq);
		vnic_rq_clean(rq, enic_free_rq_buf);
	}
	vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Clean up representor flowman */
	enic_fm_destroy(&vf->enic);
	return 0;
}
283 * "close" is no-op for now and solely exists so that rte_eth_dev_close()
284 * can finish its own cleanup without errors.
286 static int enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
288 ENICPMD_FUNC_TRACE();
289 if (rte_eal_process_type() != RTE_PROC_PRIMARY)

static int
adjust_flow_attr(const struct rte_flow_attr *attrs,
		 struct rte_flow_attr *vf_attrs,
		 struct rte_flow_error *error)
{
	if (!attrs) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "no attribute specified");
	}
	/*
	 * Swap ingress and egress as the firmware view of direction
	 * is the opposite of the representor.
	 */
	*vf_attrs = *attrs;
	if (attrs->ingress && !attrs->egress) {
		vf_attrs->ingress = 0;
		vf_attrs->egress = 1;
		return 0;
	}
	return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
			"representor only supports ingress");
}

static int
enic_vf_flow_validate(struct rte_eth_dev *dev,
		      const struct rte_flow_attr *attrs,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;
	int ret;

	ret = adjust_flow_attr(attrs, &vf_attrs, error);
	if (ret)
		return ret;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.validate(dev, attrs, pattern, actions, error);
}

static struct rte_flow *
enic_vf_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;

	if (adjust_flow_attr(attrs, &vf_attrs, error))
		return NULL;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.create(dev, attrs, pattern, actions, error);
}

static int
enic_vf_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	return enic_fm_flow_ops.destroy(dev, flow, error);
}

static int
enic_vf_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *actions,
		   void *data,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.query(dev, flow, actions, data, error);
}

static int
enic_vf_flow_flush(struct rte_eth_dev *dev,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.flush(dev, error);
}

static const struct rte_flow_ops enic_vf_flow_ops = {
	.validate = enic_vf_flow_validate,
	.create = enic_vf_flow_create,
	.destroy = enic_vf_flow_destroy,
	.flush = enic_vf_flow_flush,
	.query = enic_vf_flow_query,
};

static int
enic_vf_flow_ops_get(struct rte_eth_dev *eth_dev,
		     const struct rte_flow_ops **ops)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	if (vf->enic.flow_filter_mode != FILTER_FLOWMAN) {
		ENICPMD_LOG(WARNING,
			    "VF representors require flowman support for rte_flow API");
		return -EINVAL;
	}
	*ops = &enic_vf_flow_ops;
	return 0;
}

static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
	int wait_to_complete __rte_unused)
{
	struct enic_vf_representor *vf;
	struct rte_eth_link link;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/*
	 * Link status and speed are same as PF. Update PF status and then
	 * copy it to VF.
	 */
	enic_link_update(pf->rte_dev);
	rte_eth_linkstatus_get(pf->rte_dev, &link);
	rte_eth_linkstatus_set(eth_dev, &link);
	return 0;
}

static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic_vf_representor *vf;
	struct vnic_stats *vs;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Get VF stats via PF */
	err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting stats\n");
		return err;
	}
	stats->ipackets = vs->rx.rx_frames_ok;
	stats->opackets = vs->tx.tx_frames_ok;
	stats->ibytes = vs->rx.rx_bytes_ok;
	stats->obytes = vs->tx.tx_bytes_ok;
	stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
	stats->oerrors = vs->tx.tx_errors;
	stats->imissed = vs->rx.rx_no_bufs;
	return 0;
}
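
/*
 * Note: these are the VF vNIC's own hardware counters fetched via proxy
 * devcmd, so they presumably reflect all VF traffic, not only packets that
 * traversed the representor path.
 */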

static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Ask PF to clear VF stats */
	err = vnic_dev_stats_clear(vf->enic.vdev);
	if (err)
		ENICPMD_LOG(ERR, "error in clearing stats\n");
	return err;
}

static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic_vf_representor *vf;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
	device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* Max packet size is same as PF */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	/* No offload capa, RSS, etc. until Tx/Rx handlers are added */
	device_info->rx_offload_capa = 0;
	device_info->tx_offload_capa = 0;
	device_info->switch_info.name = pf->rte_dev->device->name;
	device_info->switch_info.domain_id = vf->switch_domain_id;
	device_info->switch_info.port_id = vf->vf_id;
	return 0;
}

static void set_vf_packet_filter(struct enic_vf_representor *vf)
{
	/* switchdev: packet filters are ignored */
	if (vf->enic.switchdev_mode)
		return;
	/* Ask PF to apply filters on VF */
	vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
		1 /* bcast */, vf->promisc, vf->allmulti);
}

static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 0;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 0;
	set_vf_packet_filter(vf);
	return 0;
}

/*
 * A minimal set of handlers.
 * The representor can get/set a small set of VF settings via "proxy" devcmd.
 * With proxy devcmd, the PF driver basically tells the VIC firmware to
 * "perform this devcmd on that VF".
 */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
	.allmulticast_enable = enic_vf_allmulticast_enable,
	.allmulticast_disable = enic_vf_allmulticast_disable,
	.dev_configure = enic_vf_dev_configure,
	.dev_infos_get = enic_vf_dev_infos_get,
	.dev_start = enic_vf_dev_start,
	.dev_stop = enic_vf_dev_stop,
	.dev_close = enic_vf_dev_close,
	.flow_ops_get = enic_vf_flow_ops_get,
	.link_update = enic_vf_link_update,
	.promiscuous_enable = enic_vf_promiscuous_enable,
	.promiscuous_disable = enic_vf_promiscuous_disable,
	.stats_get = enic_vf_stats_get,
	.stats_reset = enic_vf_stats_reset,
	.rx_queue_setup = enic_vf_dev_rx_queue_setup,
	.rx_queue_release = enic_vf_dev_rx_queue_release,
	.tx_queue_setup = enic_vf_dev_tx_queue_setup,
	.tx_queue_release = enic_vf_dev_tx_queue_release,
};

static int get_vf_config(struct enic_vf_representor *vf)
{
	struct vnic_enet_config *c;
	struct enic *pf;
	int switch_mtu;
	int err;

	c = &vf->config;
	pf = vf->pf;
	/* VF MAC */
	err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MAC address\n");
		return err;
	}
	rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);

	/* VF MTU per its vNIC setting */
	err = vnic_dev_spec(vf->enic.vdev,
			    offsetof(struct vnic_enet_config, mtu),
			    sizeof(c->mtu), &c->mtu);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MTU\n");
		return err;
	}
	/*
	 * Blade switch (fabric interconnect) port's MTU. Assume the kernel
	 * enic driver runs on VF. That driver automatically adjusts its MTU
	 * according to the switch MTU.
	 */
	switch_mtu = vnic_dev_mtu(pf->vdev);
	vf->eth_dev->data->mtu = c->mtu;
	if (switch_mtu > c->mtu)
		vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
	return 0;
}
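
/*
 * Worked example of the MTU logic above: with the VF vNIC provisioned at
 * mtu=1500 behind a fabric interconnect port at mtu=9000, the representor
 * reports RTE_MIN(ENIC_MAX_MTU, 9000) rather than 1500, matching what the
 * kernel enic driver on the VF would negotiate.
 */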

int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
	struct enic_vf_representor *vf, *params;
	struct rte_pci_device *pdev;
	struct enic *pf, *vf_enic;
	struct rte_pci_addr *addr;
	int ret;

	ENICPMD_FUNC_TRACE();
	params = init_params;
	vf = eth_dev->data->dev_private;
	vf->switch_domain_id = params->switch_domain_id;
	vf->vf_id = params->vf_id;
	vf->eth_dev = eth_dev;
	pf = params->pf;
	vf->pf = pf;
	vf->enic.switchdev_mode = pf->switchdev_mode;
	/* Only switchdev is supported now */
	RTE_ASSERT(vf->enic.switchdev_mode);
	/* Allocate WQ, RQ, CQ for the representor */
	vf->pf_wq_idx = vf_wq_idx(vf);
	vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
	vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
	vf->pf_rq_data_idx = vf_rq_data_idx(vf);
	/* Remove these assertions once queue allocation has an easy-to-use
	 * allocator API instead of index number calculations used throughout
	 * the driver.
	 */
	RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
		   vf->pf_rq_sop_idx);
	/* RX handlers use enic_cq_rq(sop) to get CQ, so do not save it */
	pf->vf_required_wq++;
	pf->vf_required_rq += 2; /* sop and data */
	pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
	ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
		vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx, vf->pf_rq_data_idx,
		vf->pf_wq_cq_idx, enic_cq_rq(pf, vf->pf_rq_sop_idx));
	if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
		ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
			    " >= number of RQs (%u) in CIMC or UCSM",
			    pf->conf_cq_count, pf->conf_rq_count);
		return -EINVAL;
	}

	/* Check for non-existent VFs */
	pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
	if (vf->vf_id >= pdev->max_vfs) {
		ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
			    vf->vf_id, pdev->max_vfs);
		return -ENODEV;
	}

	eth_dev->device->driver = pf->rte_dev->device->driver;
	eth_dev->dev_ops = &enic_vf_representor_dev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	eth_dev->data->representor_id = vf->vf_id;
	eth_dev->data->backer_port_id = pf->port_id;
	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
		sizeof(struct rte_ether_addr) *
		ENIC_UNICAST_PERFECT_FILTERS, 0);
	if (eth_dev->data->mac_addrs == NULL)
		return -ENOMEM;
	/* Use 1 RX queue and 1 TX queue for representor path */
	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;
	eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
	/* Initial link state copied from PF */
	eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
	/* Representor vdev to perform devcmd */
	vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
	if (vf->enic.vdev == NULL)
		return -ENOMEM;
	ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
	if (ret)
		return ret;
	/* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
	ret = get_vf_config(vf);
	if (ret)
		return ret;

	/*
	 * Calculate VF BDF. The firmware ensures that PF BDF is always
	 * bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
	 */
	vf->bdf = pdev->addr;
	vf->bdf.function += vf->vf_id + 1;
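
	/*
	 * Example: for PF 0000:12:00.0, VF 0 is addressed as 0000:12:00.1,
	 * VF 1 as 0000:12:00.2, and so on. This synthesized BDF is used only
	 * for the bdf_name string built below.
	 */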

	/* Copy a few fields used by enic_fm_flow */
	vf_enic = &vf->enic;
	vf_enic->switch_domain_id = vf->switch_domain_id;
	vf_enic->flow_filter_mode = pf->flow_filter_mode;
	vf_enic->rte_dev = eth_dev;
	vf_enic->dev_data = eth_dev->data;
	LIST_INIT(&vf_enic->flows);
	LIST_INIT(&vf_enic->memzone_list);
	rte_spinlock_init(&vf_enic->memzone_list_lock);
	addr = &vf->bdf;
	snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		 addr->domain, addr->bus, addr->devid, addr->function);
	return 0;
}

int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	vnic_dev_unregister(vf->enic.vdev);
	return 0;
}