net/ring: set ethernet device field
drivers/net/ring/rte_eth_ring.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
#define ETH_RING_ACTION_CREATE          "CREATE"
#define ETH_RING_ACTION_ATTACH          "ATTACH"

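/*
 * The "nodeaction" devarg takes values of the form name:node:action.
 * For example (illustrative only, the names are arbitrary):
 *
 *   --vdev=net_ring0,nodeaction=r0:0:CREATE
 *
 * creates a ring-backed ethdev named r0 on NUMA node 0; see the
 * RTE_PMD_REGISTER_PARAM_STRING() declaration at the end of this file.
 */
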
static const char *valid_arguments[] = {
        ETH_RING_NUMA_NODE_ACTION_ARG,
        NULL
};

enum dev_action {
        DEV_CREATE,
        DEV_ATTACH
};

struct ring_queue {
        struct rte_ring *rng;
        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned max_rx_queues;
        unsigned max_tx_queues;

        struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
        struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

        struct ether_addr address;
        enum dev_action action;
};


static struct rte_eth_link pmd_link = {
                .link_speed = ETH_SPEED_NUM_10G,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN,
                .link_autoneg = ETH_LINK_SPEED_AUTONEG
};

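/*
 * The Rx/Tx burst callbacks are thin wrappers around ring dequeue/enqueue.
 * When a queue's ring was created single-consumer (RING_F_SC_DEQ) or
 * single-producer (RING_F_SP_ENQ), the statistics counters are updated
 * without atomics, since only one thread can touch that side of the ring.
 */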
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
                        ptrs, nb_bufs, NULL);
        if (r->rng->flags & RING_F_SC_DEQ)
                r->rx_pkts.cnt += nb_rx;
        else
                rte_atomic64_add(&(r->rx_pkts), nb_rx);
        return nb_rx;
}

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
                        ptrs, nb_bufs, NULL);
        if (r->rng->flags & RING_F_SP_ENQ) {
                r->tx_pkts.cnt += nb_tx;
                r->err_pkts.cnt += nb_bufs - nb_tx;
        } else {
                rte_atomic64_add(&(r->tx_pkts), nb_tx);
                rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
        }
        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
        return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                                    uint16_t nb_rx_desc __rte_unused,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_rxconf *rx_conf __rte_unused,
                                    struct rte_mempool *mb_pool __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                                    uint16_t nb_tx_desc __rte_unused,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
        return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
        dev_info->min_rx_bufsize = 0;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal = dev->data->dev_private;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
                rx_total += stats->q_ipackets[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
                stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
                tx_total += stats->q_opackets[i];
                tx_err_total += stats->q_errors[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal = dev->data->dev_private;
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                internal->rx_ring_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                internal->tx_ring_queues[i].tx_pkts.cnt = 0;
                internal->tx_ring_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
        uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
        struct ether_addr *mac_addr __rte_unused,
        uint32_t index __rte_unused,
        uint32_t vmdq __rte_unused)
{
        return 0;
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_set_link_up = eth_dev_set_link_up,
        .dev_set_link_down = eth_dev_set_link_down,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .mac_addr_remove = eth_mac_addr_remove,
        .mac_addr_add = eth_mac_addr_add,
};

static struct rte_vdev_driver pmd_ring_drv;

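/*
 * Common back-end for device creation: allocate the ethdev data and private
 * structures on the requested NUMA node and wire the supplied rings up as
 * the Rx/Tx queues. Returns the new port id on success, or -1 with
 * rte_errno set on failure.
 */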
static int
do_eth_dev_ring_create(const char *name,
                struct rte_ring *const rx_queues[], const unsigned nb_rx_queues,
                struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
                const unsigned int numa_node, enum dev_action action,
                struct rte_eth_dev **eth_dev_p)
{
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        unsigned i;

        RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for eth_dev structure
         * and internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        data->rx_queues = rte_zmalloc_socket(name,
                        sizeof(void *) * nb_rx_queues, 0, numa_node);
        if (data->rx_queues == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        data->tx_queues = rte_zmalloc_socket(name,
                        sizeof(void *) * nb_tx_queues, 0, numa_node);
        if (data->tx_queues == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL) {
                rte_errno = ENOSPC;
                goto error;
        }

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in eth_dev_data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we replace the data element of the originally allocated
         * eth_dev, so the rings are local per-process
         */

        internals->action = action;
        internals->max_rx_queues = nb_rx_queues;
        internals->max_tx_queues = nb_tx_queues;
        for (i = 0; i < nb_rx_queues; i++) {
                internals->rx_ring_queues[i].rng = rx_queues[i];
                data->rx_queues[i] = &internals->rx_ring_queues[i];
        }
        for (i = 0; i < nb_tx_queues; i++) {
                internals->tx_ring_queues[i].rng = tx_queues[i];
                data->tx_queues[i] = &internals->tx_ring_queues[i];
        }

        data->dev_private = internals;
        data->port_id = eth_dev->data->port_id;
        memmove(data->name, eth_dev->data->name, sizeof(data->name));
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->address;

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        data->dev_flags = RTE_ETH_DEV_DETACHABLE;
        data->kdrv = RTE_KDRV_NONE;
        data->drv_name = pmd_ring_drv.driver.name;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_ring_rx;
        eth_dev->tx_pkt_burst = eth_ring_tx;

        *eth_dev_p = eth_dev;

        return data->port_id;

error:
        if (data) {
                rte_free(data->rx_queues);
                rte_free(data->tx_queues);
        }
        rte_free(data);
        rte_free(internals);

        return -1;
}

int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
                const unsigned nb_rx_queues,
                struct rte_ring *const tx_queues[],
                const unsigned nb_tx_queues,
                const unsigned numa_node)
{
        struct rte_eth_dev *eth_dev = NULL;

        /* do some parameter checking */
        if (rx_queues == NULL && nb_rx_queues > 0) {
                rte_errno = EINVAL;
                return -1;
        }
        if (tx_queues == NULL && nb_tx_queues > 0) {
                rte_errno = EINVAL;
                return -1;
        }
        if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
                rte_errno = EINVAL;
                return -1;
        }
        if (nb_tx_queues > RTE_PMD_RING_MAX_TX_RINGS) {
                rte_errno = EINVAL;
                return -1;
        }

        return do_eth_dev_ring_create(name, rx_queues, nb_rx_queues,
                        tx_queues, nb_tx_queues, numa_node, DEV_ATTACH,
                        &eth_dev);
}

int
rte_eth_from_ring(struct rte_ring *r)
{
        return rte_eth_from_rings(r->name, &r, 1, &r, 1,
                        r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}

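/*
 * Example usage (an illustrative sketch, not part of the driver): wrap a
 * single ring as a loopback port. Because the same ring backs both the Rx
 * and the Tx queue, mbufs sent with rte_eth_tx_burst() on the port come
 * straight back via rte_eth_rx_burst().
 *
 *      struct rte_ring *r = rte_ring_create("r0", 1024, rte_socket_id(),
 *                      RING_F_SP_ENQ | RING_F_SC_DEQ);
 *      int port_id = rte_eth_from_ring(r);
 *      if (port_id < 0)
 *              rte_exit(EXIT_FAILURE, "cannot create ethdev: %d\n", rte_errno);
 */
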
static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
                enum dev_action action, struct rte_eth_dev **eth_dev)
{
        /* rx and tx are so-called from the point of view of the first port.
         * They are inverted from the point of view of the second port.
         */
        struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
        unsigned i;
        char rng_name[RTE_RING_NAMESIZE];
        unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
                        RTE_PMD_RING_MAX_TX_RINGS);

        for (i = 0; i < num_rings; i++) {
                snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
                rxtx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (rxtx[i] == NULL)
                        return -1;
        }

        if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
                numa_node, action, eth_dev) < 0)
                return -1;

        return 0;
}

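/*
 * Note on CREATE vs ATTACH: with DEV_CREATE the rings are allocated here
 * under well-known names ("ETH_RXTX<i>_<name>"); with DEV_ATTACH they are
 * looked up by those names instead, so a second port can attach to rings
 * created elsewhere under the same name.
 */
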
struct node_action_pair {
        char name[PATH_MAX];
        unsigned node;
        enum dev_action action;
};

struct node_action_list {
        unsigned total;
        unsigned count;
        struct node_action_pair *list;
};

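/*
 * Parse one "nodeaction" value of the form name:node:action into the next
 * free slot of the node_action_list, e.g. "r0:0:CREATE" (illustrative).
 */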
static int parse_kvlist(const char *key __rte_unused, const char *value, void *data)
{
        struct node_action_list *info = data;
        int ret;
        char *name;
        char *action;
        char *node;
        char *end;

        name = strdup(value);

        ret = -EINVAL;

        if (!name) {
                RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
                goto out;
        }

        node = strchr(name, ':');
        if (!node) {
                RTE_LOG(WARNING, PMD, "could not parse node value from %s\n", name);
                goto out;
        }

        *node = '\0';
        node++;

        action = strchr(node, ':');
        if (!action) {
                RTE_LOG(WARNING, PMD, "could not parse action value from %s\n", node);
                goto out;
        }

        *action = '\0';
        action++;

        /*
         * Need to do some sanity checking here
         */

        if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
                info->list[info->count].action = DEV_ATTACH;
        else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
                info->list[info->count].action = DEV_CREATE;
        else
                goto out;

        errno = 0;
        info->list[info->count].node = strtol(node, &end, 10);

        if ((errno != 0) || (*end != '\0')) {
                RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
                goto out;
        }

        snprintf(info->list[info->count].name,
                        sizeof(info->list[info->count].name), "%s", name);

        info->count++;

        ret = 0;
out:
        free(name);
        return ret;
}

static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        struct node_action_list *info = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);

        RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

        if (params == NULL || params[0] == '\0') {
                ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
                                &eth_dev);
                if (ret == -1) {
                        RTE_LOG(INFO, PMD,
                                "Attach to pmd_ring for %s\n", name);
                        ret = eth_dev_ring_create(name, rte_socket_id(),
                                                  DEV_ATTACH, &eth_dev);
                }
        } else {
                kvlist = rte_kvargs_parse(params, valid_arguments);

                if (!kvlist) {
                        RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
                                        " rings-backed ethernet device\n");
                        ret = eth_dev_ring_create(name, rte_socket_id(),
                                                  DEV_CREATE, &eth_dev);
                        if (ret == -1) {
                                RTE_LOG(INFO, PMD,
                                        "Attach to pmd_ring for %s\n",
                                        name);
                                ret = eth_dev_ring_create(name, rte_socket_id(),
                                                          DEV_ATTACH, &eth_dev);
                        }
                        /* fall through so the device field is also set */
                } else {
                        ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
                        info = rte_zmalloc("struct node_action_list",
                                           sizeof(struct node_action_list) +
                                           (sizeof(struct node_action_pair) * ret),
                                           0);
                        if (!info) {
                                ret = -1;
                                goto out_free;
                        }

                        info->total = ret;
                        info->list = (struct node_action_pair *)(info + 1);

                        ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
                                                 parse_kvlist, info);

                        if (ret < 0)
                                goto out_free;

                        for (info->count = 0; info->count < info->total; info->count++) {
                                ret = eth_dev_ring_create(info->list[info->count].name,
                                                          info->list[info->count].node,
                                                          info->list[info->count].action,
                                                          &eth_dev);
                                if ((ret == -1) &&
                                    (info->list[info->count].action == DEV_CREATE)) {
                                        RTE_LOG(INFO, PMD,
                                                "Attach to pmd_ring for %s\n",
                                                name);
                                        ret = eth_dev_ring_create(name,
                                                        info->list[info->count].node,
                                                        DEV_ATTACH,
                                                        &eth_dev);
                                }
                        }
                }
        }

        if (eth_dev)
                eth_dev->device = &dev->device;

out_free:
        rte_kvargs_free(kvlist);
        rte_free(info);
        return ret;
}

static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev *eth_dev = NULL;
        struct pmd_internals *internals = NULL;
        struct ring_queue *r = NULL;
        uint16_t i;

        RTE_LOG(INFO, PMD, "Un-initializing pmd_ring for %s\n", name);

        if (name == NULL)
                return -EINVAL;

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_stop(eth_dev);

        internals = eth_dev->data->dev_private;
        if (internals->action == DEV_CREATE) {
                /*
                 * it is only necessary to delete the rings in rx_queues
                 * because they are the same rings used in tx_queues
                 */
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        r = eth_dev->data->rx_queues[i];
                        rte_ring_free(r->rng);
                }
        }

        rte_free(eth_dev->data->rx_queues);
        rte_free(eth_dev->data->tx_queues);
        rte_free(eth_dev->data->dev_private);

        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);
        return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
        .probe = rte_pmd_ring_probe,
        .remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
        ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");