4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <sys/queue.h>
44 #include <rte_common.h>
46 #include <rte_memory.h>
47 #include <rte_memzone.h>
48 #include <rte_launch.h>
49 #include <rte_cycles.h>
50 #include <rte_tailq.h>
52 #include <rte_per_lcore.h>
53 #include <rte_lcore.h>
54 #include <rte_atomic.h>
55 #include <rte_branch_prediction.h>
56 #include <rte_malloc.h>
58 #include <rte_random.h>
59 #include <rte_common.h>
60 #include <rte_errno.h>
62 #include <cmdline_parse.h>
70 * #. Basic tests: done on one core:
72 * - Using single producer/single consumer functions:
74 * - Enqueue one object, two objects, MAX_BULK objects
75 * - Dequeue one object, two objects, MAX_BULK objects
76 * - Check that dequeued pointers are correct
78 * - Using multi producers/multi consumers functions:
80 * - Enqueue one object, two objects, MAX_BULK objects
81 * - Dequeue one object, two objects, MAX_BULK objects
82 * - Check that dequeued pointers are correct
84 * - Test watermark and default bulk enqueue/dequeue:
87 * - Set default bulk value
88 * - Enqueue objects, check that -EDQUOT is returned when
89 * watermark is exceeded
90 * - Check that dequeued pointers are correct
92 * #. Check quota and watermark
94 * - Start a loop on another lcore that will enqueue and dequeue
95 * objects in a ring. It will monitor the value of quota (default
96 * bulk count) and watermark.
97 * - At the same time, change the quota and the watermark on the
99 * - The slave lcore will check that bulk count changes from 4 to
100 * 8, and watermark changes from 16 to 32.
102 * #. Performance tests.
104 * This test is done on the following configurations:
106 * - One core enqueuing, one core dequeuing
107 * - One core enqueuing, other cores dequeuing
108 * - One core dequeuing, other cores enqueuing
109 * - Half of the cores enqueuing, the other half dequeuing
111 * When only one core enqueues/dequeues, the test is done with the
112 * SP/SC functions in addition to the MP/MC functions.
114 * The test is done with different bulk sizes.
116 * On each core, the test enqueues or dequeues objects during
117 * TIME_S seconds. The number of successes and failures are stored on
118 * each core, then summed and displayed.
120 * The test checks that the number of enqueues is equal to the
121 * number of dequeues.
124 #define RING_SIZE 4096
129 static rte_atomic32_t synchro;
131 static unsigned bulk_enqueue;
132 static unsigned bulk_dequeue;
133 static struct rte_ring *r;
136 unsigned enq_success ;
140 unsigned deq_success;
142 } __rte_cache_aligned;
144 static struct test_stats test_stats[RTE_MAX_LCORE];
146 #define DEFINE_ENQUEUE_FUNCTION(name, enq_code) \
148 name(__attribute__((unused)) void *arg) \
150 unsigned success = 0; \
151 unsigned quota = 0; \
154 unsigned long dummy_obj; \
155 void *obj_table[MAX_BULK]; \
157 unsigned lcore_id = rte_lcore_id(); \
158 uint64_t start_cycles, end_cycles; \
159 uint64_t time_diff = 0, hz = rte_get_hpet_hz(); \
161 /* init dummy object table */ \
162 for (i = 0; i< MAX_BULK; i++) { \
163 dummy_obj = lcore_id + 0x1000 + i; \
164 obj_table[i] = (void *)dummy_obj; \
167 /* wait synchro for slaves */ \
168 if (lcore_id != rte_get_master_lcore()) \
169 while (rte_atomic32_read(&synchro) == 0); \
171 start_cycles = rte_get_hpet_cycles(); \
173 /* enqueue as many object as possible */ \
174 while (time_diff/hz < TIME_S) { \
175 for (i = 0; likely(i < N); i++) { \
179 else if (ret == -EDQUOT) \
184 end_cycles = rte_get_hpet_cycles(); \
185 time_diff = end_cycles - start_cycles; \
188 /* write statistics in a shared structure */ \
189 test_stats[lcore_id].enq_success = success; \
190 test_stats[lcore_id].enq_quota = quota; \
191 test_stats[lcore_id].enq_fail = fail; \
196 #define DEFINE_DEQUEUE_FUNCTION(name, deq_code) \
198 name(__attribute__((unused)) void *arg) \
200 unsigned success = 0; \
203 void *obj_table[MAX_BULK]; \
205 unsigned lcore_id = rte_lcore_id(); \
206 uint64_t start_cycles, end_cycles; \
207 uint64_t time_diff = 0, hz = rte_get_hpet_hz(); \
209 /* wait synchro for slaves */ \
210 if (lcore_id != rte_get_master_lcore()) \
211 while (rte_atomic32_read(&synchro) == 0); \
213 start_cycles = rte_get_hpet_cycles(); \
215 /* dequeue as many object as possible */ \
216 while (time_diff/hz < TIME_S) { \
217 for (i = 0; likely(i < N); i++) { \
224 end_cycles = rte_get_hpet_cycles(); \
225 time_diff = end_cycles - start_cycles; \
228 /* write statistics in a shared structure */ \
229 test_stats[lcore_id].deq_success = success; \
230 test_stats[lcore_id].deq_fail = fail; \
235 DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_sp_enqueue,
236 rte_ring_sp_enqueue_bulk(r, obj_table, bulk_enqueue))
238 DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_sc_dequeue,
239 rte_ring_sc_dequeue_bulk(r, obj_table, bulk_dequeue))
241 DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_mp_enqueue,
242 rte_ring_mp_enqueue_bulk(r, obj_table, bulk_enqueue))
244 DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_mc_dequeue,
245 rte_ring_mc_dequeue_bulk(r, obj_table, bulk_dequeue))
247 #define TEST_RING_VERIFY(exp) \
249 printf("error at %s:%d\tcondition " #exp " failed\n", \
250 __func__, __LINE__); \
255 #define TEST_RING_FULL_EMTPY_ITER 8
259 launch_cores(unsigned enq_core_count, unsigned deq_core_count, int sp, int sc)
263 unsigned rate, deq_remain = 0;
264 unsigned enq_total, deq_total;
265 struct test_stats sum;
266 int (*enq_f)(void *);
267 int (*deq_f)(void *);
268 unsigned cores = enq_core_count + deq_core_count;
271 rte_atomic32_set(&synchro, 0);
273 printf("ring_autotest e/d_core=%u,%u e/d_bulk=%u,%u ",
274 enq_core_count, deq_core_count, bulk_enqueue, bulk_dequeue);
275 printf("sp=%d sc=%d ", sp, sc);
277 /* set enqueue function to be used */
279 enq_f = test_ring_per_core_sp_enqueue;
281 enq_f = test_ring_per_core_mp_enqueue;
283 /* set dequeue function to be used */
285 deq_f = test_ring_per_core_sc_dequeue;
287 deq_f = test_ring_per_core_mc_dequeue;
289 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
290 if (enq_core_count != 0) {
292 rte_eal_remote_launch(enq_f, NULL, lcore_id);
294 if (deq_core_count != 1) {
296 rte_eal_remote_launch(deq_f, NULL, lcore_id);
300 memset(test_stats, 0, sizeof(test_stats));
302 /* start synchro and launch test on master */
303 rte_atomic32_set(&synchro, 1);
307 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
311 if (rte_eal_wait_lcore(lcore_id) < 0)
315 memset(&sum, 0, sizeof(sum));
316 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
317 sum.enq_success += test_stats[lcore_id].enq_success;
318 sum.enq_quota += test_stats[lcore_id].enq_quota;
319 sum.enq_fail += test_stats[lcore_id].enq_fail;
320 sum.deq_success += test_stats[lcore_id].deq_success;
321 sum.deq_fail += test_stats[lcore_id].deq_fail;
325 while (rte_ring_sc_dequeue(r, &obj) == 0)
329 printf("per-lcore test returned -1\n");
333 enq_total = (sum.enq_success * bulk_enqueue) +
334 (sum.enq_quota * bulk_enqueue);
335 deq_total = (sum.deq_success * bulk_dequeue) + deq_remain;
337 rate = deq_total/TIME_S;
339 printf("rate_persec=%u\n", rate);
341 if (enq_total != deq_total) {
342 printf("invalid enq/deq_success counter: %u %u\n",
343 enq_total, deq_total);
351 do_one_ring_test2(unsigned enq_core_count, unsigned deq_core_count,
352 unsigned n_enq_bulk, unsigned n_deq_bulk)
358 bulk_enqueue = n_enq_bulk;
359 bulk_dequeue = n_deq_bulk;
361 do_sp = (enq_core_count == 1) ? 1 : 0;
362 do_sc = (deq_core_count == 1) ? 1 : 0;
364 for (sp = 0; sp <= do_sp; sp ++) {
365 for (sc = 0; sc <= do_sc; sc ++) {
366 ret = launch_cores(enq_core_count,
377 do_one_ring_test(unsigned enq_core_count, unsigned deq_core_count)
379 unsigned bulk_enqueue_tab[] = { 1, 2, 4, 32, 0 };
380 unsigned bulk_dequeue_tab[] = { 1, 2, 4, 32, 0 };
381 unsigned *bulk_enqueue_ptr;
382 unsigned *bulk_dequeue_ptr;
385 for (bulk_enqueue_ptr = bulk_enqueue_tab;
387 bulk_enqueue_ptr++) {
389 for (bulk_dequeue_ptr = bulk_dequeue_tab;
391 bulk_dequeue_ptr++) {
393 ret = do_one_ring_test2(enq_core_count, deq_core_count,
404 check_quota_and_watermark(__attribute__((unused)) void *dummy)
406 uint64_t hz = rte_get_hpet_hz();
407 void *obj_table[MAX_BULK];
408 unsigned watermark, watermark_old = 16;
409 uint64_t cur_time, end_time;
412 unsigned quota, quota_old = 4;
414 /* init the object table */
415 memset(obj_table, 0, sizeof(obj_table));
416 end_time = rte_get_hpet_cycles() + (hz * 2);
418 /* check that bulk and watermark are 4 and 32 (respectively) */
421 /* read quota, the only change allowed is from 4 to 8 */
422 quota = rte_ring_get_bulk_count(r);
423 if (quota != quota_old && (quota_old != 4 || quota != 8)) {
424 printf("Bad quota change %u -> %u\n", quota_old,
430 /* add in ring until we reach watermark */
432 for (i = 0; i < 16; i ++) {
435 ret = rte_ring_enqueue_bulk(r, obj_table, quota);
438 if (ret != -EDQUOT) {
439 printf("Cannot enqueue objects, or watermark not "
440 "reached (ret=%d)\n", ret);
444 /* read watermark, the only change allowed is from 16 to 32 */
445 watermark = i * quota;
446 if (watermark != watermark_old &&
447 (watermark_old != 16 || watermark != 32)) {
448 printf("Bad watermark change %u -> %u\n", watermark_old,
452 watermark_old = watermark;
454 /* dequeue objects from ring */
456 ret = rte_ring_dequeue_bulk(r, obj_table, quota);
458 printf("Cannot dequeue (ret=%d)\n", ret);
463 cur_time = rte_get_hpet_cycles();
464 diff = end_time - cur_time;
467 if (watermark_old != 32 || quota_old != 8) {
468 printf("quota or watermark was not updated (q=%u wm=%u)\n",
469 quota_old, watermark_old);
477 test_quota_and_watermark(void)
479 unsigned lcore_id = rte_lcore_id();
480 unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
482 printf("Test quota and watermark live modification\n");
484 rte_ring_set_bulk_count(r, 4);
485 rte_ring_set_water_mark(r, 16);
487 /* launch a thread that will enqueue and dequeue, checking
488 * watermark and quota */
489 rte_eal_remote_launch(check_quota_and_watermark, NULL, lcore_id2);
492 rte_ring_set_bulk_count(r, 8);
493 rte_ring_set_water_mark(r, 32);
496 if (rte_eal_wait_lcore(lcore_id2) < 0)
501 /* Test for catch on invalid watermark values */
503 test_set_watermark( void ){
507 struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
509 printf( " ring lookup failed\n" );
512 count = r->prod.size*2;
513 setwm = rte_ring_set_water_mark(r, count);
514 if (setwm != -EINVAL){
515 printf("Test failed to detect invalid watermark count value\n");
520 setwm = rte_ring_set_water_mark(r, count);
521 if (r->prod.watermark != r->prod.size) {
522 printf("Test failed to detect invalid watermark count value\n");
532 * helper routine for test_ring_basic
535 test_ring_basic_full_empty(void * const src[], void *dst[])
538 const unsigned rsz = RING_SIZE - 1;
540 printf("Basic full/empty test\n");
542 for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) {
544 /* random shift in the ring */
545 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
546 printf("%s: iteration %u, random shift: %u;\n",
548 TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
550 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
553 TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
555 TEST_RING_VERIFY(0 == rte_ring_free_count(r));
556 TEST_RING_VERIFY(rsz == rte_ring_count(r));
557 TEST_RING_VERIFY(rte_ring_full(r));
558 TEST_RING_VERIFY(0 == rte_ring_empty(r));
561 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
562 TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
563 TEST_RING_VERIFY(0 == rte_ring_count(r));
564 TEST_RING_VERIFY(0 == rte_ring_full(r));
565 TEST_RING_VERIFY(rte_ring_empty(r));
568 TEST_RING_VERIFY(0 == memcmp(src, dst, rsz));
575 test_ring_basic(void)
577 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
581 /* alloc dummy object pointers */
582 src = malloc(RING_SIZE*2*sizeof(void *));
586 for (i = 0; i < RING_SIZE*2 ; i++) {
587 src[i] = (void *)(unsigned long)i;
591 /* alloc some room for copied objects */
592 dst = malloc(RING_SIZE*2*sizeof(void *));
596 memset(dst, 0, RING_SIZE*2*sizeof(void *));
599 printf("enqueue 1 obj\n");
600 ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
605 printf("enqueue 2 objs\n");
606 ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
611 printf("enqueue MAX_BULK objs\n");
612 ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
617 printf("dequeue 1 obj\n");
618 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
623 printf("dequeue 2 objs\n");
624 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
629 printf("dequeue MAX_BULK objs\n");
630 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
636 if (memcmp(src, dst, cur_dst - dst)) {
637 test_hexdump("src", src, cur_src - src);
638 test_hexdump("dst", dst, cur_dst - dst);
639 printf("data after dequeue is not the same\n");
645 printf("enqueue 1 obj\n");
646 ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
651 printf("enqueue 2 objs\n");
652 ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
657 printf("enqueue MAX_BULK objs\n");
658 ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
663 printf("dequeue 1 obj\n");
664 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
669 printf("dequeue 2 objs\n");
670 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
675 printf("dequeue MAX_BULK objs\n");
676 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
682 if (memcmp(src, dst, cur_dst - dst)) {
683 test_hexdump("src", src, cur_src - src);
684 test_hexdump("dst", dst, cur_dst - dst);
685 printf("data after dequeue is not the same\n");
691 printf("fill and empty the ring\n");
692 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
693 ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
697 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
704 if (memcmp(src, dst, cur_dst - dst)) {
705 test_hexdump("src", src, cur_src - src);
706 test_hexdump("dst", dst, cur_dst - dst);
707 printf("data after dequeue is not the same\n");
711 if (test_ring_basic_full_empty(src, dst) != 0)
717 printf("test watermark and default bulk enqueue / dequeue\n");
718 rte_ring_set_bulk_count(r, 16);
719 rte_ring_set_water_mark(r, 20);
720 n = rte_ring_get_bulk_count(r);
722 printf("rte_ring_get_bulk_count() returned %u instead "
729 ret = rte_ring_enqueue_bulk(r, cur_src, n);
732 printf("Cannot enqueue\n");
735 ret = rte_ring_enqueue_bulk(r, cur_src, n);
737 if (ret != -EDQUOT) {
738 printf("Watermark not exceeded\n");
741 ret = rte_ring_dequeue_bulk(r, cur_dst, n);
744 printf("Cannot dequeue\n");
747 ret = rte_ring_dequeue_bulk(r, cur_dst, n);
750 printf("Cannot dequeue2\n");
755 if (memcmp(src, dst, cur_dst - dst)) {
756 test_hexdump("src", src, cur_src - src);
757 test_hexdump("dst", dst, cur_dst - dst);
758 printf("data after dequeue is not the same\n");
779 * it will always fail to create ring with a wrong ring size number in this function
782 test_ring_creation_with_wrong_size(void)
784 struct rte_ring * rp = NULL;
786 rp = rte_ring_create("test_bad_ring_size", RING_SIZE+1, SOCKET_ID_ANY, 0);
795 * it tests if it would always fail to create ring with an used ring name
798 test_ring_creation_with_an_used_name(void)
800 struct rte_ring * rp;
802 rp = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
810 * Test to if a non-power of 2 count causes the create
811 * function to fail correctly
814 test_create_count_odd(void)
816 struct rte_ring *r = rte_ring_create("test_ring_count",
817 4097, SOCKET_ID_ANY, 0 );
/*
 * Negative test: looking up a ring name that was never created must return
 * NULL with rte_errno set to ENOENT.  Fixes the "returnn" typo in the
 * failure message.  NOTE(review): the return statements of this function
 * are missing from this view.
 */
825 test_lookup_null(void)
827 struct rte_ring *rlp = rte_ring_lookup("ring_not_found");
829 if (rte_errno != ENOENT){
830 printf( "test failed to return error on null pointer\n");
837 * it tests some more basic ring operations
840 test_ring_basic_ex(void)
844 struct rte_ring * rp;
847 obj = (void **)rte_zmalloc("test_ring_basic_ex_malloc", (RING_SIZE * sizeof(void *)), 0);
849 printf("test_ring_basic_ex fail to rte_malloc\n");
853 rp = rte_ring_create("test_ring_basic_ex", RING_SIZE, SOCKET_ID_ANY, 0);
855 printf("test_ring_basic_ex fail to create ring\n");
859 if (rte_ring_lookup("test_ring_basic_ex") != rp) {
863 if (rte_ring_empty(rp) != 1) {
864 printf("test_ring_basic_ex ring is not empty but it should be\n");
868 printf("%u ring entries are now free\n", rte_ring_free_count(rp));
870 for (i = 0; i < RING_SIZE; i ++) {
871 rte_ring_enqueue(rp, obj[i]);
874 if (rte_ring_full(rp) != 1) {
875 printf("test_ring_basic_ex ring is not full but it should be\n");
879 for (i = 0; i < RING_SIZE; i ++) {
880 rte_ring_dequeue(rp, &obj[i]);
883 if (rte_ring_empty(rp) != 1) {
884 printf("test_ring_basic_ex ring is not empty but it should be\n");
899 unsigned enq_core_count, deq_core_count;
901 /* some more basic operations */
902 if (test_ring_basic_ex() < 0)
905 rte_atomic32_init(&synchro);
908 r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
912 /* retrieve the ring from its name */
913 if (rte_ring_lookup("test") != r) {
914 printf("Cannot lookup ring from its name\n");
918 /* basic operations */
919 if (test_ring_basic() < 0)
922 /* basic operations */
923 if (test_quota_and_watermark() < 0)
926 if ( test_set_watermark() < 0){
927 printf ("Test failed to detect invalid parameter\n");
931 printf ( "Test detected forced bad watermark values\n");
933 if ( test_create_count_odd() < 0){
934 printf ("Test failed to detect odd count\n");
938 printf ( "Test detected odd count\n");
940 if ( test_lookup_null() < 0){
941 printf ("Test failed to detect NULL ring lookup\n");
945 printf ( "Test detected NULL ring lookup \n");
948 printf("start performance tests\n");
950 /* one lcore for enqueue, one for dequeue */
953 if (do_one_ring_test(enq_core_count, deq_core_count) < 0)
956 /* max cores for enqueue, one for dequeue */
957 enq_core_count = rte_lcore_count() - 1;
959 if (do_one_ring_test(enq_core_count, deq_core_count) < 0)
962 /* max cores for dequeue, one for enqueue */
964 deq_core_count = rte_lcore_count() - 1;
965 if (do_one_ring_test(enq_core_count, deq_core_count) < 0)
968 /* half for enqueue and half for dequeue */
969 enq_core_count = rte_lcore_count() / 2;
970 deq_core_count = rte_lcore_count() / 2;
971 if (do_one_ring_test(enq_core_count, deq_core_count) < 0)
974 /* test of creating ring with wrong size */
975 if (test_ring_creation_with_wrong_size() < 0)
978 /* test of creation ring with an used name */
979 if (test_ring_creation_with_an_used_name() < 0)
982 /* dump the ring status */
983 rte_ring_list_dump();